
src/cpu/x86/vm/templateTable_x86.cpp

Before the change:

    // Compare rax with the top addr, and if still equal, store the new
    // top addr in rbx at the address of the top addr pointer. Sets ZF if it
    // was equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
    //
    // rax: object begin
    // rbx: object end
    // rdx: instance size in bytes
    __ locked_cmpxchgptr(rbx, heap_top);

    // if someone beat us on the allocation, try again, otherwise continue
    __ jcc(Assembler::notEqual, retry);

    __ incr_allocated_bytes(thread, rdx, 0);
  }
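
  // The locked cmpxchg above is a bump-pointer allocation; a C-style sketch
  // of the same protocol (illustrative names, not HotSpot's):
  //
  //   uint8_t* obj = *heap_top;                  // rax: object begin
  //   do {
  //     uint8_t* new_top = obj + size;           // rbx: object end
  //     if (new_top > heap_end) goto slow_case;  // checked earlier in the retry loop
  //   } while (!CAS(heap_top, &obj, new_top));   // ZF clear => lost the race:
  //                                              // obj reloaded with new top, retry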

  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
    // The object is initialized before the header.  If the object size is
    // zero, go directly to the header initialization.
    __ bind(initialize_object);
    __ decrement(rdx, sizeof(oopDesc));
    __ jcc(Assembler::zero, initialize_header);

    // Initialize topmost object field, divide rdx by 8, check if odd and
    // test if zero.
    __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
    __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd

#ifdef ASSERT
    // make sure rdx was a multiple of 8
    Label L;
    // Ignore partial flag stall after shrl() since it is debug VM
    __ jccb(Assembler::carryClear, L);
    __ stop("object size is not multiple of 2 - adjust this code");
    __ bind(L);
    // rdx must be > 0, no extra check needed here
#endif

    // initialize remaining object fields: rdx was a multiple of 8
    { Label loop;
      __ bind(loop);
      __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
      NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
      __ decrement(rdx);
      __ jcc(Assembler::notZero, loop);
    }
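
    // Index arithmetic of the loop above, as a C sketch: rdx counts 8-byte
    // chunks and walks the payload top-down (on 32-bit each pass stores two
    // 4-byte words). Names below are illustrative:
    //
    //   size_t chunks = payload_bytes >> 3;         // shrl(rdx, LogBytesPerLong)
    //   for (size_t i = chunks; i != 0; i--)        // decrement + jcc(notZero)
    //     *(uint64_t*)(obj + sizeof(oopDesc) + i*8 - 8) = 0;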

    // initialize object header only.
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      __ pop(rcx);   // get saved klass back in the register.
      __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rbx);
    } else {
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
                (intptr_t)markOopDesc::prototype()); // header
      __ pop(rcx);   // get saved klass back in the register.
    }
#ifdef _LP64
    __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
    __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
#endif
    __ store_klass(rax, rcx);  // klass
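
    // In runtime terms the header stores above amount to (paraphrase only):
    //
    //   obj->set_mark(UseBiasedLocking ? klass->prototype_header()  // biasable
    //                                  : markOopDesc::prototype()); // neutral
    //   obj->set_klass(klass);  // after zeroing the 4-byte klass gap when
    //                           // class pointers are compressed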

    {
      SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
      // Trigger dtrace event for fastpath
      __ push(atos);
      __ call_VM_leaf(
           CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
      __ pop(atos);
    }

    __ jmp(done);
  }

  // slow case
  __ bind(slow_case);
  __ pop(rcx);   // restore stack pointer to what it was when we came in.
  __ bind(slow_case_no_pop);

  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
  Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);

  __ get_constant_pool(rarg1);
  __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
  __ verify_oop(rax);

  // continue
  __ bind(done);
}
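
The slow path hands the constant-pool index to the runtime, which resolves the
klass and performs a full, possibly GC-triggering allocation. A simplified
paraphrase of what InterpreterRuntime::_new does (exception plumbing elided;
a sketch of the flow, not the exact HotSpot source):

  void interpreter_new(JavaThread* thread, ConstantPool* pool, int index) {
    Klass* k = pool->klass_at(index, CHECK);           // resolve CONSTANT_Class
    InstanceKlass* klass = InstanceKlass::cast(k);
    klass->check_valid_for_instantiation(true, CHECK); // abstract/interface? throw
    klass->initialize(CHECK);                          // run <clinit> if needed
    oop obj = klass->allocate_instance(CHECK);         // may GC or throw OOME
    thread->set_vm_result(obj);                        // returned to rax by call_VM
  }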

After the change (the same region, with heap-allocation sampling added):

    // Compare rax with the top addr, and if still equal, store the new
    // top addr in rbx at the address of the top addr pointer. Sets ZF if it
    // was equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
    //
    // rax: object begin
    // rbx: object end
    // rdx: instance size in bytes
    __ locked_cmpxchgptr(rbx, heap_top);

    // if someone beat us on the allocation, try again, otherwise continue
    __ jcc(Assembler::notEqual, retry);

    __ incr_allocated_bytes(thread, rdx, 0);
  }

  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
    // The object is initialized before the header.  If the object size is
    // zero, go directly to the header initialization.
    __ bind(initialize_object);
    __ movptr(rbx, rdx);  // save the size for HeapMonitoring (movptr, not movq:
                          // this file is shared between 32- and 64-bit x86)
    __ decrement(rdx, sizeof(oopDesc));
    __ jcc(Assembler::zero, initialize_header);

    // Initialize topmost object field, divide rdx by 8, check if odd and
    // test if zero.
    __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
    __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd

#ifdef ASSERT
    // make sure rdx was a multiple of 8
    Label L;
    // Ignore partial flag stall after shrl() since it is debug VM
    __ jccb(Assembler::carryClear, L);
    __ stop("object size is not multiple of 2 - adjust this code");
    __ bind(L);
    // rdx must be > 0, no extra check needed here
#endif

    // initialize remaining object fields: rdx was a multiple of 8
    { Label loop;
      __ bind(loop);
      __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
      NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
      __ decrement(rdx);
      __ jcc(Assembler::notZero, loop);
    }

    // initialize object header only.
    __ bind(initialize_header);

    // Restore the size for HeapMonitoring (rdx was consumed by the zeroing
    // loop above; rbx is free again once the field stores are done).
    __ movptr(rdx, rbx);

    if (UseBiasedLocking) {
      __ pop(rcx);   // get saved klass back in the register.
      __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rbx);
    } else {
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
                (intptr_t)markOopDesc::prototype()); // header
      __ pop(rcx);   // get saved klass back in the register.
    }
#ifdef _LP64
    __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
    __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
#endif
    __ store_klass(rax, rcx);  // klass

    {
      SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
      // Trigger dtrace event for fastpath
      __ push(atos);
      __ call_VM_leaf(
           CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
           rax, rdx);
      __ pop(atos);
    }

    HEAP_MONITORING(_masm, noreg, rdx, 0, rax, rcx, noreg,
      {
        __ push(atos);
        __ call_VM_leaf(
            CAST_FROM_FN_PTR(address, HeapMonitoring::object_alloc),
            rax, rdx);
        __ pop(atos);
      });
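
    // Hypothetical shape of the HEAP_MONITORING gate (its definition is
    // outside this hunk; the counter below is an assumption, not the patch's
    // actual code): subtract the allocation size from a per-thread
    // bytes-until-sample counter and take the callback only on underflow, so
    // the fast path normally costs one sub and an untaken branch:
    //
    //   Label skip;
    //   __ subptr(Address(thread, bytes_until_sample_offset), rdx);
    //   __ jcc(Assembler::positive, skip);
    //   ... the callback block above (push(atos)/call_VM_leaf/pop(atos)) ...
    //   __ bind(skip);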

    __ jmp(done);
  }

  // slow case
  __ bind(slow_case);
  __ pop(rcx);   // restore stack pointer to what it was when we came in.
  __ bind(slow_case_no_pop);

  Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
  Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);

  __ get_constant_pool(rarg1);
  __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
  __ verify_oop(rax);

  // continue
  __ bind(done);
}

