
src/cpu/x86/vm/templateTable_x86.cpp

rev 11777 : [mq]: gcinterface.patch


3857 
3858   // make sure the klass is initialized and has no finalizer
3859   // check that the klass is fully initialized
3860   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3861   __ jcc(Assembler::notEqual, slow_case);
3862 
3863   // get instance_size in InstanceKlass (scaled to a count of bytes)
3864   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3865   // test to see if it has a finalizer or is malformed in some way
3866   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3867   __ jcc(Assembler::notZero, slow_case);
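In C++ terms, these two guards amount to the following (a minimal sketch; needs_slow_path is a hypothetical helper name, but the fields and constants mirror the call sites above):

    // Sketch of the two fast-path guards. Classes that are not yet
    // initialized, or whose layout helper has the slow-path bit set
    // (finalizer, abnormal layout), must go through the runtime.
    bool needs_slow_path(const InstanceKlass* ik) {
      if (!ik->is_initialized()) return true;  // <clinit> not finished
      return (ik->layout_helper() & Klass::_lh_instance_slow_path_bit) != 0;
    }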
3868 
3869   //
3870   // Allocate the instance:
3871   // 1) Try to allocate in the TLAB.
3872   // 2) If that fails, try to allocate directly in the shared Eden.
3873   // 3) If the above fails (or is not applicable), go to the slow case
3874   //    (which creates a new TLAB, etc.).
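The same strategy in plain C++ (a sketch only, with hypothetical helper names; the real work is done by the generated assembly that follows):

    // Hypothetical outline of the allocation strategy the template emits.
    HeapWord* allocate_instance(JavaThread* thread, size_t size_in_bytes,
                                bool allow_shared_alloc) {
      HeapWord* obj = NULL;
      if (UseTLAB) {
        obj = tlab_allocate(thread, size_in_bytes);      // 1) bump the TLAB
      }
      if (obj == NULL && allow_shared_alloc) {
        obj = eden_cas_allocate(size_in_bytes);          // 2) CAS on shared Eden
      }
      if (obj == NULL) {
        obj = slow_path_allocate(thread, size_in_bytes); // 3) runtime call
      }
      return obj;
    }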
3875 
3876   const bool allow_shared_alloc =
3877     Universe::heap()->supports_inline_contig_alloc();
3878 
3879   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
3880 #ifndef _LP64
3881   if (UseTLAB || allow_shared_alloc) {
3882     __ get_thread(thread);
3883   }
3884 #endif // _LP64
3885 
3886   if (UseTLAB) {
3887     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3888     __ lea(rbx, Address(rax, rdx, Address::times_1));
3889     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3890     __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3891     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3892     if (ZeroTLAB) {
3893       // the fields have already been cleared
3894       __ jmp(initialize_header);
3895     } else {
3896       // initialize both the header and fields
3897       __ jmp(initialize_object);
3898     }
3899   }
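This is a classic bump-pointer allocation: no atomics are needed because the TLAB belongs to a single thread. A C++ rendering of the same logic (a sketch; the generated code reaches these fields through the JavaThread offsets used above):

    // TLAB fast path: advance the thread-local top pointer, or fail if
    // the request does not fit. Sketch only.
    HeapWord* tlab_allocate(JavaThread* thread, size_t size_in_bytes) {
      HeapWord* top     = thread->tlab().top();
      HeapWord* new_top = top + size_in_bytes / HeapWordSize;
      if (new_top > thread->tlab().end()) {
        return NULL;              // no room: shared Eden or slow path
      }
      thread->tlab().set_top(new_top);
      return top;                 // old top is the new object's start
    }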
3900 
3901   // Allocation in the shared Eden, if allowed.
3902   //
3903   // rdx: instance size in bytes
3904   if (allow_shared_alloc) {
3905     __ bind(allocate_shared);
3906 
3907     ExternalAddress heap_top((address)Universe::heap()->top_addr());
3908     ExternalAddress heap_end((address)Universe::heap()->end_addr());
3909 
3910     Label retry;
3911     __ bind(retry);
3912     __ movptr(rax, heap_top);
3913     __ lea(rbx, Address(rax, rdx, Address::times_1));
3914     __ cmpptr(rbx, heap_end);
3915     __ jcc(Assembler::above, slow_case);
3916 
3917     // Compare rax with the top addr, and if still equal, store the new
3918     // top addr (rbx) at the address of the top addr pointer. Sets ZF if they
3919     // were equal, and clears it otherwise. Use the lock prefix for atomicity on MPs.
3920     //
3921     // rax: object begin
3922     // rbx: object end
3923     // rdx: instance size in bytes
3924     __ locked_cmpxchgptr(rbx, heap_top);
3925 
3926     // if someone beat us to the allocation, try again; otherwise continue
3927     __ jcc(Assembler::notEqual, retry);
3928 
3929     __ incr_allocated_bytes(thread, rdx, 0);
3930   }
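The generated loop is the standard CAS-retry bump allocation over the shared Eden's top word; roughly, in C++ (a sketch; Atomic::cmpxchg_ptr stands in for the lock cmpxchg emitted above):

    // Shared-Eden allocation: atomically advance the global top pointer,
    // retrying if another thread won the race. Sketch only.
    HeapWord* eden_cas_allocate(HeapWord* volatile* top_addr, HeapWord* end,
                                size_t size_in_bytes) {
      while (true) {
        HeapWord* old_top = *top_addr;
        HeapWord* new_top = old_top + size_in_bytes / HeapWordSize;
        if (new_top > end) return NULL;          // Eden exhausted: slow path
        if ((HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr, old_top) == old_top) {
          return old_top;                        // we own [old_top, new_top)
        }
        // lost the race: reload the top pointer and retry
      }
    }

On success the template also bumps the thread's allocated-bytes counter via incr_allocated_bytes.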
3931 
3932   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3933     // The object fields are initialized before the header.  If the field
3934     // size is zero, go directly to the header initialization.
3935     __ bind(initialize_object);
3936     __ decrement(rdx, sizeof(oopDesc));
3937     __ jcc(Assembler::zero, initialize_header);
3938 
3939     // Initialize the topmost object field: divide rdx by 8, check if it
3940     // is odd, and test if it is zero.
3941     __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
3942     __ shrl(rdx, LogBytesPerLong); // divide by 8 and set the carry flag if the size is not a multiple of 8
3943 
3944     // rdx must have been a multiple of 8
3945 #ifdef ASSERT
3946     // make sure rdx was a multiple of 8
3947     Label L;
3948     // Ignore the partial flag stall after shrl() since this is a debug VM
3949     __ jccb(Assembler::carryClear, L);
3950     __ stop("object size is not multiple of 2 - adjust this code");
3951     __ bind(L);
3952     // rdx must be > 0, no extra check is needed here
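The shift above leaves in rdx the number of 8-byte words of field data; the loop that consumes it (below this excerpt) stores the zeroed rcx through the object from the top down. In C++ terms, roughly (a sketch; the names are hypothetical):

    // Zero the instance fields 8 bytes at a time, from the end of the
    // object down to just past the header. Sketch of the emitted loop.
    void zero_fields(HeapWord* obj, size_t size_in_bytes) {
      jlong* p    = (jlong*)((char*)obj + size_in_bytes);    // object end
      jlong* base = (jlong*)((char*)obj + sizeof(oopDesc));  // past header
      while (p > base) {
        *--p = 0;
      }
    }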

--- new version (rev 11777 : [mq]: gcinterface.patch) below ---

3857 
3858   // make sure the klass is initialized and has no finalizer
3859   // check that the klass is fully initialized
3860   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3861   __ jcc(Assembler::notEqual, slow_case);
3862 
3863   // get instance_size in InstanceKlass (scaled to a count of bytes)
3864   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3865   // test to see if it has a finalizer or is malformed in some way
3866   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3867   __ jcc(Assembler::notZero, slow_case);
3868 
3869   //
3870   // Allocate the instance:
3871   // 1) Try to allocate in the TLAB.
3872   // 2) If that fails, try to allocate directly in the shared Eden.
3873   // 3) If the above fails (or is not applicable), go to the slow case
3874   //    (which creates a new TLAB, etc.).
3875 
3876   const bool allow_shared_alloc =
3877     GC::gc()->heap()->supports_inline_contig_alloc();
3878 
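This line is the point of the patch: the template no longer calls Universe::heap() directly but reaches the heap through a GC accessor. From the call sites in this diff alone, the indirection presumably has a shape like the following (a hypothetical sketch, not the actual declarations):

    // Hypothetical shape of the indirection behind GC::gc()->heap().
    class GC {
     public:
      static GC* gc();                    // the active GC instance
      virtual CollectedHeap* heap() = 0;  // the heap it manages
    };

    // The heap members this fast path relies on:
    //   supports_inline_contig_alloc() - may generated code bump a
    //                                    contiguous Eden top pointer?
    //   top_addr() / end_addr()        - addresses of the Eden top and
    //                                    limit words (see below)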
3879   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
3880 #ifndef _LP64
3881   if (UseTLAB || allow_shared_alloc) {
3882     __ get_thread(thread);
3883   }
3884 #endif // _LP64
3885 
3886   if (UseTLAB) {
3887     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3888     __ lea(rbx, Address(rax, rdx, Address::times_1));
3889     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3890     __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3891     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3892     if (ZeroTLAB) {
3893       // the fields have already been cleared
3894       __ jmp(initialize_header);
3895     } else {
3896       // initialize both the header and fields
3897       __ jmp(initialize_object);
3898     }
3899   }
3900 
3901   // Allocation in the shared Eden, if allowed.
3902   //
3903   // rdx: instance size in bytes
3904   if (allow_shared_alloc) {
3905     __ bind(allocate_shared);
3906 
3907     ExternalAddress heap_top((address)GC::gc()->heap()->top_addr());
3908     ExternalAddress heap_end((address)GC::gc()->heap()->end_addr());
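Note that the template embeds the addresses of the heap's top and end words, not their values, so the generated code always reads the current values at run time; the heap must therefore keep these words at stable addresses. A sketch of the assumed accessors (the field names are hypothetical):

    HeapWord* volatile _top;  // advanced by every inline allocation
    HeapWord*          _end;  // Eden limit, moved only at safepoints
    HeapWord** top_addr() { return (HeapWord**)&_top; }
    HeapWord** end_addr() { return &_end; }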
3909 
3910     Label retry;
3911     __ bind(retry);
3912     __ movptr(rax, heap_top);
3913     __ lea(rbx, Address(rax, rdx, Address::times_1));
3914     __ cmpptr(rbx, heap_end);
3915     __ jcc(Assembler::above, slow_case);
3916 
3917     // Compare rax with the top addr, and if still equal, store the new
3918     // top addr (rbx) at the address of the top addr pointer. Sets ZF if they
3919     // were equal, and clears it otherwise. Use the lock prefix for atomicity on MPs.
3920     //
3921     // rax: object begin
3922     // rbx: object end
3923     // rdx: instance size in bytes
3924     __ locked_cmpxchgptr(rbx, heap_top);
3925 
3926     // if someone beat us to the allocation, try again; otherwise continue
3927     __ jcc(Assembler::notEqual, retry);
3928 
3929     __ incr_allocated_bytes(thread, rdx, 0);
3930   }
3931 
3932   if (UseTLAB || GC::gc()->heap()->supports_inline_contig_alloc()) {
3933     // The object fields are initialized before the header.  If the field
3934     // size is zero, go directly to the header initialization.
3935     __ bind(initialize_object);
3936     __ decrement(rdx, sizeof(oopDesc));
3937     __ jcc(Assembler::zero, initialize_header);
3938 
3939     // Initialize the topmost object field: divide rdx by 8, check if it
3940     // is odd, and test if it is zero.
3941     __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
3942     __ shrl(rdx, LogBytesPerLong); // divide by 8 and set the carry flag if the size is not a multiple of 8
3943 
3944     // rdx must have been a multiple of 8
3945 #ifdef ASSERT
3946     // make sure rdx was a multiple of 8
3947     Label L;
3948     // Ignore the partial flag stall after shrl() since this is a debug VM
3949     __ jccb(Assembler::carryClear, L);
3950     __ stop("object size is not multiple of 2 - adjust this code");
3951     __ bind(L);
3952     // rdx must be > 0, no extra check is needed here

