< prev index next >

src/cpu/x86/vm/templateTable_x86_32.cpp

Print this page
rev 7209 : [mq]: inccms


3197 
3198   // make sure klass is fully initialized; a klass that is not yet
3199   // fully_initialized must be allocated via the slow path
3200   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3201   __ jcc(Assembler::notEqual, slow_case);
3202 
3203   // get instance_size from the Klass layout helper (scaled to a count of bytes)
3204   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3205   // test to see if it has a finalizer or is malformed in some way
3206   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3207   __ jcc(Assembler::notZero, slow_case);
3208 
3209   //
3210   // Allocate the instance
3211   // 1) Try to allocate in the TLAB
3212   // 2) if fail and the object is large allocate in the shared Eden
3213   // 3) if the above fails (or is not applicable), go to a slow case
3214   // (creates a new TLAB, etc.)
3215 
3216   const bool allow_shared_alloc =
3217     Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3218 
3219   const Register thread = rcx;
3220   if (UseTLAB || allow_shared_alloc) {
3221     __ get_thread(thread);
3222   }
3223 
3224   if (UseTLAB) {
3225     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3226     __ lea(rbx, Address(rax, rdx, Address::times_1));
3227     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3228     __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3229     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3230     if (ZeroTLAB) {
3231       // the fields have been already cleared
3232       __ jmp(initialize_header);
3233     } else {
3234       // initialize both the header and fields
3235       __ jmp(initialize_object);
3236     }
3237   }




3197 
3198   // make sure klass is fully initialized; a klass that is not yet
3199   // fully_initialized must be allocated via the slow path
3200   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3201   __ jcc(Assembler::notEqual, slow_case);
3202 
3203   // get instance_size from the Klass layout helper (scaled to a count of bytes)
3204   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3205   // test to see if it has a finalizer or is malformed in some way
3206   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3207   __ jcc(Assembler::notZero, slow_case);
3208 
3209   //
3210   // Allocate the instance
3211   // 1) Try to allocate in the TLAB
3212   // 2) if fail and the object is large allocate in the shared Eden
3213   // 3) if the above fails (or is not applicable), go to a slow case
3214   // (creates a new TLAB, etc.)
3215 
3216   const bool allow_shared_alloc =
3217     Universe::heap()->supports_inline_contig_alloc();
3218 
3219   const Register thread = rcx;
3220   if (UseTLAB || allow_shared_alloc) {
3221     __ get_thread(thread);
3222   }
3223 
3224   if (UseTLAB) {
3225     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3226     __ lea(rbx, Address(rax, rdx, Address::times_1));
3227     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3228     __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3229     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3230     if (ZeroTLAB) {
3231       // the fields have been already cleared
3232       __ jmp(initialize_header);
3233     } else {
3234       // initialize both the header and fields
3235       __ jmp(initialize_object);
3236     }
3237   }


< prev index next >