
src/hotspot/cpu/x86/templateTable_x86.cpp

rev 47680 : [mq]: x86_tlab
rev 47681 : [mq]: x86_tlab2
rev 47682 : [mq]: x86_3

@@ -3842,11 +3842,10 @@
   Label slow_case;
   Label slow_case_no_pop;
   Label done;
   Label initialize_header;
   Label initialize_object;  // including clearing the fields
-  Label allocate_shared;
 
   __ get_cpool_and_tags(rcx, rax);
 
   // Make sure the class we're about to instantiate has been resolved.
   // This is done before loading InstanceKlass to be consistent with the order

@@ -3868,16 +3867,22 @@
   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
   // test to see if it has a finalizer or is malformed in some way
   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
   __ jcc(Assembler::notZero, slow_case);
 
-  //
-  // Allocate the instance
-  // 1) Try to allocate in the TLAB
-  // 2) if fail and the object is large allocate in the shared Eden
-  // 3) if the above fails (or is not applicable), go to a slow case
-  // (creates a new TLAB, etc.)
+  // Allocate the instance (see the C++ sketch below):
+  // 1) If TLAB is enabled:
+  //  a) Try to allocate in the TLAB.
+  //  b) If that fails, go to the slow path.
+  // 2) Else (TLAB is disabled):
+  //  a) If inline contiguous allocations are enabled:
+  //    i) Try to allocate in eden.
+  //    ii) If that fails due to reaching the heap end, go to the slow path.
+  // 3) If TLAB is enabled OR inline contiguous allocations are enabled:
+  //  a) Initialize the allocation.
+  //  b) Exit.
+  // 4) If neither 1) nor 2) is applicable, go to the slow path.
 
   const bool allow_shared_alloc =
     Universe::heap()->supports_inline_contig_alloc();
 
   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
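For orientation, the decision tree in the comment above can be written as a plain C++ sketch. This is a hedged illustration only: tlab_allocate(), eden_allocate(), initialize() and slow_path() are hypothetical stand-ins for the generated assembly, not HotSpot APIs.

  // Hypothetical helpers; a sketch of the control flow, not HotSpot code.
  void* allocate_instance(JavaThread* thread, size_t size_in_bytes) {
    void* obj = NULL;
    if (UseTLAB) {
      obj = tlab_allocate(thread, size_in_bytes);  // 1a: bump tlab_top
      if (obj == NULL) return slow_path();         // 1b
    } else if (allow_shared_alloc) {
      obj = eden_allocate(size_in_bytes);          // 2a.i: CAS on the heap top
      if (obj == NULL) return slow_path();         // 2a.ii: heap end reached
    } else {
      return slow_path();                          // 4: no fast path applies
    }
    initialize(obj, size_in_bytes);                // 3a: header and fields
    return obj;                                    // 3b
  }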

@@ -3889,27 +3894,24 @@
 
   if (UseTLAB) {
     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
     __ lea(rbx, Address(rax, rdx, Address::times_1));
     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
-    __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
+    __ jcc(Assembler::above, slow_case);
     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
     if (ZeroTLAB) {
      // the fields have already been cleared
       __ jmp(initialize_header);
     } else {
       // initialize both the header and fields
       __ jmp(initialize_object);
     }
-  }
-
+  } else {
   // Allocation in the shared Eden, if allowed.
   //
   // rdx: instance size in bytes
   if (allow_shared_alloc) {
-    __ bind(allocate_shared);
-
     ExternalAddress heap_top((address)Universe::heap()->top_addr());
     ExternalAddress heap_end((address)Universe::heap()->end_addr());
 
     Label retry;
     __ bind(retry);

@@ -3930,12 +3932,15 @@
     // if someone beat us on the allocation, try again, otherwise continue
     __ jcc(Assembler::notEqual, retry);
 
     __ incr_allocated_bytes(thread, rdx, 0);
   }
+  }
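The shared-eden path above is a compare-and-swap bump allocation: load the current heap top, compute the new top, bail out to the slow path past the heap end, then attempt to publish the new top and retry on contention. A hedged C++ sketch of the retry loop follows, where cas_ptr() is a hypothetical stand-in for the locked cmpxchgptr the assembler emits:

  // Sketch only; cas_ptr() returns the value found at top_addr.
  char* eden_allocate(char* volatile* top_addr, char* heap_end,
                      size_t size_in_bytes) {
    char* obj;
    do {
      obj = *top_addr;                        // current heap top (rax)
      char* new_top = obj + size_in_bytes;    // rbx = rax + rdx
      if (new_top > heap_end) return NULL;    // heap exhausted: slow path
      // if another thread advanced the top first, the CAS fails and we
      // retry with the freshly observed top
    } while (cas_ptr(top_addr, obj, new_top) != obj);
    return obj;
  }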
 
-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  // If UseTLAB or allow_shared_alloc is true, the object was created above
+  // and now needs to be initialized. Otherwise, skip straight to the slow path.
+  if (UseTLAB || allow_shared_alloc) {
     // The object is initialized before the header.  If the object size is
     // zero, go directly to the header initialization.
     __ bind(initialize_object);
     __ decrement(rdx, sizeof(oopDesc));
     __ jcc(Assembler::zero, initialize_header);
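Past this cut-off, the interpreter clears the field area and then installs the object header. As a hedged C++ sketch of what initialize_object and initialize_header amount to, with install_header() a hypothetical stand-in for the header-writing assembly:

  // Sketch only: zero the fields, then write the header.
  size_t field_bytes = instance_size - sizeof(oopDesc);  // decrement(rdx, ...)
  if (field_bytes > 0) {
    // clear the instance fields (skipped entirely when ZeroTLAB holds,
    // since the TLAB memory is already zeroed)
    memset((char*)obj + sizeof(oopDesc), 0, field_bytes);
  }
  install_header(obj, klass);  // set the mark word and the klass pointer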