src/cpu/x86/vm/templateTable_x86.cpp

rev 12310 : [mq]: gcinterface.patch

@@ -3870,11 +3870,11 @@
   // 2) if fail and the object is large allocate in the shared Eden
   // 3) if the above fails (or is not applicable), go to a slow case
   // (creates a new TLAB, etc.)
 
   const bool allow_shared_alloc =
-    Universe::heap()->supports_inline_contig_alloc();
+    GC::gc()->heap()->supports_inline_contig_alloc();
 
   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
 #ifndef _LP64
   if (UseTLAB || allow_shared_alloc) {
     __ get_thread(thread);

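Note: this hunk (and the ones below) reroutes direct Universe::heap() calls through the new GC interface, so the template interpreter no longer reaches into Universe for heap queries. A minimal sketch of what such an accessor could look like follows; the member names and the CHeapObj base are assumptions for illustration, not taken from this patch:

    // Hypothetical shape of the GC interface singleton (a sketch, not
    // the patch's actual definition):
    class GC : public CHeapObj<mtGC> {
      static GC*     _gc;     // the one active GC instance
      CollectedHeap* _heap;   // heap owned by that instance
    public:
      static GC*     gc()   { return _gc;   }
      CollectedHeap* heap() { return _heap; }
    };

With this indirection, call sites like the one above become GC::gc()->heap()->supports_inline_contig_alloc() instead of querying Universe directly.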
@@ -3900,12 +3900,12 @@
   //
   // rdx: instance size in bytes
   if (allow_shared_alloc) {
     __ bind(allocate_shared);
 
-    ExternalAddress heap_top((address)Universe::heap()->top_addr());
-    ExternalAddress heap_end((address)Universe::heap()->end_addr());
+    ExternalAddress heap_top((address)GC::gc()->heap()->top_addr());
+    ExternalAddress heap_end((address)GC::gc()->heap()->end_addr());
 
     Label retry;
     __ bind(retry);
     __ movptr(rax, heap_top);
     __ lea(rbx, Address(rax, rdx, Address::times_1));

@@ -3925,11 +3925,11 @@
     __ jcc(Assembler::notEqual, retry);
 
     __ incr_allocated_bytes(thread, rdx, 0);
   }
 
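Note: the two hunks above implement the shared-Eden fast path as a classic bump-pointer CAS loop: load the current heap top, compute the new top, bail out to the slow path if it would pass heap_end, then publish the new top with a locked cmpxchg and retry on contention. In C++ terms the generated code is roughly equivalent to the sketch below; the function name and parameters are illustrative, and it assumes HotSpot's contemporary Atomic::cmpxchg_ptr with (new_value, dest, compare_value) argument order:

    // Sketch of the shared-eden fast path (not the patch's code):
    static HeapWord* cas_allocate(HeapWord* volatile* top_addr,
                                  HeapWord** end_addr,
                                  size_t size_in_words) {
      HeapWord* top;
      HeapWord* new_top;
      do {
        top     = *top_addr;              // current allocation top
        new_top = top + size_in_words;
        if (new_top > *end_addr) {
          return NULL;                    // past heap_end: slow path
        }
        // Another thread may race us: cmpxchg installs new_top only if
        // top is still the value read above; otherwise retry.
      } while (Atomic::cmpxchg_ptr(new_top, top_addr, top) != top);
      return top;                         // object starts at old top
    }

On success the object starts at the old top, and incr_allocated_bytes adds the size to the current thread's allocation counter, as in the hunk above.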
-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  if (UseTLAB || GC::gc()->heap()->supports_inline_contig_alloc()) {
     // The object is initialized before the header.  If the object size is
     // zero, go directly to the header initialization.
     __ bind(initialize_object);
     __ decrement(rdx, sizeof(oopDesc));
     __ jcc(Assembler::zero, initialize_header);
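Note: the initialization path above subtracts the header size from the instance size in rdx and, if nothing remains, jumps straight to header setup; otherwise the instance fields are zeroed first. Roughly, in C++ terms (a sketch; the helper name is illustrative, and size_in_bytes is assumed to include the header, as rdx does here):

    #include <string.h>  // memset

    // Zero the instance fields; initialize_header then installs the
    // mark word and klass pointer.
    static void initialize_body(oopDesc* obj, size_t size_in_bytes) {
      size_t body = size_in_bytes - sizeof(oopDesc);
      if (body != 0) {
        memset((char*)obj + sizeof(oopDesc), 0, body);
      }
    }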