< prev index next >

src/cpu/x86/vm/c1_Runtime1_x86.cpp

Print this page

        

@@ -412,11 +412,12 @@
 
   return map;
 }
 
 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
-                                   bool save_fpu_registers = true) {
+                                   bool save_fpu_registers = true,
+                                   bool do_generate_oop_map = true) {
   __ block_comment("save_live_registers");
 
   __ pusha();         // integer registers
 
   // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");

@@ -487,11 +488,13 @@
   }
 
   // FPU stack must be empty now
   __ verify_FPU(0, "save_live_registers");
 
-  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
+  return do_generate_oop_map
+      ? generate_oop_map(sasm, num_rt_args, save_fpu_registers)
+      : NULL;
 }
 
 
 static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
   if (restore_fpu_registers) {

@@ -955,10 +958,28 @@
   __ ret(0);
 
   return oop_maps;
 }
 
+static void heap_support_stub(StubAssembler* sasm, Register obj,
+                              Register size_in_bytes, int con_size_in_bytes,
+                              Register t1, Register t2) {
+  // Emit the heap-sampling callout inline in the current stub.
+  // Usually, when we invoke the sampling methods from within the client
+  // compiler, we do so in a stub.  However, sometimes, we are already in a
+  // stub when we want to call these things, and stack trace gathering gets
+  // confused when you call a stub inside another stub.
+  // The trailing backslashes splice the braced block onto one logical line
+  // for the HEAP_MONITORING macro invocation.  save_live_registers is called
+  // with do_generate_oop_map == false, so no oop map is produced here
+  // (presumably because no GC can occur at this point -- see the matching
+  // comment in the heap_object_sample_id case).
+  HEAP_MONITORING(sasm, noreg, size_in_bytes, con_size_in_bytes, obj, t1, t2, \
+  { \
+    save_live_registers(sasm, 1, true, false); \
+    __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax)); \
+    __ call(RuntimeAddress( \
+        CAST_FROM_FN_PTR(address, \
+                         HeapMonitoring::object_alloc_unsized))); \
+    NOT_LP64(__ pop(rax)); \
+    restore_live_registers(sasm); \
+  });
+}
 
 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
   // for better readability
   const bool must_gc_arguments = true;

@@ -1040,10 +1061,11 @@
 
           __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
 
           __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true);
           __ verify_oop(obj);
+          heap_support_stub(sasm, obj, obj_size, 0, t1, t2);
           __ pop(rbx);
           __ pop(rdi);
           __ ret(0);
 
           __ bind(try_eden);

@@ -1168,12 +1190,16 @@
           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
           __ andptr(t1, Klass::_lh_header_size_mask);
           __ subptr(arr_size, t1);  // body length
           __ addptr(t1, obj);       // body start
           if (!ZeroTLAB) {
+            // Initialize body destroys arr_size so remember it.
+            __ push(arr_size);
             __ initialize_body(t1, arr_size, 0, t2);
+            __ pop(arr_size);
           }
+          heap_support_stub(sasm, obj, arr_size, 0, t1, t2);
           __ verify_oop(obj);
           __ ret(0);
 
           __ bind(try_eden);
           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))

@@ -1504,10 +1530,26 @@
         NOT_LP64(__ pop(rax));
 
         restore_live_registers(sasm);
       }
       break;
+
+    case heap_object_sample_id:
+      { // rax,: object
+        StubFrame f(sasm, "heap_object_sample", dont_gc_arguments);
+        // We can't gc here.  Make sure that all the live registers get saved.
+        // NOTE(review): unlike heap_support_stub, this call leaves
+        // do_generate_oop_map at its default (true), so an oop map IS still
+        // generated despite this comment -- confirm whether `false` was
+        // intended here as well.
+        save_live_registers(sasm, 1);
+
+        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
+        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
+                                                HeapMonitoring::object_alloc)));
+        NOT_LP64(__ pop(rax));
+
+        restore_live_registers(sasm);
+      }
+      break;
 
     case fpu2long_stub_id:
       {
         // rax, and rdx are destroyed, but should be free since the result is returned there
         // preserve rsi,ecx
< prev index next >