
src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -1432,10 +1432,69 @@
       }
     }
   }
 }
 
+// Pin an incoming array argument; the pinned oop (or null) is left in rax
+static void gen_pin_object(MacroAssembler* masm,
+                           VMRegPair reg) {
+  __ block_comment("gen_pin_object {");
+
+  // rax always contains the oop, either the incoming one or the pinned one.
+  Register tmp_reg = rax;
+
+  Label is_null;
+  VMRegPair tmp;
+
+  tmp.set_ptr(tmp_reg->as_VMReg());
+  if (reg.first()->is_stack()) {
+    // Load the arg up from the stack
+    move_ptr(masm, reg, tmp);
+    reg = tmp;
+  } else {
+    __ movptr(rax, reg.first()->as_Register());
+  }
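+  // rax (and reg) now hold the oop; a null array needs no pinning.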
+  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+  __ jccb(Assembler::equal, is_null);
+
+  if (reg.first()->as_Register() != c_rarg1) {
+    __ movptr(c_rarg1, reg.first()->as_Register());
+  }
+
+  __ call_VM_leaf(
+    CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
+    r15_thread, c_rarg1);
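+  // SharedRuntime::pin_object returns the pinned oop in rax.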
+
+  __ bind(is_null);
+  __ block_comment("} gen_pin_object");
+}
+
+// Unpin an array argument previously pinned by gen_pin_object; null is ignored
+static void gen_unpin_object(MacroAssembler* masm,
+                             VMRegPair reg) {
+  __ block_comment("gen_unpin_object {");
+  Label is_null;
+
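+  // Move the array argument into c_rarg1 for the runtime call.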
+  if (reg.first()->is_stack()) {
+    __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
+  } else if (reg.first()->as_Register() != c_rarg1) {
+    __ movptr(c_rarg1, reg.first()->as_Register());
+  }
+
+  __ testptr(c_rarg1, c_rarg1);
+  __ jccb(Assembler::equal, is_null);
+
+  __ call_VM_leaf(
+    CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
+    r15_thread, c_rarg1);
+
+  __ bind(is_null);
+  __ block_comment("} gen_unpin_object");
+}
 
 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
 // keeps a new JNI critical region from starting until a GC has been
 // forced.  Save down any oops in registers and describe them in an
 // OopMap.

@@ -2127,11 +2186,12 @@
   // We use r14 as the oop handle for the receiver/klass
   // It is callee save so it survives the call to native
 
   const Register oop_handle_reg = r14;
 
-  if (is_critical_native) {
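+  // If the heap can pin objects, the array arguments are pinned in place
+  // below instead of relying on GCLocker.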
+  if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
   }
 
   //

@@ -2184,10 +2244,15 @@
   // This may iterate in two different directions depending on the
   // kind of native it is.  The reason is that for regular JNI natives
   // the incoming and outgoing registers are offset upwards and for
   // critical natives they are offset down.
   GrowableArray<int> arg_order(2 * total_in_args);
+  // Inbound arguments that need to be pinned for critical natives
+  GrowableArray<int> pinned_args(total_in_args);
+  // Next stack slot used to preserve a register-based array argument across the native call
+  int pinned_slot = oop_handle_offset;
+
   VMRegPair tmp_vmreg;
   tmp_vmreg.set2(rbx->as_VMReg());
 
   if (!is_critical_native) {
     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {

@@ -2231,10 +2296,30 @@
     }
 #endif /* ASSERT */
     switch (in_sig_bt[i]) {
       case T_ARRAY:
         if (is_critical_native) {
+          // Pin the array before unpacking it, so the GC cannot move it during the native call
+          if (Universe::heap()->supports_object_pinning()) {
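+            // Save the outgoing C arguments set up so far; the pin runtime call clobbers argument registers.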
+            save_args(masm, total_c_args, 0, out_regs);
+            gen_pin_object(masm, in_regs[i]);
+            pinned_args.append(i);
+            restore_args(masm, total_c_args, 0, out_regs);
+
+            // rax holds the pinned array; write it back over the incoming argument
+            VMRegPair result_reg;
+            result_reg.set_ptr(rax->as_VMReg());
+            move_ptr(masm, result_reg, in_regs[i]);
+            if (!in_regs[i].first()->is_stack()) {
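+              // Spill the pinned oop to a stack slot so it survives the native call for unpinning.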
+              assert(pinned_slot <= stack_slots, "overflow");
+              move_ptr(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
+              pinned_slot += VMRegImpl::slots_per_word;
+            }
+          }
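+          // The array (now pinned, if supported) is unpacked into its length and data pointer.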
           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
           c_arg++;
 #ifdef ASSERT
           if (out_regs[c_arg].first()->is_Register()) {
             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;

@@ -2447,10 +2532,29 @@
   case T_VOID: break;
   case T_LONG: break;
   default       : ShouldNotReachHere();
   }
 
+  // Unpin the arguments pinned before the call, in the same order
+  pinned_slot = oop_handle_offset;
+  if (pinned_args.length() > 0) {
+    // Save the native result; the unpin runtime calls below may clobber it.
+    save_native_result(masm, ret_type, stack_slots);
+    for (int index = 0; index < pinned_args.length(); index++) {
+      int i = pinned_args.at(index);
+      assert(pinned_slot <= stack_slots, "overflow");
+      if (!in_regs[i].first()->is_stack()) {
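+        // Reload the pinned oop spilled before the call; the argument register was not preserved.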
+        int offset = pinned_slot * VMRegImpl::stack_slot_size;
+        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
+        pinned_slot += VMRegImpl::slots_per_word;
+      }
+      gen_unpin_object(masm, in_regs[i]);
+    }
+    restore_native_result(masm, ret_type, stack_slots);
+  }
+
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
   //     VM thread changes sync state to synchronizing and suspends threads for GC.