--- old/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	2018-04-04 09:02:15.035279510 -0400
+++ new/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	2018-04-04 09:02:14.519278864 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1434,6 +1434,100 @@
   }
 }
 
+// Pin incoming array argument of java critical method
+static void pin_critical_native_array(MacroAssembler* masm,
+                                      VMRegPair reg,
+                                      int& pinned_slot) {
+  __ block_comment("pin_critical_native_array {");
+  Register tmp_reg = rax;
+
+  Label is_null;
+  VMRegPair tmp;
+  VMRegPair in_reg = reg;
+  bool on_stack = false;
+
+  tmp.set_ptr(tmp_reg->as_VMReg());
+  if (reg.first()->is_stack()) {
+    // Load the arg up from the stack
+    move_ptr(masm, reg, tmp);
+    reg = tmp;
+    on_stack = true;
+  } else {
+    __ movptr(rax, reg.first()->as_Register());
+  }
+  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+  __ jccb(Assembler::equal, is_null);
+
+  __ push(c_rarg0);
+  __ push(c_rarg1);
+  __ push(c_rarg2);
+  __ push(c_rarg3);
+#ifdef _WIN64
+  // caller-saved registers on Windows
+  __ push(r10);
+  __ push(r11);
+#else
+  __ push(c_rarg4);
+  __ push(c_rarg5);
+#endif
+
+  if (reg.first()->as_Register() != c_rarg1) {
+    __ movptr(c_rarg1, reg.first()->as_Register());
+  }
+  __ movptr(c_rarg0, r15_thread);
+  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::pin_object)));
+
+#ifdef _WIN64
+  __ pop(r11);
+  __ pop(r10);
+#else
+  __ pop(c_rarg5);
+  __ pop(c_rarg4);
+#endif
+  __ pop(c_rarg3);
+  __ pop(c_rarg2);
+  __ pop(c_rarg1);
+  __ pop(c_rarg0);
+
+  if (on_stack) {
+    __ movptr(Address(rbp, reg2offset_in(in_reg.first())), rax);
+    __ bind(is_null);
+  } else {
+    __ movptr(reg.first()->as_Register(), rax);
+
+    // save on stack for unpinning later
+    __ bind(is_null);
+    assert(reg.first()->is_Register(), "Must be a register");
+    int offset = pinned_slot * VMRegImpl::stack_slot_size;
+    pinned_slot += VMRegImpl::slots_per_word;
+    __ movq(Address(rsp, offset), rax);
+  }
+  __ block_comment("} pin_critical_native_array");
+}
+
+// Unpin array argument of java critical method
+static void unpin_critical_native_array(MacroAssembler* masm,
+                                        VMRegPair reg,
+                                        int& pinned_slot) {
+  __ block_comment("unpin_critical_native_array {");
+  Label is_null;
+
+  if (reg.first()->is_stack()) {
+    __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
+  } else {
+    int offset = pinned_slot * VMRegImpl::stack_slot_size;
+    pinned_slot += VMRegImpl::slots_per_word;
+    __ movq(c_rarg1, Address(rsp, offset));
+  }
+  __ testptr(c_rarg1, c_rarg1);
+  __ jccb(Assembler::equal, is_null);
+
+  __ movptr(c_rarg0, r15_thread);
+  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object)));
+
+  __ bind(is_null);
+  __ block_comment("} unpin_critical_native_array");
+}
 
 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
 // keeps a new JNI critical region from starting until a GC has been
@@ -2129,7 +2223,7 @@
 
   const Register oop_handle_reg = r14;
 
-  if (is_critical_native) {
+  if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
   }
@@ -2186,6 +2280,11 @@
   // the incoming and outgoing registers are offset upwards and for
   // critical natives they are offset down.
   GrowableArray<int> arg_order(2 * total_in_args);
+  // Inbound arguments that need to be pinned for critical natives
+  GrowableArray<int> pinned_args(total_in_args);
+  // Current stack slot for storing register based array argument
+  int pinned_slot = oop_handle_offset;
+
   VMRegPair tmp_vmreg;
   tmp_vmreg.set2(rbx->as_VMReg());
 
@@ -2233,6 +2332,12 @@
       switch (in_sig_bt[i]) {
         case T_ARRAY:
           if (is_critical_native) {
+            // pin before unpack
+            if (Universe::heap()->supports_object_pinning()) {
+              assert(pinned_slot <= stack_slots, "overflow");
+              pin_critical_native_array(masm, in_regs[i], pinned_slot);
+              pinned_args.append(i);
+            }
             unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
             c_arg++;
 #ifdef ASSERT
@@ -2449,6 +2554,19 @@
     default       : ShouldNotReachHere();
   }
 
+  // unpin pinned arguments
+  pinned_slot = oop_handle_offset;
+  if (pinned_args.length() > 0) {
+    // save return value that may be overwritten otherwise.
+    save_native_result(masm, ret_type, stack_slots);
+    for (int index = 0; index < pinned_args.length(); index ++) {
+      int i = pinned_args.at(index);
+      assert(pinned_slot <= stack_slots, "overflow");
+      unpin_critical_native_array(masm, in_regs[i], pinned_slot);
+    }
+    restore_native_result(masm, ret_type, stack_slots);
+  }
+
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
--- old/src/hotspot/share/runtime/sharedRuntime.cpp	2018-04-04 09:02:16.305281100 -0400
+++ new/src/hotspot/share/runtime/sharedRuntime.cpp	2018-04-04 09:02:15.859280542 -0400
@@ -2863,6 +2863,22 @@
   GCLocker::unlock_critical(thread);
 JRT_END
 
+JRT_LEAF(oopDesc*, SharedRuntime::pin_object(JavaThread* thread, oopDesc* obj))
+  assert(Universe::heap()->supports_object_pinning(), "Why we here?");
+  assert(obj != NULL, "Should not be null");
+  oop o(obj);
+  o = Universe::heap()->pin_object(thread, o);
+  assert(o != NULL, "Should not be null");
+  return o;
+JRT_END
+
+JRT_LEAF(void, SharedRuntime::unpin_object(JavaThread* thread, oopDesc* obj))
+  assert(Universe::heap()->supports_object_pinning(), "Why we here?");
+  assert(obj != NULL, "Should not be null");
+  oop o(obj);
+  Universe::heap()->unpin_object(thread, o);
+JRT_END
+
 // -------------------------------------------------------------------------
 // Java-Java calling convention
 // (what you use when Java calls Java)
--- old/src/hotspot/share/runtime/sharedRuntime.hpp	2018-04-04 09:02:17.666282804 -0400
+++ new/src/hotspot/share/runtime/sharedRuntime.hpp	2018-04-04 09:02:17.197282217 -0400
@@ -492,6 +492,10 @@
   // Block before entering a JNI critical method
   static void block_for_jni_critical(JavaThread* thread);
 
+  // Pin/Unpin object
+  static oopDesc* pin_object(JavaThread* thread, oopDesc* obj);
+  static void unpin_object(JavaThread* thread, oopDesc* obj);
+
   // A compiled caller has just called the interpreter, but compiled code
   // exists.  Patch the caller so he no longer calls into the interpreter.
   static void fixup_callers_callsite(Method* moop, address ret_pc);
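Background sketch (not part of the patch): the new wrapper code pins each incoming array argument of a JNI critical native before it is unpacked into a (length, address) pair and unpins it after the native call returns, so a collector that supports object pinning can keep running instead of being held off through GCLocker. The snippet below is a minimal, hypothetical example of the kind of native method this path serves; the demo/Sums names are invented, and only the JavaCritical_ calling convention (no JNIEnv*/jclass, arrays passed as length plus raw element pointer) reflects HotSpot's existing critical-natives contract.

// Hypothetical critical native whose int[] argument would be pinned by
// pin_critical_native_array() and unpinned by unpin_critical_native_array().
//
// Java side (hypothetical):
//   package demo;
//   class Sums { static native long sum(int[] data); }

#include <jni.h>

extern "C" {

// Regular JNI entry point; used when the critical variant cannot be taken.
JNIEXPORT jlong JNICALL
Java_demo_Sums_sum(JNIEnv* env, jclass, jintArray data) {
  jsize len = env->GetArrayLength(data);
  jint* p = env->GetIntArrayElements(data, nullptr);
  jlong s = 0;
  for (jsize i = 0; i < len; i++) {
    s += p[i];
  }
  env->ReleaseIntArrayElements(data, p, JNI_ABORT);  // read-only, discard copy
  return s;
}

// Critical variant: no JNIEnv*/jclass, the array arrives as (length, pointer).
// The generated wrapper in sharedRuntime_x86_64.cpp supplies that pair and,
// with this patch, pins the array on a pinning-capable heap instead of taking
// the GCLocker path.
JNIEXPORT jlong JNICALL
JavaCritical_demo_Sums_sum(jint len, jint* p) {
  jlong s = 0;
  for (jint i = 0; i < len; i++) {
    s += p[i];
  }
  return s;
}

} // extern "C"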