/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#ifndef _WINDOWS
#include "alloca.h"
#endif
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "vm_version_x86.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

// ... (code elided) ...
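// The fragment below is the tail of the helper that saves or restores
// in-flight arguments around a runtime call: with map != NULL it stores
// register arguments to the stack and records stack-resident oops in the
// OopMap; with map == NULL it restores them.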
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}

// Pin an incoming array argument of a JNI critical native method.
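// Rather than blocking GC with GCLocker, a pinning-capable collector keeps the
// array from moving: SharedRuntime::pin_object pins it in place, and the
// pinned oop is written back to the argument's stack slot or, for a register
// argument, stashed in a scratch stack slot so it can be unpinned after the
// native call returns.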
static void pin_critical_native_array(MacroAssembler* masm,
                                      VMRegPair reg,
                                      int& pinned_slot) {
  assert(UseShenandoahGC, "only supported in Shenandoah for now");
  __ block_comment("pin_critical_native_array {");
  Register tmp_reg = rax;

  Label is_null;
  VMRegPair tmp;
  VMRegPair in_reg = reg;
  bool on_stack = false;

  tmp.set_ptr(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
    on_stack = true;
  } else {
    __ movptr(rax, reg.first()->as_Register());
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);

  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
#ifdef _WIN64
  // caller-saved registers on Windows
  __ push(r10);
  __ push(r11);
#else
  __ push(c_rarg4);
  __ push(c_rarg5);
#endif

  if (reg.first()->as_Register() != c_rarg1) {
    __ movptr(c_rarg1, reg.first()->as_Register());
  }
  __ movptr(c_rarg0, r15_thread);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::pin_object)));

#ifdef _WIN64
  __ pop(r11);
  __ pop(r10);
#else
  __ pop(c_rarg5);
  __ pop(c_rarg4);
#endif
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);

  if (on_stack) {
    __ movptr(Address(rbp, reg2offset_in(in_reg.first())), rax);
    __ bind(is_null);
  } else {
    __ movptr(reg.first()->as_Register(), rax);

    // save on stack for unpinning later
    __ bind(is_null);
    assert(reg.first()->is_Register(), "Must be a register");
    int offset = pinned_slot * VMRegImpl::stack_slot_size;
    pinned_slot += VMRegImpl::slots_per_word;
    __ movq(Address(rsp, offset), rax);
  }
  __ block_comment("} pin_critical_native_array");
}

// Unpin an array argument of a JNI critical native method.
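// Mirrors pin_critical_native_array: the pinned oop is reloaded either from
// the caller's frame (stack argument) or from the scratch stack slot used
// above (register argument), then released via SharedRuntime::unpin_object.
// A null array is skipped.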
static void unpin_critical_native_array(MacroAssembler* masm,
                                        VMRegPair reg,
                                        int& pinned_slot) {
  assert(UseShenandoahGC, "only supported in Shenandoah for now");
  __ block_comment("unpin_critical_native_array {");
  Label is_null;

  if (reg.first()->is_stack()) {
    __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
  } else {
    int offset = pinned_slot * VMRegImpl::stack_slot_size;
    pinned_slot += VMRegImpl::slots_per_word;
    __ movq(c_rarg1, Address(rsp, offset));
  }
  __ testptr(c_rarg1, c_rarg1);
  __ jccb(Assembler::equal, is_null);

  __ movptr(c_rarg0, r15_thread);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object)));

  __ bind(is_null);
  __ block_comment("} unpin_critical_native_array");
}
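// (Both helpers are invoked from the native wrapper below: pinning happens
// during the argument shuffle, unpinning right after the native call returns
// and before the safepoint transition.)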

// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
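// Note: this blocking scheme is only used when the heap cannot pin objects;
// pinning-capable collectors take the pin/unpin path above instead (see the
// supports_object_pinning() check at the call site below).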
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GCLocker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

// ... (code elided) ...

  }

#ifdef ASSERT
  {
    Label L;
    __ mov(rax, rsp);
    __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("improperly aligned stack");
    __ bind(L);
  }
#endif /* ASSERT */


  // We use r14 as the oop handle for the receiver/klass.
  // It is callee-saved, so it survives the call into the native code.

  const Register oop_handle_reg = r14;

  if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that, for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.), we will have
  // captured the oops from our caller and have a valid oopMap for them.

  // -----------------
  // The Grand Shuffle

  // The Java calling convention is either identical to (linux) or denser than
  // (win64) the C calling convention. However, because of the jni_env argument
  // the C calling convention always has at least one more argument (and two
  // more for static methods) than the Java convention. Therefore, if we move
  // the args from java -> c backwards we will never have a register->register
  // conflict, and we don't have to build a dependency graph and figure out
  // how to break any cycles.
  //
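  // For example, on linux the register declarations pair the two conventions
  // off by one: j_rarg0 is c_rarg1 (rsi), since c_rarg0 (rdi) carries the
  // JNIEnv*. Java argument i thus targets c_rarg(i+1), or c_rarg(i+2) for
  // static methods (which also receive the class mirror), so visiting the
  // last argument first means every destination register has already been
  // read by the time it is written.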
  // ... (code elided) ...

  // All inbound args are referenced based on rbp and all outbound args via rsp.


#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[XMMRegisterImpl::number_of_registers];
  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  // This may iterate in two different directions depending on the
  // kind of native it is. The reason is that for regular JNI natives
  // the incoming and outgoing registers are offset upwards and for
  // critical natives they are offset down.
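  // tmp_vmreg (rbx, set up just below) is the scratch location that
  // ComputeMoveOrder routes a value through when it must break a move cycle:
  // a pair with c_arg == -1 parks the value in the temporary, and a matching
  // pair later reads it back via temploc.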
  GrowableArray<int> arg_order(2 * total_in_args);
  // Inbound arguments that need to be pinned for critical natives
  GrowableArray<int> pinned_args(total_in_args);
  // Current stack slot for storing a register-based array argument
  int pinned_slot = oop_handle_offset;

  VMRegPair tmp_vmreg;
  tmp_vmreg.set2(rbx->as_VMReg());

  if (!is_critical_native) {
    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
      arg_order.push(i);
      arg_order.push(c_arg);
    }
  } else {
    // Compute a valid move order, using tmp_vmreg to break any cycles
    ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
  }

  int temploc = -1;
  for (int ai = 0; ai < arg_order.length(); ai += 2) {
    int i = arg_order.at(ai);
    int c_arg = arg_order.at(ai + 1);
    __ block_comment(err_msg("move %d -> %d", i, c_arg));
    if (c_arg == -1) {
      assert(is_critical_native, "should only be required for critical natives");
      // ... (code elided) ...
      // Read from the temporary location
      assert(temploc != -1, "must be valid");
      i = temploc;
      temploc = -1;
    }
#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
    } else if (in_regs[i].first()->is_XMMRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_XMMRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
    }
#endif /* ASSERT */
    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          // pin before unpack
          if (Universe::heap()->supports_object_pinning()) {
            assert(pinned_slot <= stack_slots, "overflow");
            pin_critical_native_array(masm, in_regs[i], pinned_slot);
            pinned_args.append(i);
          }
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
          c_arg++;
#ifdef ASSERT
          if (out_regs[c_arg].first()->is_Register()) {
            reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
          } else if (out_regs[c_arg].first()->is_XMMRegister()) {
            freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
          }
#endif
          break;
        }
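        // Fall through: for regular JNI natives a T_ARRAY argument is
        // passed as a handle, exactly like T_OBJECT below.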
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        break;
      case T_VOID:
        break;

  // ... (code elided) ...

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // Unpack native results.
  switch (ret_type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
  case T_CHAR   : __ movzwl(rax, rax);       break;
  case T_BYTE   : __ sign_extend_byte (rax); break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_DOUBLE :
  case T_FLOAT  :
    // Result is in xmm0; we'll save it as needed
    break;
  case T_ARRAY:                 // Really a handle
  case T_OBJECT:                // Really a handle
    break; // can't de-handlize until after safepoint check
  case T_VOID: break;
  case T_LONG: break;
  default       : ShouldNotReachHere();
  }

  // unpin pinned arguments
  pinned_slot = oop_handle_offset;
  if (pinned_args.length() > 0) {
    // save return value that may be overwritten otherwise.
    save_native_result(masm, ret_type, stack_slots);
    for (int index = 0; index < pinned_args.length(); index++) {
      int i = pinned_args.at(index);
      assert(pinned_slot <= stack_slots, "overflow");
      unpin_critical_native_array(masm, in_regs[i], pinned_slot);
    }
    restore_native_result(masm, ret_type, stack_slots);
  }
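  // (SharedRuntime::unpin_object is an ordinary runtime call, so it may
  // clobber the return registers rax/xmm0; hence the save/restore of the
  // native result around the unpin loop above.)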

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
  //     VM thread changes sync state to synchronizing and suspends threads for GC.
  //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
                  Assembler::LoadLoad | Assembler::LoadStore |
                  Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific