src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

   1 /*
   2  * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


1417     } else if (in_regs[i].first()->is_XMMRegister()) {
1418       if (in_sig_bt[i] == T_FLOAT) {
1419         int offset = slot * VMRegImpl::stack_slot_size;
1420         slot++;
1421         assert(slot <= stack_slots, "overflow");
1422         if (map != NULL) {
1423           __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1424         } else {
1425           __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1426         }
1427       }
1428     } else if (in_regs[i].first()->is_stack()) {
1429       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1430         int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1431         map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1432       }
1433     }
1434   }
1435 }
1436 
1437 
1438 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
1439 // keeps a new JNI critical region from starting until a GC has been
1440 // forced.  Save down any oops in registers and describe them in an
1441 // OopMap.
1442 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1443                                                int stack_slots,
1444                                                int total_c_args,
1445                                                int total_in_args,
1446                                                int arg_save_area,
1447                                                OopMapSet* oop_maps,
1448                                                VMRegPair* in_regs,
1449                                                BasicType* in_sig_bt) {
1450   __ block_comment("check GCLocker::needs_gc");
1451   Label cont;
1452   __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1453   __ jcc(Assembler::equal, cont);
1454 
1455   // Save down any incoming oops and call into the runtime to halt for a GC
1456 
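The comment above describes the slow path a critical native takes when a GC is pending. A minimal standalone sketch of the control flow this stub encodes (the flag and the runtime call are illustrative stand-ins for *GCLocker::needs_gc_address() and the blocking runtime entry, not HotSpot code):

    #include <cstdio>

    // Illustrative stand-ins for *GCLocker::needs_gc_address() and the runtime call.
    static volatile char gclocker_needs_gc = 0;

    static void spill_oop_args_and_block_for_gc() {
      std::puts("oops described in the OopMap; blocking until the forced GC has run");
    }

    static void critical_native_prologue() {
      if (gclocker_needs_gc != 0) {   // cmp8(needs_gc_address, false); jcc(equal, cont)
        spill_oop_args_and_block_for_gc();
      }
      // cont: fall through into the critical native body
    }

    int main() { critical_native_prologue(); return 0; }
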


2112     }
2113 
2114 #ifdef ASSERT
2115     {
2116       Label L;
2117       __ mov(rax, rsp);
2118       __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2119       __ cmpptr(rax, rsp);
2120       __ jcc(Assembler::equal, L);
2121       __ stop("improperly aligned stack");
2122       __ bind(L);
2123     }
2124 #endif /* ASSERT */
2125 
2126 
2127   // We use r14 as the oop handle for the receiver/klass
2128   // It is callee save so it survives the call to native
2129 
2130   const Register oop_handle_reg = r14;
2131 
2132   if (is_critical_native) {
2133     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2134                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2135   }
2136 
2137   //
2138   // We immediately shuffle the arguments so that any vm call we have to
2139   // make from here on out (sync slow path, jvmti, etc.) we will have
2140   // captured the oops from our caller and have a valid oopMap for
2141   // them.
2142 
2143   // -----------------
2144   // The Grand Shuffle
2145 
2146   // The Java calling convention is either the same as (linux) or denser than (win64) the
2147   // c calling convention. However, because of the jni_env argument the c calling
2148   // convention always has at least one more (and two for static) arguments than Java.
2149   // Therefore if we move the args from java -> c backwards then we will never have
2150   // a register->register conflict and we don't have to build a dependency graph
2151   // and figure out how to break any cycles.
2152   //
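
The reasoning above is easiest to see on a toy version: because the c convention inserts JNIEnv ahead of the Java arguments, every destination slot sits at or beyond its source slot, so walking the moves from the last argument to the first never overwrites a value that is still needed. A standalone sketch of that argument, not the real register shuffle:

    #include <cassert>
    #include <vector>

    int main() {
      const int n = 4;
      std::vector<int> regs(n + 2, 0);
      for (int i = 0; i < n; i++) regs[i] = 100 + i;   // java args in slots 0..n-1

      // Move backwards: slot i -> slot i+1, starting with the last argument.
      for (int i = n - 1; i >= 0; i--) {
        regs[i + 1] = regs[i];                         // destination not yet needed as a source
      }
      regs[0] = -1;                                    // slot 0 now free for JNIEnv

      for (int i = 0; i < n; i++) assert(regs[i + 1] == 100 + i);
      return 0;
    }
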


2169   // All inbound args are referenced based on rbp and all outbound args via rsp.
2170 
2171 
2172 #ifdef ASSERT
2173   bool reg_destroyed[RegisterImpl::number_of_registers];
2174   bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2175   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2176     reg_destroyed[r] = false;
2177   }
2178   for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2179     freg_destroyed[f] = false;
2180   }
2181 
2182 #endif /* ASSERT */
2183 
2184   // This may iterate in two different directions depending on the
2185   // kind of native it is.  The reason is that for regular JNI natives
2186   // the incoming and outgoing registers are offset upwards and for
2187   // critical natives they are offset down.
2188   GrowableArray<int> arg_order(2 * total_in_args);
2189   VMRegPair tmp_vmreg;
2190   tmp_vmreg.set2(rbx->as_VMReg());
2191 
2192   if (!is_critical_native) {
2193     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2194       arg_order.push(i);
2195       arg_order.push(c_arg);
2196     }
2197   } else {
2198     // Compute a valid move order, using tmp_vmreg to break any cycles
2199     ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2200   }
2201 
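For the critical-native direction the shift can go the other way, so the required moves can form cycles; ComputeMoveOrder breaks them with tmp_vmreg (rbx), spilled to and reloaded from the temporary location tracked by temploc below. The idea, reduced to a two-register cycle (toy code, not ComputeMoveOrder itself):

    #include <cassert>

    int main() {
      // Required parallel move: rdi -> rsi and rsi -> rdi (a 2-cycle).
      long rdi = 1, rsi = 2, rbx_tmp = 0;
      rbx_tmp = rdi;   // break the cycle: park one value in the scratch register
      rdi = rsi;       // safe now, rdi's old value lives in rbx_tmp
      rsi = rbx_tmp;   // finish the cycle from the scratch register
      assert(rdi == 2 && rsi == 1);
      return 0;
    }
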
2202   int temploc = -1;
2203   for (int ai = 0; ai < arg_order.length(); ai += 2) {
2204     int i = arg_order.at(ai);
2205     int c_arg = arg_order.at(ai + 1);
2206     __ block_comment(err_msg("move %d -> %d", i, c_arg));
2207     if (c_arg == -1) {
2208       assert(is_critical_native, "should only be required for critical natives");


2216       // Read from the temporary location
2217       assert(temploc != -1, "must be valid");
2218       i = temploc;
2219       temploc = -1;
2220     }
2221 #ifdef ASSERT
2222     if (in_regs[i].first()->is_Register()) {
2223       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2224     } else if (in_regs[i].first()->is_XMMRegister()) {
2225       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2226     }
2227     if (out_regs[c_arg].first()->is_Register()) {
2228       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2229     } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2230       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2231     }
2232 #endif /* ASSERT */
2233     switch (in_sig_bt[i]) {
2234       case T_ARRAY:
2235         if (is_critical_native) {
2236           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2237           c_arg++;
2238 #ifdef ASSERT
2239           if (out_regs[c_arg].first()->is_Register()) {
2240             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2241           } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2242             freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2243           }
2244 #endif
2245           break;
2246         }
2247       case T_OBJECT:
2248         assert(!is_critical_native, "no oop arguments");
2249         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2250                     ((i == 0) && (!is_static)),
2251                     &receiver_offset);
2252         break;
2253       case T_VOID:
2254         break;
2255 


2430   // Verify or restore cpu control state after JNI call
2431   __ restore_cpu_control_state_after_jni();
2432 
2433   // Unpack native results.
2434   switch (ret_type) {
2435   case T_BOOLEAN: __ c2bool(rax);            break;
2436   case T_CHAR   : __ movzwl(rax, rax);      break;
2437   case T_BYTE   : __ sign_extend_byte (rax); break;
2438   case T_SHORT  : __ sign_extend_short(rax); break;
2439   case T_INT    : /* nothing to do */        break;
2440   case T_DOUBLE :
2441   case T_FLOAT  :
2442     // Result is in xmm0; we'll save it as needed
2443     break;
2444   case T_ARRAY:                 // Really a handle
2445   case T_OBJECT:                // Really a handle
2446       break; // can't de-handlize until after safepoint check
2447   case T_VOID: break;
2448   case T_LONG: break;
2449   default       : ShouldNotReachHere();
2450   }
2451 
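The wrapper does not rely on the native code having widened a sub-int result, so it normalizes the value itself. The equivalent widenings in plain C++ (a sketch of the effect; c2bool roughly normalizes any non-zero low byte to 1):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t rax = 0xffffffffffffff80ull;             // native call may leave garbage in the high bits

      int boolean_result = ((rax & 0xff) != 0) ? 1 : 0; // c2bool
      int char_result    = (uint16_t)rax;               // movzwl: zero-extend 16 -> 32 bits
      int byte_result    = (int8_t)rax;                 // sign_extend_byte
      int short_result   = (int16_t)rax;                // sign_extend_short

      assert(boolean_result == 1);
      assert(char_result == 0xff80);
      assert(byte_result == -128);
      assert(short_result == -128);
      return 0;
    }
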
2452   // Switch thread to "native transition" state before reading the synchronization state.
2453   // This additional state is necessary because reading and testing the synchronization
2454   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2455   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2456   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2457   //     Thread A is resumed to finish this native method, but doesn't block here since it
2458   //     didn't see any synchronization in progress, and escapes.
2459   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2460 
2461   if(os::is_MP()) {
2462     if (UseMembar) {
2463       // Force this write out before the read below
2464       __ membar(Assembler::Membar_mask_bits(
2465            Assembler::LoadLoad | Assembler::LoadStore |
2466            Assembler::StoreLoad | Assembler::StoreStore));
2467     } else {
2468       // Write serialization page so VM thread can do a pseudo remote membar.
2469       // We use the current thread pointer to calculate a thread specific


   1 /*
   2  * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


1417     } else if (in_regs[i].first()->is_XMMRegister()) {
1418       if (in_sig_bt[i] == T_FLOAT) {
1419         int offset = slot * VMRegImpl::stack_slot_size;
1420         slot++;
1421         assert(slot <= stack_slots, "overflow");
1422         if (map != NULL) {
1423           __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1424         } else {
1425           __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1426         }
1427       }
1428     } else if (in_regs[i].first()->is_stack()) {
1429       if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1430         int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1431         map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1432       }
1433     }
1434   }
1435 }
1436 
1437 // Pin object, return pinned object or null in rax
1438 static void gen_pin_object(MacroAssembler* masm,
1439                            VMRegPair reg) {
1440   __ block_comment("gen_pin_object {");
1441 
1442   // rax always contains oop, either incoming or
1443   // pinned.
1444   Register tmp_reg = rax;
1445 
1446   Label is_null;
1447   VMRegPair tmp;
1448   VMRegPair in_reg = reg;
1449 
1450   tmp.set_ptr(tmp_reg->as_VMReg());
1451   if (reg.first()->is_stack()) {
1452     // Load the arg up from the stack
1453     move_ptr(masm, reg, tmp);
1454     reg = tmp;
1455   } else {
1456     __ movptr(rax, reg.first()->as_Register());
1457   }
1458   __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1459   __ jccb(Assembler::equal, is_null);
1460 
1461   if (reg.first()->as_Register() != c_rarg1) {
1462     __ movptr(c_rarg1, reg.first()->as_Register());
1463   }
1464 
1465   __ call_VM_leaf(
1466     CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
1467     r15_thread, c_rarg1);
1468 
1469   __ bind(is_null);
1470   __ block_comment("} gen_pin_object");
1471 }
1472 
1473 // Unpin object
1474 static void gen_unpin_object(MacroAssembler* masm,
1475                              VMRegPair reg) {
1476   __ block_comment("gen_unpin_object {");
1477   Label is_null;
1478 
1479   if (reg.first()->is_stack()) {
1480     __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
1481   } else if (reg.first()->as_Register() != c_rarg1) {
1482     __ movptr(c_rarg1, reg.first()->as_Register());
1483   }
1484 
1485   __ testptr(c_rarg1, c_rarg1);
1486   __ jccb(Assembler::equal, is_null);
1487 
1488   __ call_VM_leaf(
1489     CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
1490     r15_thread, c_rarg1);
1491 
1492   __ bind(is_null);
1493   __ block_comment("} gen_unpin_object");
1494 }
1495 
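Taken together, the two stubs above implement a simple per-argument protocol around the native call: null-check the array, pin it through SharedRuntime::pin_object before the call, and unpin it through SharedRuntime::unpin_object afterwards. A self-contained C++-level sketch of that effect (the SharedRuntime entry points are the ones the patch adds; JavaThread, oop and the helper names here are illustrative stand-ins):

    #include <cstdio>

    struct JavaThread {};
    typedef void* oop;

    static oop  runtime_pin_object(JavaThread*, oop obj)   { std::puts("pinned");   return obj; }
    static void runtime_unpin_object(JavaThread*, oop obj) { std::puts("unpinned"); (void)obj;  }

    // What gen_pin_object emits, seen at the C++ level: null check, then a
    // leaf call into the runtime; the pinned oop comes back in rax.
    static oop pin_if_not_null(JavaThread* thread, oop obj) {
      if (obj == nullptr) return nullptr;          // testptr + jccb(equal, is_null)
      return runtime_pin_object(thread, obj);      // call_VM_leaf(SharedRuntime::pin_object, ...)
    }

    // What gen_unpin_object emits: the same shape, run after the native call returns.
    static void unpin_if_not_null(JavaThread* thread, oop obj) {
      if (obj == nullptr) return;
      runtime_unpin_object(thread, obj);           // call_VM_leaf(SharedRuntime::unpin_object, ...)
    }

    int main() {
      JavaThread t;
      int dummy;
      oop arr = &dummy;
      oop pinned = pin_if_not_null(&t, arr);
      // ... native code runs against the pinned array here ...
      unpin_if_not_null(&t, pinned);
      return 0;
    }
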
1496 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
1497 // keeps a new JNI critical region from starting until a GC has been
1498 // forced.  Save down any oops in registers and describe them in an
1499 // OopMap.
1500 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1501                                                int stack_slots,
1502                                                int total_c_args,
1503                                                int total_in_args,
1504                                                int arg_save_area,
1505                                                OopMapSet* oop_maps,
1506                                                VMRegPair* in_regs,
1507                                                BasicType* in_sig_bt) {
1508   __ block_comment("check GCLocker::needs_gc");
1509   Label cont;
1510   __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1511   __ jcc(Assembler::equal, cont);
1512 
1513   // Save down any incoming oops and call into the runtime to halt for a GC
1514 


2170     }
2171 
2172 #ifdef ASSERT
2173     {
2174       Label L;
2175       __ mov(rax, rsp);
2176       __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2177       __ cmpptr(rax, rsp);
2178       __ jcc(Assembler::equal, L);
2179       __ stop("improperly aligned stack");
2180       __ bind(L);
2181     }
2182 #endif /* ASSERT */
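
The assert block above only checks alignment. A standalone equivalent of the test it performs, assuming the usual two's-complement masking:

    #include <cassert>
    #include <cstdint>

    // andptr(rax, -16); cmpptr(rax, rsp) succeeds iff the low four bits of rsp are zero.
    static bool is_16_byte_aligned(uintptr_t sp) {
      return (sp & ~uintptr_t(15)) == sp;
    }

    int main() {
      assert(is_16_byte_aligned(0x7ffffffffff0ull));
      assert(!is_16_byte_aligned(0x7ffffffffff8ull));
      return 0;
    }
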
2183 
2184 
2185   // We use r14 as the oop handle for the receiver/klass
2186   // It is callee save so it survives the call to native
2187 
2188   const Register oop_handle_reg = r14;
2189 
2190   if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
2191     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2192                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2193   }
2194 
2195   //
2196   // We immediately shuffle the arguments so that for any vm call we have to
2197   // make from here on out (sync slow path, jvmti, etc.) we will have
2198   // captured the oops from our caller and have a valid oopMap for
2199   // them.
2200 
2201   // -----------------
2202   // The Grand Shuffle
2203 
2204   // The Java calling convention is either the same as (linux) or denser than (win64) the
2205   // c calling convention. However, because of the jni_env argument the c calling
2206   // convention always has at least one more (and two for static) arguments than Java.
2207   // Therefore if we move the args from java -> c backwards then we will never have
2208   // a register->register conflict and we don't have to build a dependency graph
2209   // and figure out how to break any cycles.
2210   //
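
The supports_object_pinning() guard added above is the heart of the change: on a heap that can pin individual objects, the wrapper pins the array arguments of a critical native instead of taking the GCLocker slow path. A sketch of that decision only (illustrative types and strings, not the real interfaces):

    #include <cstdio>

    struct Heap { bool supports_object_pinning; };

    // How the wrapper chooses its critical-native strategy (sketch only).
    static void emit_critical_native_entry(const Heap& heap) {
      if (heap.supports_object_pinning) {
        std::puts("pin each T_ARRAY argument before the call, unpin it afterwards");
      } else {
        std::puts("emit check_needs_gc_for_critical_native(): block if GCLocker needs a GC");
      }
    }

    int main() {
      emit_critical_native_entry(Heap{true});
      emit_critical_native_entry(Heap{false});
      return 0;
    }
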


2227   // All inbound args are referenced based on rbp and all outbound args via rsp.
2228 
2229 
2230 #ifdef ASSERT
2231   bool reg_destroyed[RegisterImpl::number_of_registers];
2232   bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2233   for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2234     reg_destroyed[r] = false;
2235   }
2236   for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2237     freg_destroyed[f] = false;
2238   }
2239 
2240 #endif /* ASSERT */
2241 
2242   // This may iterate in two different directions depending on the
2243   // kind of native it is.  The reason is that for regular JNI natives
2244   // the incoming and outgoing registers are offset upwards and for
2245   // critical natives they are offset down.
2246   GrowableArray<int> arg_order(2 * total_in_args);
2247   // Inbound arguments that need to be pinned for critical natives
2248   GrowableArray<int> pinned_args(total_in_args);
2249   // Current stack slot for storing register based array argument
2250   int pinned_slot = oop_handle_offset;
2251 
2252   VMRegPair tmp_vmreg;
2253   tmp_vmreg.set2(rbx->as_VMReg());
2254 
2255   if (!is_critical_native) {
2256     for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2257       arg_order.push(i);
2258       arg_order.push(c_arg);
2259     }
2260   } else {
2261     // Compute a valid move order, using tmp_vmreg to break any cycles
2262     ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2263   }
2264 
2265   int temploc = -1;
2266   for (int ai = 0; ai < arg_order.length(); ai += 2) {
2267     int i = arg_order.at(ai);
2268     int c_arg = arg_order.at(ai + 1);
2269     __ block_comment(err_msg("move %d -> %d", i, c_arg));
2270     if (c_arg == -1) {
2271       assert(is_critical_native, "should only be required for critical natives");


2279       // Read from the temporary location
2280       assert(temploc != -1, "must be valid");
2281       i = temploc;
2282       temploc = -1;
2283     }
2284 #ifdef ASSERT
2285     if (in_regs[i].first()->is_Register()) {
2286       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2287     } else if (in_regs[i].first()->is_XMMRegister()) {
2288       assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2289     }
2290     if (out_regs[c_arg].first()->is_Register()) {
2291       reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2292     } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2293       freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2294     }
2295 #endif /* ASSERT */
2296     switch (in_sig_bt[i]) {
2297       case T_ARRAY:
2298         if (is_critical_native) {
2299           // pin before unpack
2300           if (Universe::heap()->supports_object_pinning()) {
2301             save_args(masm, total_c_args, 0, out_regs);
2302             gen_pin_object(masm, in_regs[i]);
2303             pinned_args.append(i);
2304             restore_args(masm, total_c_args, 0, out_regs);
2305 
2306             // rax has pinned array
2307             VMRegPair result_reg;
2308             result_reg.set_ptr(rax->as_VMReg());
2309             move_ptr(masm, result_reg, in_regs[i]);
2310             if (!in_regs[i].first()->is_stack()) {
2311               assert(pinned_slot <= stack_slots, "overflow");
2312               move_ptr(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
2313               pinned_slot += VMRegImpl::slots_per_word;
2314             }
2315           }
2316           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2317           c_arg++;
2318 #ifdef ASSERT
2319           if (out_regs[c_arg].first()->is_Register()) {
2320             reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2321           } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2322             freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2323           }
2324 #endif
2325           break;
2326         }
2327       case T_OBJECT:
2328         assert(!is_critical_native, "no oop arguments");
2329         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2330                     ((i == 0) && (!is_static)),
2331                     &receiver_offset);
2332         break;
2333       case T_VOID:
2334         break;
2335 
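For register-based array arguments the pinned oop also has to survive the native call so it can be unpinned afterwards; that is what the pinned_args list and the pinned_slot spill area above are for. A toy model of that bookkeeping (stand-in helpers, not the generated code):

    #include <cassert>
    #include <vector>

    // Toy bookkeeping: remember which args were pinned and, for register args,
    // where the pinned oop was stashed so it survives the native call.
    struct PinnedBookkeeping {
      std::vector<int>   pinned_args;   // indices of pinned arguments
      std::vector<void*> spill_slots;   // stand-in for the pinned_slot stack area
    };

    static void* pin(void* obj)   { return obj; }   // SharedRuntime::pin_object stand-in
    static void  unpin(void* obj) { (void)obj;  }   // SharedRuntime::unpin_object stand-in

    int main() {
      int a[4] = {0, 1, 2, 3};
      PinnedBookkeeping bk;

      // Before the call: pin the register-based array arg and spill the pinned oop.
      void* pinned = pin(a);
      bk.pinned_args.push_back(0);
      bk.spill_slots.push_back(pinned);   // the register itself is clobbered by the call

      // ... native call runs here, clobbering argument registers ...

      // After the call: reload each pinned oop from its spill slot and unpin it.
      for (int k = 0; k < (int)bk.pinned_args.size(); k++) {
        unpin(bk.spill_slots[k]);
      }
      assert(bk.pinned_args.size() == 1);
      return 0;
    }
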


2510   // Verify or restore cpu control state after JNI call
2511   __ restore_cpu_control_state_after_jni();
2512 
2513   // Unpack native results.
2514   switch (ret_type) {
2515   case T_BOOLEAN: __ c2bool(rax);            break;
2516   case T_CHAR   : __ movzwl(rax, rax);      break;
2517   case T_BYTE   : __ sign_extend_byte (rax); break;
2518   case T_SHORT  : __ sign_extend_short(rax); break;
2519   case T_INT    : /* nothing to do */        break;
2520   case T_DOUBLE :
2521   case T_FLOAT  :
2522     // Result is in xmm0; we'll save it as needed
2523     break;
2524   case T_ARRAY:                 // Really a handle
2525   case T_OBJECT:                // Really a handle
2526       break; // can't de-handlize until after safepoint check
2527   case T_VOID: break;
2528   case T_LONG: break;
2529   default       : ShouldNotReachHere();
2530   }
2531 
2532   // unpin pinned arguments
2533   pinned_slot = oop_handle_offset;
2534   if (pinned_args.length() > 0) {
2535     // save return value that may be overwritten otherwise.
2536     save_native_result(masm, ret_type, stack_slots);
2537     for (int index = 0; index < pinned_args.length(); index ++) {
2538       int i = pinned_args.at(index);
2539       assert(pinned_slot <= stack_slots, "overflow");
2540       if (!in_regs[i].first()->is_stack()) {
2541         int offset = pinned_slot * VMRegImpl::stack_slot_size;
2542         __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
2543         pinned_slot += VMRegImpl::slots_per_word;
2544       }
2545       gen_unpin_object(masm, in_regs[i]);
2546     }
2547     restore_native_result(masm, ret_type, stack_slots);
2548   }
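
The unpin calls are ordinary VM leaf calls, so they may clobber the registers holding the native result; hence the save_native_result / restore_native_result bracket above. The same pattern in miniature (sketch only, not the generated code):

    #include <cassert>

    static long cleanup_call() { return -1; }   // stand-in for an unpin leaf call that clobbers rax

    int main() {
      long rax = 42;           // native result we must not lose
      long saved = rax;        // save_native_result: spill to the frame
      rax = cleanup_call();    // the leaf call is free to clobber the result register
      rax = saved;             // restore_native_result: reload after cleanup
      assert(rax == 42);
      return 0;
    }
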
2549 
2550   // Switch thread to "native transition" state before reading the synchronization state.
2551   // This additional state is necessary because reading and testing the synchronization
2552   // state is not atomic w.r.t. GC, as this scenario demonstrates:
2553   //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2554   //     VM thread changes sync state to synchronizing and suspends threads for GC.
2555   //     Thread A is resumed to finish this native method, but doesn't block here since it
2556   //     didn't see any synchronization in progress, and escapes.
2557   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2558 
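The comment above is about ordering: the store of _thread_in_native_trans must become visible before the safepoint state is read, otherwise the thread can slip past a GC that is just starting. The requirement, modeled with std::atomic (a sketch of the ordering constraint, not of HotSpot's actual safepoint protocol; the flags here are illustrative):

    #include <atomic>

    std::atomic<int>  thread_state{0};         // 0 ~ _thread_in_native, 1 ~ _thread_in_native_trans
    std::atomic<bool> vm_synchronizing{false}; // stand-in for the safepoint/sync state

    bool must_block_for_safepoint() {
      thread_state.store(1, std::memory_order_relaxed);         // movl(thread_state, _thread_in_native_trans)
      std::atomic_thread_fence(std::memory_order_seq_cst);      // membar StoreLoad (the UseMembar path below)
      return vm_synchronizing.load(std::memory_order_relaxed);  // only now read the sync state
    }

    int main() { return must_block_for_safepoint() ? 1 : 0; }
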
2559   if(os::is_MP()) {
2560     if (UseMembar) {
2561       // Force this write out before the read below
2562       __ membar(Assembler::Membar_mask_bits(
2563            Assembler::LoadLoad | Assembler::LoadStore |
2564            Assembler::StoreLoad | Assembler::StoreStore));
2565     } else {
2566       // Write serialization page so VM thread can do a pseudo remote membar.
2567       // We use the current thread pointer to calculate a thread specific

