
src/cpu/x86/vm/sharedRuntime_x86_32.cpp

  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/debugInfoRec.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "logging/log.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/compiledICHolder.hpp"
  35 #include "runtime/sharedRuntime.hpp"
  36 #include "runtime/vframeArray.hpp"
  37 #include "vmreg_x86.inline.hpp"
  38 #ifdef COMPILER1
  39 #include "c1/c1_Runtime1.hpp"
  40 #endif
  41 #ifdef COMPILER2
  42 #include "opto/runtime.hpp"
  43 #endif

  44 
  45 #define __ masm->
  46 
  47 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  48 
  49 class RegisterSaver {
  50   // Capture info about frame layout
  51 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  52   enum layout {
  53                 fpu_state_off = 0,
  54                 fpu_state_end = fpu_state_off+FPUStateSizeInWords,
  55                 st0_off, st0H_off,
  56                 st1_off, st1H_off,
  57                 st2_off, st2H_off,
  58                 st3_off, st3H_off,
  59                 st4_off, st4H_off,
  60                 st5_off, st5H_off,
  61                 st6_off, st6H_off,
  62                 st7_off, st7H_off,
  63                 xmm_off,


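The DEF_XMM_OFFS macro above pastes together an _off / H_off pair of enum entries for each XMM register, spaced 16/BytesPerInt slots apart. A minimal stand-alone sketch of that expansion (xmm_off is pinned to 0 here purely for illustration; in the real layout it follows the FPU state slots):

    // Stand-alone illustration of the DEF_XMM_OFFS expansion; the values are
    // placeholders, not the real save-area layout.
    #include <cstdio>

    enum { BytesPerInt = 4, xmm_off = 0 };   // xmm_off = 0 is an assumption of this demo
    #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off

    enum demo_layout { DEF_XMM_OFFS(0), DEF_XMM_OFFS(1) };

    int main() {
      // Each register gets a low slot and a companion H slot; registers are 16 bytes = 4 ints apart.
      printf("xmm0_off=%d xmm0H_off=%d xmm1_off=%d xmm1H_off=%d\n",
             (int)xmm0_off, (int)xmm0H_off, (int)xmm1_off, (int)xmm1H_off);   // prints 0 1 4 5
      return 0;
    }
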
 103   static int rdxOffset(void) { return rdx_off; }
 104   static int rbxOffset(void) { return rbx_off; }
 105   static int xmm0Offset(void) { return xmm0_off; }
 106   // This really returns a slot in the fp save area; which one is not important
 107   static int fpResultOffset(void) { return st0_off; }
 108 
 109   // During deoptimization only the result registers need to be restored;
 110   // all the other values have already been extracted.
 111 
 112   static void restore_result_registers(MacroAssembler* masm);
 113 
 114 };
 115 
 116 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
 117                                            int* total_frame_words, bool verify_fpu, bool save_vectors) {
 118   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 119   int ymm_bytes = num_xmm_regs * 16;
 120   int zmm_bytes = num_xmm_regs * 32;
 121 #ifdef COMPILER2
 122   if (save_vectors) {
 123     assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
 124     assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
 125     // Save upper half of YMM registers
 126     int vect_bytes = ymm_bytes;
 127     if (UseAVX > 2) {
 128       // Save upper half of ZMM registers as well
 129       vect_bytes += zmm_bytes;
 130     }
 131     additional_frame_words += vect_bytes / wordSize;
 132   }
 133 #else
 134   assert(!save_vectors, "vectors are generated only by C2");
 135 #endif
 136   int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
 137   int frame_words = frame_size_in_bytes / wordSize;
 138   *total_frame_words = frame_words;
 139 
 140   assert(FPUStateSizeInWords == 27, "update stack layout");
 141 
 142   // save registers, fpu state, and flags
 143   // We assume the caller already has the return address slot on the stack
 144   // We push ebp twice in this sequence because we want the real rbp,


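For a feel of the save_vectors accounting in save_live_registers above, here is a small stand-alone calculation, assuming the x86_32 values of 8 XMM registers and a 4-byte word (assumptions of this sketch, not spelled out in the excerpt):

    // Rough sketch of the extra frame space reserved for vector upper halves.
    #include <cstdio>

    int main() {
      const int num_xmm_regs = 8;            // xmm0..xmm7 on 32-bit x86 (assumed)
      const int wordSize     = 4;            // bytes per word on x86_32 (assumed)
      int ymm_bytes  = num_xmm_regs * 16;    // upper 128 bits of each YMM register
      int zmm_bytes  = num_xmm_regs * 32;    // upper 256 bits of each ZMM register
      int vect_bytes = ymm_bytes;            // AVX / AVX2: YMM upper halves only
      printf("AVX:     %3d bytes -> %2d extra frame words\n", vect_bytes, vect_bytes / wordSize);
      vect_bytes += zmm_bytes;               // UseAVX > 2: ZMM upper halves as well
      printf("AVX-512: %3d bytes -> %2d extra frame words\n", vect_bytes, vect_bytes / wordSize);
      return 0;
    }
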
 202     for (int n = 0; n < num_xmm_regs; n++) {
 203       __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
 204       off += delta;
 205     }
 206   }
 207 
 208   if (save_vectors) {
 209     __ subptr(rsp, ymm_bytes);
 210     // Save upper half of YMM registers
 211     for (int n = 0; n < num_xmm_regs; n++) {
 212       __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
 213     }
 214     if (UseAVX > 2) {
 215       __ subptr(rsp, zmm_bytes);
 216       // Save upper half of ZMM registers
 217       for (int n = 0; n < num_xmm_regs; n++) {
 218         __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
 219       }
 220     }
 221   }

 222 
 223   // Set an oopmap for the call site.  This oopmap will map all
 224   // oop-registers and debug-info registers as callee-saved.  This
 225   // will allow deoptimization at this safepoint to find all possible
 226   // debug-info recordings, as well as let GC find all oops.
 227 
 228   OopMapSet *oop_maps = new OopMapSet();
 229   OopMap* map =  new OopMap( frame_words, 0 );
 230 
 231 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
 232 #define NEXTREG(x) (x)->as_VMReg()->next()
 233 
 234   map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
 235   map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
 236   map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
 237   map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
 238   // rbp, location is known implicitly, no oopMap
 239   map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
 240   map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
 241   // %%% This is really a waste but we'll keep things as they were for now for the upper component


 252   for (int n = 0; n < num_xmm_regs; n++) {
 253     XMMRegister xmm_name = as_XMMRegister(n);
 254     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 255     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
 256     off += delta;
 257   }
 258 #undef NEXTREG
 259 #undef STACK_OFFSET
 260 
 261   return map;
 262 }
 263 
 264 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 265   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 266   int ymm_bytes = num_xmm_regs * 16;
 267   int zmm_bytes = num_xmm_regs * 32;
 268   // Recover XMM & FPU state
 269   int additional_frame_bytes = 0;
 270 #ifdef COMPILER2
 271   if (restore_vectors) {
 272     assert(UseAVX > 0, "up to 512bit vectors are supported with EVEX");
 273     assert(MaxVectorSize <= 64, "up to 512bit vectors are supported now");
 274     // Account for the saved upper halves of the YMM registers
 275     additional_frame_bytes = ymm_bytes;
 276     if (UseAVX > 2) {
 277       // and for the saved upper halves of the ZMM registers as well
 278       additional_frame_bytes += zmm_bytes;
 279     }
 280   }
 281 #else
 282   assert(!restore_vectors, "vectors are generated only by C2");
 283 #endif
 284 
 285   int off = xmm0_off;
 286   int delta = xmm1_off - off;
 287 


 288   if (UseSSE == 1) {
 289     // Restore XMM registers
 290     assert(additional_frame_bytes == 0, "");
 291     for (int n = 0; n < num_xmm_regs; n++) {
 292       __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
 293       off += delta;
 294     }
 295   } else if (UseSSE >= 2) {
 296     // Restore the whole 128-bit (16-byte) XMM registers. Do this before restoring the YMM and
 297     // ZMM upper halves, because movdqu zeroes the bits of the register above the XMM part.
 298     for (int n = 0; n < num_xmm_regs; n++) {
 299       __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
 300       off += delta;
 301     }
 302   }
 303 
 304   if (restore_vectors) {
 305     if (UseAVX > 2) {
 306       // Restore upper half of ZMM registers.
 307       for (int n = 0; n < num_xmm_regs; n++) {


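The restore-order comment above (full 128-bit XMM contents first, vector upper halves afterwards) can be illustrated outside HotSpot with AVX intrinsics. The sketch below is an analogy only, not code from this file, and assumes compilation with -mavx:

    #include <immintrin.h>

    // Analogy for the restore ordering: establish the low 128 bits first, then
    // re-insert the upper 128 bits, mirroring "XMM before YMM upper halves".
    __m256 restore_ymm(const float* low128, const float* high128) {
      __m128 lo = _mm_loadu_ps(low128);                          // 128-bit load (the VEX form
                                                                 // clears the bits above it)
      __m256 v  = _mm256_castps128_ps256(lo);                    // widen to a YMM value
      return _mm256_insertf128_ps(v, _mm_loadu_ps(high128), 1);  // then put the upper half back
    }
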
2106   Label after_transition;
2107 
2108   // check for safepoint operation in progress and/or pending suspend requests
2109   { Label Continue;
2110 
2111     __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2112              SafepointSynchronize::_not_synchronized);
2113 
2114     Label L;
2115     __ jcc(Assembler::notEqual, L);
2116     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
2117     __ jcc(Assembler::equal, Continue);
2118     __ bind(L);
2119 
2120     // Don't use call_VM: it would see a possible pending exception, forward it,
2121     // and never return here, preventing us from clearing _last_native_pc down below.
2122     // We also can't use call_VM_leaf, because it checks that rsi & rdi are
2123     // preserved and correspond to the bcp/locals pointers. So we do the runtime call
2124     // by hand.
2125     //


2126     save_native_result(masm, ret_type, stack_slots);
2127     __ push(thread);
2128     if (!is_critical_native) {
2129       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2130                                               JavaThread::check_special_condition_for_native_trans)));
2131     } else {
2132       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2133                                               JavaThread::check_special_condition_for_native_trans_and_transition)));
2134     }
2135     __ increment(rsp, wordSize);
2136     // Restore any method result value
2137     restore_native_result(masm, ret_type, stack_slots);
2138 
2139     if (is_critical_native) {
2140       // The call above performed the transition to thread_in_Java so
2141       // skip the transition logic below.
2142       __ jmpb(after_transition);
2143     }
2144 
2145     __ bind(Continue);


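For orientation, here is a hedged C++-level sketch of the condition and hand-rolled runtime call that the generated code above implements; every type and function below is a stand-in, not a HotSpot declaration:

    #include <atomic>

    // Stand-in declarations; none of these are HotSpot types.
    enum SafepointState { not_synchronized, synchronizing, fully_synchronized };
    struct StandInThread { std::atomic<int> suspend_flags{0}; };
    std::atomic<SafepointState> safepoint_state{not_synchronized};

    // Stand-in for JavaThread::check_special_condition_for_native_trans.
    void check_special_condition_for_native_trans(StandInThread*) {}

    void native_to_java_transition(StandInThread* thread) {
      if (safepoint_state.load() != not_synchronized ||
          thread->suspend_flags.load() != 0) {
        // Mirrors the stub: save the native result, call the runtime by hand
        // (call_VM / call_VM_leaf are unsuitable here, see the comment above),
        // then restore the result before completing the transition.
        check_special_condition_for_native_trans(thread);
      }
    }
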
2290     __ push(thread);
2291     __ push(lock_reg);
2292     __ push(obj_reg);
2293     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
2294     __ addptr(rsp, 3*wordSize);
2295 
2296 #ifdef ASSERT
2297     { Label L;
2298     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2299     __ jcc(Assembler::equal, L);
2300     __ stop("no pending exception allowed on exit from monitorenter");
2301     __ bind(L);
2302     }
2303 #endif
2304     __ jmp(lock_done);
2305 
2306     // END Slow path lock
2307 
2308     // BEGIN Slow path unlock
2309     __ bind(slow_path_unlock);
2310 
2311     // Slow path unlock
2312 
2313     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2314       save_native_result(masm, ret_type, stack_slots);
2315     }
2316     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2317 
2318     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2319     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2320 
2321 
2322     // should be a peal
2323     // +wordSize because of the push above
2324     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2325     __ push(thread);
2326     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2327     __ push(rax);
2328 
2329     __ push(obj_reg);
2330     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));


2335       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2336       __ jcc(Assembler::equal, L);
2337       __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
2338       __ bind(L);
2339     }
2340 #endif /* ASSERT */
2341 
2342     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2343 
2344     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2345       restore_native_result(masm, ret_type, stack_slots);
2346     }
2347     __ jmp(unlock_done);
2348     // END Slow path unlock
2349 
2350   }
2351 
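The pending-exception handling in the slow unlock path above is a save/clear/call/restore pattern: complete_monitor_unlocking_C contains an EXCEPTION_MARK, so it must not see a pending exception. A hedged sketch with stand-in names:

    // Stand-in types only; not HotSpot declarations.
    struct StandInThread { void* pending_exception = nullptr; };

    void complete_monitor_unlocking_stub(StandInThread*) { /* must see no pending exception */ }

    void slow_path_unlock_sketch(StandInThread* thread) {
      void* saved = thread->pending_exception;    // pushptr(pending_exception_offset())
      thread->pending_exception = nullptr;        // movptr(..., NULL_WORD)
      complete_monitor_unlocking_stub(thread);    // runtime call with an EXCEPTION_MARK inside
      thread->pending_exception = saved;          // popptr(pending_exception_offset())
    }
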
2352   // SLOW PATH Reguard the stack if needed
2353 
2354   __ bind(reguard);

2355   save_native_result(masm, ret_type, stack_slots);
2356   {
2357     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2358   }
2359   restore_native_result(masm, ret_type, stack_slots);
2360   __ jmp(reguard_done);
2361 
2362 
2363   // BEGIN EXCEPTION PROCESSING
2364 
2365   if (!is_critical_native) {
2366     // Forward the exception
2367     __ bind(exception_pending);
2368 
2369     // remove possible return value from FPU register stack
2370     __ empty_FPU_stack();
2371 
2372     // pop our frame
2373     __ leave();
2374     // and forward the exception



src/cpu/x86/vm/sharedRuntime_x86_32.cpp (patched version)

  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/debugInfoRec.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "code/vtableStubs.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "logging/log.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/compiledICHolder.hpp"
  35 #include "runtime/sharedRuntime.hpp"
  36 #include "runtime/vframeArray.hpp"
  37 #include "vmreg_x86.inline.hpp"
  38 #ifdef COMPILER1
  39 #include "c1/c1_Runtime1.hpp"
  40 #endif
  41 #ifdef COMPILER2
  42 #include "opto/runtime.hpp"
  43 #endif
  44 #include "vm_version_x86.hpp"
  45 
  46 #define __ masm->
  47 
  48 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
  49 
  50 class RegisterSaver {
  51   // Capture info about frame layout
  52 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  53   enum layout {
  54                 fpu_state_off = 0,
  55                 fpu_state_end = fpu_state_off+FPUStateSizeInWords,
  56                 st0_off, st0H_off,
  57                 st1_off, st1H_off,
  58                 st2_off, st2H_off,
  59                 st3_off, st3H_off,
  60                 st4_off, st4H_off,
  61                 st5_off, st5H_off,
  62                 st6_off, st6H_off,
  63                 st7_off, st7H_off,
  64                 xmm_off,


 104   static int rdxOffset(void) { return rdx_off; }
 105   static int rbxOffset(void) { return rbx_off; }
 106   static int xmm0Offset(void) { return xmm0_off; }
 107   // This really returns a slot in the fp save area; which one is not important
 108   static int fpResultOffset(void) { return st0_off; }
 109 
 110   // During deoptimization only the result registers need to be restored;
 111   // all the other values have already been extracted.
 112 
 113   static void restore_result_registers(MacroAssembler* masm);
 114 
 115 };
 116 
 117 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
 118                                            int* total_frame_words, bool verify_fpu, bool save_vectors) {
 119   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 120   int ymm_bytes = num_xmm_regs * 16;
 121   int zmm_bytes = num_xmm_regs * 32;
 122 #ifdef COMPILER2
 123   if (save_vectors) {
 124     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 125     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 126     // Save upper half of YMM registers
 127     int vect_bytes = ymm_bytes;
 128     if (UseAVX > 2) {
 129       // Save upper half of ZMM registers as well
 130       vect_bytes += zmm_bytes;
 131     }
 132     additional_frame_words += vect_bytes / wordSize;
 133   }
 134 #else
 135   assert(!save_vectors, "vectors are generated only by C2");
 136 #endif
 137   int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
 138   int frame_words = frame_size_in_bytes / wordSize;
 139   *total_frame_words = frame_words;
 140 
 141   assert(FPUStateSizeInWords == 27, "update stack layout");
 142 
 143   // save registers, fpu state, and flags
 144   // We assume the caller already has the return address slot on the stack
 145   // We push ebp twice in this sequence because we want the real rbp,


 203     for (int n = 0; n < num_xmm_regs; n++) {
 204       __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
 205       off += delta;
 206     }
 207   }
 208 
 209   if (save_vectors) {
 210     __ subptr(rsp, ymm_bytes);
 211     // Save upper half of YMM registers
 212     for (int n = 0; n < num_xmm_regs; n++) {
 213       __ vextractf128_high(Address(rsp, n*16), as_XMMRegister(n));
 214     }
 215     if (UseAVX > 2) {
 216       __ subptr(rsp, zmm_bytes);
 217       // Save upper half of ZMM registers
 218       for (int n = 0; n < num_xmm_regs; n++) {
 219         __ vextractf64x4_high(Address(rsp, n*32), as_XMMRegister(n));
 220       }
 221     }
 222   }
 223   __ vzeroupper();
 224 
 225   // Set an oopmap for the call site.  This oopmap will map all
 226   // oop-registers and debug-info registers as callee-saved.  This
 227   // will allow deoptimization at this safepoint to find all possible
 228   // debug-info recordings, as well as let GC find all oops.
 229 
 230   OopMapSet *oop_maps = new OopMapSet();
 231   OopMap* map =  new OopMap( frame_words, 0 );
 232 
 233 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
 234 #define NEXTREG(x) (x)->as_VMReg()->next()
 235 
 236   map->set_callee_saved(STACK_OFFSET(rax_off), rax->as_VMReg());
 237   map->set_callee_saved(STACK_OFFSET(rcx_off), rcx->as_VMReg());
 238   map->set_callee_saved(STACK_OFFSET(rdx_off), rdx->as_VMReg());
 239   map->set_callee_saved(STACK_OFFSET(rbx_off), rbx->as_VMReg());
 240   // rbp, location is known implicitly, no oopMap
 241   map->set_callee_saved(STACK_OFFSET(rsi_off), rsi->as_VMReg());
 242   map->set_callee_saved(STACK_OFFSET(rdi_off), rdi->as_VMReg());
 243   // %%% This is really a waste but we'll keep things as they were for now for the upper component


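The vzeroupper() calls added in this version (after saving the vector upper halves here, and before the runtime calls further down) follow the usual rationale for that instruction: clearing the upper YMM/ZMM state before executing legacy-SSE or C runtime code avoids AVX-to-SSE transition stalls. A stand-alone intrinsics sketch of the same idea, not taken from this file (assumes compilation with -mavx):

    #include <immintrin.h>

    // Stand-in for separately compiled legacy-SSE or C runtime code.
    void legacy_sse_code(float* p) { _mm_storeu_ps(p, _mm_set1_ps(1.0f)); }

    void avx_then_call_out(float* dst, const float* a, const float* b, float* scratch) {
      __m256 v = _mm256_add_ps(_mm256_loadu_ps(a), _mm256_loadu_ps(b));
      _mm256_storeu_ps(dst, v);
      _mm256_zeroupper();        // clear upper YMM/ZMM state before leaving AVX code, so the
                                 // SSE code that follows does not pay a transition penalty
      legacy_sse_code(scratch);
    }
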
 254   for (int n = 0; n < num_xmm_regs; n++) {
 255     XMMRegister xmm_name = as_XMMRegister(n);
 256     map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
 257     map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(xmm_name));
 258     off += delta;
 259   }
 260 #undef NEXTREG
 261 #undef STACK_OFFSET
 262 
 263   return map;
 264 }
 265 
 266 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
 267   int num_xmm_regs = XMMRegisterImpl::number_of_registers;
 268   int ymm_bytes = num_xmm_regs * 16;
 269   int zmm_bytes = num_xmm_regs * 32;
 270   // Recover XMM & FPU state
 271   int additional_frame_bytes = 0;
 272 #ifdef COMPILER2
 273   if (restore_vectors) {
 274     assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
 275     assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
 276     // Account for the saved upper halves of the YMM registers
 277     additional_frame_bytes = ymm_bytes;
 278     if (UseAVX > 2) {
 279       // and for the saved upper halves of the ZMM registers as well
 280       additional_frame_bytes += zmm_bytes;
 281     }
 282   }
 283 #else
 284   assert(!restore_vectors, "vectors are generated only by C2");
 285 #endif
 286 
 287   int off = xmm0_off;
 288   int delta = xmm1_off - off;
 289 
 290   __ vzeroupper();
 291 
 292   if (UseSSE == 1) {
 293     // Restore XMM registers
 294     assert(additional_frame_bytes == 0, "");
 295     for (int n = 0; n < num_xmm_regs; n++) {
 296       __ movflt(as_XMMRegister(n), Address(rsp, off*wordSize));
 297       off += delta;
 298     }
 299   } else if (UseSSE >= 2) {
 300     // Restore the whole 128-bit (16-byte) XMM registers. Do this before restoring the YMM and
 301     // ZMM upper halves, because movdqu zeroes the bits of the register above the XMM part.
 302     for (int n = 0; n < num_xmm_regs; n++) {
 303       __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
 304       off += delta;
 305     }
 306   }
 307 
 308   if (restore_vectors) {
 309     if (UseAVX > 2) {
 310       // Restore upper half of ZMM registers.
 311       for (int n = 0; n < num_xmm_regs; n++) {


2110   Label after_transition;
2111 
2112   // check for safepoint operation in progress and/or pending suspend requests
2113   { Label Continue;
2114 
2115     __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2116              SafepointSynchronize::_not_synchronized);
2117 
2118     Label L;
2119     __ jcc(Assembler::notEqual, L);
2120     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
2121     __ jcc(Assembler::equal, Continue);
2122     __ bind(L);
2123 
2124     // Don't use call_VM: it would see a possible pending exception, forward it,
2125     // and never return here, preventing us from clearing _last_native_pc down below.
2126     // We also can't use call_VM_leaf, because it checks that rsi & rdi are
2127     // preserved and correspond to the bcp/locals pointers. So we do the runtime call
2128     // by hand.
2129     //
2130     __ vzeroupper();
2131 
2132     save_native_result(masm, ret_type, stack_slots);
2133     __ push(thread);
2134     if (!is_critical_native) {
2135       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2136                                               JavaThread::check_special_condition_for_native_trans)));
2137     } else {
2138       __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
2139                                               JavaThread::check_special_condition_for_native_trans_and_transition)));
2140     }
2141     __ increment(rsp, wordSize);
2142     // Restore any method result value
2143     restore_native_result(masm, ret_type, stack_slots);
2144 
2145     if (is_critical_native) {
2146       // The call above performed the transition to thread_in_Java so
2147       // skip the transition logic below.
2148       __ jmpb(after_transition);
2149     }
2150 
2151     __ bind(Continue);


2296     __ push(thread);
2297     __ push(lock_reg);
2298     __ push(obj_reg);
2299     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
2300     __ addptr(rsp, 3*wordSize);
2301 
2302 #ifdef ASSERT
2303     { Label L;
2304     __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2305     __ jcc(Assembler::equal, L);
2306     __ stop("no pending exception allowed on exit from monitorenter");
2307     __ bind(L);
2308     }
2309 #endif
2310     __ jmp(lock_done);
2311 
2312     // END Slow path lock
2313 
2314     // BEGIN Slow path unlock
2315     __ bind(slow_path_unlock);
2316     __ vzeroupper();
2317     // Slow path unlock
2318 
2319     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2320       save_native_result(masm, ret_type, stack_slots);
2321     }
2322     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2323 
2324     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2325     __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2326 
2327 
2328     // should be a peal
2329     // +wordSize because of the push above
2330     // args are (oop obj, BasicLock* lock, JavaThread* thread)
2331     __ push(thread);
2332     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
2333     __ push(rax);
2334 
2335     __ push(obj_reg);
2336     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));


2341       __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2342       __ jcc(Assembler::equal, L);
2343       __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
2344       __ bind(L);
2345     }
2346 #endif /* ASSERT */
2347 
2348     __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
2349 
2350     if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2351       restore_native_result(masm, ret_type, stack_slots);
2352     }
2353     __ jmp(unlock_done);
2354     // END Slow path unlock
2355 
2356   }
2357 
2358   // SLOW PATH Reguard the stack if needed
2359 
2360   __ bind(reguard);
2361   __ vzeroupper();
2362   save_native_result(masm, ret_type, stack_slots);
2363   {
2364     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2365   }
2366   restore_native_result(masm, ret_type, stack_slots);
2367   __ jmp(reguard_done);
2368 
2369 
2370   // BEGIN EXCEPTION PROCESSING
2371 
2372   if (!is_critical_native) {
2373     // Forward the exception
2374     __ bind(exception_pending);
2375 
2376     // remove possible return value from FPU register stack
2377     __ empty_FPU_stack();
2378 
2379     // pop our frame
2380     __ leave();
2381     // and forward the exception

