
src/share/vm/c1/c1_Runtime1.cpp


----- old version -----


 784 //             jmp patch_site
 785 //
 786 // If the class is being initialized the patch body is rewritten and
 787 // the patch site is rewritten to jump to being_init, instead of
 788 // patch_stub.  Whenever this code is executed it checks the current
 789 // thread against the initializing thread so other threads will enter
 790 // the runtime and end up blocked waiting for the class to finish
 791 // initializing inside the calls to resolve_field below.  The
 792 // initializing thread will continue on its way.  Once the class is
 793 // fully_initialized, the initializing thread of the class becomes
 794 // NULL, so the next thread to execute this code will fail the test,
 795 // call into patch_code and complete the patching process by copying
 796 // the patch body back into the main part of the nmethod and resuming
 797 // execution.
 798 //
 799 //
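//
// A minimal sketch of the being_init test described above, using assumed
// names (illustrative pseudocode only, not the code the compiler emits):
//
//   being_init:
//     if (Thread::current() == klass->init_thread()) {
//       goto patch_body;       // the initializing thread runs the patch body
//     } else {
//       call patch_code(...);  // other threads block in resolve_field()
//     }                        // until the class is fully_initialized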
 800 
 801 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
 802   NOT_PRODUCT(_patch_code_slowcase_cnt++;)
 803 
 804   ResourceMark rm(thread);
 805   RegisterMap reg_map(thread, false);
 806   frame runtime_frame = thread->last_frame();
 807   frame caller_frame = runtime_frame.sender(&reg_map);
 808 
 809   // last java frame on stack
 810   vframeStream vfst(thread, true);
 811   assert(!vfst.at_end(), "Java frame must exist");
 812 
 813   methodHandle caller_method(THREAD, vfst.method());
 814   // Note that caller_method->code() may not be the same as caller_code because of OSRs
 815   // Note also that in the presence of inlining it is not guaranteed
 816   // that caller_method() == caller_code->method()
 817 
 818   int bci = vfst.bci();
 819   Bytecodes::Code code = caller_method()->java_code_at(bci);
 820 
 821 #ifndef PRODUCT
 822   // this is used by assertions in the access_field_patching_id case
 823   BasicType patch_field_type = T_ILLEGAL;


 930   if (deoptimize_for_volatile) {
 931     // At compile time we assumed the field wasn't volatile, but after
 932     // loading it turns out it was, so we have to throw the
 933     // compiled code out and let it be regenerated.
 934     if (TracePatching) {
 935       tty->print_cr("Deoptimizing for patching volatile field reference");
 936     }
 937     // It's possible the nmethod was invalidated in the last
 938     // safepoint, but if it's still alive then make it not_entrant.
 939     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
 940     if (nm != NULL) {
 941       nm->make_not_entrant();
 942     }
 943 
 944     Deoptimization::deoptimize_frame(thread, caller_frame.id());
 945 
 946     // Return to the now deoptimized frame.
 947   }
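  // An illustrative scenario (hypothetical Java class, not from this file):
  // C1 compiled the field access before the holder class was loaded and
  // assumed a plain, non-volatile access.  If the class then resolves as
  //
  //   class Holder { volatile int x; }
  //
  // the compiled access lacks the memory ordering a volatile load or store
  // requires, so the only safe fix is to deoptimize and recompile.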
 948 
 949   // Now copy code back
 950 
 951   {
 952     MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
 953     //
 954     // Deoptimization may have happened while we waited for the lock.
 955     // In that case we don't bother to do any patching; we just return
 956     // and let the deopt happen
 957     if (!caller_is_deopted()) {
 958       NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
 959       address instr_pc = jump->jump_destination();
 960       NativeInstruction* ni = nativeInstruction_at(instr_pc);
 961       if (ni->is_jump()) {
 962         // The jump has not been patched yet.
 963         // The jump destination is the slow case and therefore not part of the stubs
 964         // (stubs are only for StaticCalls)
 965 
 966         // format of buffer
 967         //    ....
 968         //    instr byte 0     <-- copy_buff
 969         //    instr byte 1
 970         //    ..


1173     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
1174     if (!nm->on_scavenge_root_list()) {
1175       CodeCache::add_scavenge_root_nmethod(nm);
1176     }
1177 
1178     // Since we've patched some oops in the nmethod,
1179     // (re)register it with the heap.
1180     Universe::heap()->register_nmethod(nm);
1181   }
1182 JRT_END
1183 
1184 //
 1185 // Entry point for compiled code. We want to patch an nmethod.
 1186 // We don't do a normal VM transition here because we want to
 1187 // know, after the patching is complete and any safepoint(s) are taken,
 1188 // whether the calling nmethod was deoptimized. We do this by calling a
1189 // helper method which does the normal VM transition and when it
1190 // completes we can check for deoptimization. This simplifies the
1191 // assembly code in the cpu directories.
1192 //
1193 int Runtime1::move_klass_patching(JavaThread* thread) {
1194 //
1195 // NOTE: we are still in Java
1196 //
1197   Thread* THREAD = thread;
1198   debug_only(NoHandleMark nhm;)
1199   {
1200     // Enter VM mode
1201 
1202     ResetNoHandleMark rnhm;
1203     patch_code(thread, load_klass_patching_id);
1204   }
1205   // Back in JAVA, use no oops DON'T safepoint
1206 
1207   // Return true if calling code is deoptimized
1208 
1209   return caller_is_deopted();
1210 }
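
// A hedged sketch of how a platform stub might consume the return value
// (illustrative pseudocode; the real code is in the cpu-specific assembly
// that calls move_klass_patching):
//
//   result = Runtime1::move_klass_patching(current_thread);
//   if (result != 0)   continue at the deoptimization blob   // caller deopted
//   else               return to the patch site and re-execute it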
1211 
1212 int Runtime1::move_mirror_patching(JavaThread* thread) {


1257 //
1258 
1259 int Runtime1::access_field_patching(JavaThread* thread) {
1260 //
1261 // NOTE: we are still in Java
1262 //
1263   Thread* THREAD = thread;
1264   debug_only(NoHandleMark nhm;)
1265   {
1266     // Enter VM mode
1267 
1268     ResetNoHandleMark rnhm;
1269     patch_code(thread, access_field_patching_id);
1270   }
1271   // Back in JAVA, use no oops DON'T safepoint
1272 
1273   // Return true if calling code is deoptimized
1274 
1275   return caller_is_deopted();
1276 JRT_END
1277 
1278 
1279 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
1280   // for now we just print out the block id
1281   tty->print("%d ", block_id);
1282 JRT_END
1283 
1284 
1285 // Array copy return codes.
1286 enum {
1287   ac_failed = -1, // arraycopy failed
1288   ac_ok = 0       // arraycopy succeeded
1289 };
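
// A hedged usage sketch (illustrative only): a caller of the helper below
// treats ac_ok as success and falls back to the fully checked generic
// arraycopy path on ac_failed:
//
//   if (obj_arraycopy_work(src, src_addr, dst, dst_addr, length) == ac_failed) {
//     // take the slow, fully type-checked path
//   }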
1290 
1291 
1292 // Below length is the # elements copied.
1293 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
1294                                           oopDesc* dst, T* dst_addr,
1295                                           int length) {
1296 
1297   // For performance reasons, we assume we are using a card marking write


----- patched version (adds AArch64 guards) -----

 784 //             jmp patch_site
 785 //
 786 // If the class is being initialized the patch body is rewritten and
 787 // the patch site is rewritten to jump to being_init, instead of
 788 // patch_stub.  Whenever this code is executed it checks the current
 789 // thread against the initializing thread so other threads will enter
 790 // the runtime and end up blocked waiting for the class to finish
 791 // initializing inside the calls to resolve_field below.  The
 792 // initializing thread will continue on its way.  Once the class is
 793 // fully_initialized, the initializing thread of the class becomes
 794 // NULL, so the next thread to execute this code will fail the test,
 795 // call into patch_code and complete the patching process by copying
 796 // the patch body back into the main part of the nmethod and resuming
 797 // execution.
 798 //
 799 //
 800 
 801 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
 802   NOT_PRODUCT(_patch_code_slowcase_cnt++;)
 803 
 804 #ifdef AARCH64
 805   // AArch64 does not patch C1-generated code.
 806   ShouldNotReachHere();
 807 #endif
 808 
 809   ResourceMark rm(thread);
 810   RegisterMap reg_map(thread, false);
 811   frame runtime_frame = thread->last_frame();
 812   frame caller_frame = runtime_frame.sender(&reg_map);
 813 
 814   // last java frame on stack
 815   vframeStream vfst(thread, true);
 816   assert(!vfst.at_end(), "Java frame must exist");
 817 
 818   methodHandle caller_method(THREAD, vfst.method());
 819   // Note that caller_method->code() may not be the same as caller_code because of OSRs
 820   // Note also that in the presence of inlining it is not guaranteed
 821   // that caller_method() == caller_code->method()
 822 
 823   int bci = vfst.bci();
 824   Bytecodes::Code code = caller_method()->java_code_at(bci);
 825 
 826 #ifndef PRODUCT
 827   // this is used by assertions in the access_field_patching_id case
 828   BasicType patch_field_type = T_ILLEGAL;


 935   if (deoptimize_for_volatile) {
 936     // At compile time we assumed the field wasn't volatile, but after
 937     // loading it turns out it was, so we have to throw the
 938     // compiled code out and let it be regenerated.
 939     if (TracePatching) {
 940       tty->print_cr("Deoptimizing for patching volatile field reference");
 941     }
 942     // It's possible the nmethod was invalidated in the last
 943     // safepoint, but if it's still alive then make it not_entrant.
 944     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
 945     if (nm != NULL) {
 946       nm->make_not_entrant();
 947     }
 948 
 949     Deoptimization::deoptimize_frame(thread, caller_frame.id());
 950 
 951     // Return to the now deoptimized frame.
 952   }
 953 
 954   // Now copy code back
 955   {
 956     MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
 957     //
 958     // Deoptimization may have happened while we waited for the lock.
 959     // In that case we don't bother to do any patching; we just return
 960     // and let the deopt happen
 961     if (!caller_is_deopted()) {
 962       NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
 963       address instr_pc = jump->jump_destination();
 964       NativeInstruction* ni = nativeInstruction_at(instr_pc);
 965       if (ni->is_jump()) {
 966         // The jump has not been patched yet.
 967         // The jump destination is the slow case and therefore not part of the stubs
 968         // (stubs are only for StaticCalls)
 969 
 970         // format of buffer
 971         //    ....
 972         //    instr byte 0     <-- copy_buff
 973         //    instr byte 1
 974         //    ..


1177     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
1178     if (!nm->on_scavenge_root_list()) {
1179       CodeCache::add_scavenge_root_nmethod(nm);
1180     }
1181 
1182     // Since we've patched some oops in the nmethod,
1183     // (re)register it with the heap.
1184     Universe::heap()->register_nmethod(nm);
1185   }
1186 JRT_END
1187 
1188 //
 1189 // Entry point for compiled code. We want to patch an nmethod.
 1190 // We don't do a normal VM transition here because we want to
 1191 // know, after the patching is complete and any safepoint(s) are taken,
 1192 // whether the calling nmethod was deoptimized. We do this by calling a
1193 // helper method which does the normal VM transition and when it
1194 // completes we can check for deoptimization. This simplifies the
1195 // assembly code in the cpu directories.
1196 //
1197 #ifndef TARGET_ARCH_aarch64
1198 int Runtime1::move_klass_patching(JavaThread* thread) {
1199 //
1200 // NOTE: we are still in Java
1201 //
1202   Thread* THREAD = thread;
1203   debug_only(NoHandleMark nhm;)
1204   {
1205     // Enter VM mode
1206 
1207     ResetNoHandleMark rnhm;
1208     patch_code(thread, load_klass_patching_id);
1209   }
1210   // Back in JAVA, use no oops DON'T safepoint
1211 
1212   // Return true if calling code is deoptimized
1213 
1214   return caller_is_deopted();
1215 }
1216 
1217 int Runtime1::move_mirror_patching(JavaThread* thread) {


1262 //
1263 
1264 int Runtime1::access_field_patching(JavaThread* thread) {
1265 //
1266 // NOTE: we are still in Java
1267 //
1268   Thread* THREAD = thread;
1269   debug_only(NoHandleMark nhm;)
1270   {
1271     // Enter VM mode
1272 
1273     ResetNoHandleMark rnhm;
1274     patch_code(thread, access_field_patching_id);
1275   }
1276   // Back in JAVA, use no oops DON'T safepoint
1277 
1278   // Return true if calling code is deoptimized
1279 
1280   return caller_is_deopted();
1281 JRT_END
 1282 #endif // !TARGET_ARCH_aarch64
1283 
1284 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
1285   // for now we just print out the block id
1286   tty->print("%d ", block_id);
1287 JRT_END
1288 
1289 
1290 // Array copy return codes.
1291 enum {
1292   ac_failed = -1, // arraycopy failed
1293   ac_ok = 0       // arraycopy succeeded
1294 };
1295 
1296 
1297 // Below length is the # elements copied.
1298 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
1299                                           oopDesc* dst, T* dst_addr,
1300                                           int length) {
1301 
1302   // For performance reasons, we assume we are using a card marking write

