// NOTE(review): this chunk is a side-by-side source listing of two revisions of
// the same C++ file (HotSpot nmethod code).  The number at the start of each
// line is the listing's own source-line index; a '|' further down in the file
// separates the old (left) revision from the new (right) revision.  The text is
// reproduced verbatim below, one listed line per physical line, with review
// comments added.
//
// Tail of an xtty logging routine (its definition opens above this chunk):
// emits one LOG_OFFSET attribute per nmethod section, then the method id,
// a timestamp, and the closing element.
941 LOG_OFFSET(xtty, consts);
942 LOG_OFFSET(xtty, insts);
943 LOG_OFFSET(xtty, stub);
944 LOG_OFFSET(xtty, scopes_data);
945 LOG_OFFSET(xtty, scopes_pcs);
946 LOG_OFFSET(xtty, dependencies);
947 LOG_OFFSET(xtty, handler_table);
948 LOG_OFFSET(xtty, nul_chk_table);
949 LOG_OFFSET(xtty, oops);
950
951 xtty->method(method());
952 xtty->stamp();
953 xtty->end_elem();
954 }
955 }
956
957 #undef LOG_OFFSET
958
959
// Prints a one-line compilation summary for this nmethod; in WizardMode it
// additionally prints the nmethod's address.  Holds the tty lock so the
// output stays contiguous.
960 // Print out more verbose output usually for a newly created nmethod.
961 void nmethod::print_on(outputStream* st, const char* msg) const {
962   if (st != NULL) {
963     ttyLocker ttyl;
964     if (WizardMode) {
965       CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
966       st->print_cr(" (" INTPTR_FORMAT ")", this);
967     } else {
968       CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
969     }
970   }
971 }
972
973
// Prints the nmethod header and, when requested, its code, PC descriptors and
// oop maps.  (This definition is truncated by the listing below.)
974 void nmethod::print_nmethod(bool printmethod) {
975   ttyLocker ttyl;  // keep the following output all in one block
976   if (xtty != NULL) {
977     xtty->begin_head("print_nmethod");
978     xtty->stamp();
979     xtty->end_head();
980   }
981   // print the header part first
982   print();
983   // then print the requested information
984   if (printmethod) {
985     print_code();
986     print_pcs();
987     if (oop_maps()) {
988       oop_maps()->print();
// (listing jumps from line 988 to 1252 -- the intervening lines are not shown)
1252 // Log the unloading.
// Tail of the nmethod-unloading path (its definition opens above this chunk):
// by this point the Method* has been cleared; detach the OSR link and notify
// the sweeper of the state change.
1253   log_state_change();
1254
1255   // The Method* is gone at this point
1256   assert(_method == NULL, "Tautology");
1257
1258   set_osr_link(NULL);
1259   //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
1260   NMethodSweeper::report_state_change(this);
1261 }
1262
// Makes an on-stack-replacement nmethod unreachable for future OSR lookups:
// removes it from its holder's OSR list and poisons its entry BCI.
1263 void nmethod::invalidate_osr_method() {
1264   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1265   // Remove from list of active nmethods
1266   if (method() != NULL)
1267     method()->method_holder()->remove_osr_nmethod(this);
1268   // Set entry as invalid
1269   _entry_bci = InvalidOSREntryBci;
1270 }
1271
// Reports this nmethod's state transition (unloaded / not_entrant / zombie)
// to the LogCompilation xtty stream and, under -XX:+PrintCompilation, to tty.
1272 void nmethod::log_state_change() const {
1273   if (LogCompilation) {
1274     if (xtty != NULL) {
1275       ttyLocker ttyl;  // keep the following output all in one block
1276       if (_state == unloaded) {
1277         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1278                          os::current_thread_id());
1279       } else {
1280         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1281                          os::current_thread_id(),
1282                          (_state == zombie ? " zombie='1'" : ""));
1283       }
1284       log_identity(xtty);
1285       xtty->stamp();
1286       xtty->end_elem();
1287     }
1288   }
1289   if (PrintCompilation && _state != unloaded) {
1290     print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
1291   }
1292 }
1293
1294 /**
1295  * Common functionality for both make_not_entrant and make_zombie
1296  */
// Returns false when the nmethod is already in the requested state (i.e.
// another thread won the race to perform this transition).
1297 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
1298   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1299   assert(!is_zombie(), "should not already be a zombie");
1300
1301   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1302   nmethodLocker nml(this);
1303   methodHandle the_method(method());
1304   No_Safepoint_Verifier nsv;
1305
1306   // during patching, depending on the nmethod state we must notify the GC that
1307   // code has been unloaded, unregistering it.
We cannot do this right while
1308   // holding the Patching_lock because we need to use the CodeCache_lock. This
1309   // would be prone to deadlocks.
1310   // This flag is used to remember whether we need to later lock and unregister.
1311   bool nmethod_needs_unregister = false;
1312
1313   {
1314     // invalidate osr nmethod before acquiring the patching lock since
1315     // they both acquire leaf locks and we don't want a deadlock.
1316     // This logic is equivalent to the logic below for patching the
1317     // verified entry point of regular methods.
1318     if (is_osr_method()) {
1319       // this effectively makes the osr nmethod not entrant
1320       invalidate_osr_method();
1321     }
1322
1323     // Enter critical section. Does not block for safepoint.
1324     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1325
1326     if (_state == state) {
1327       // another thread already performed this transition so nothing
1328       // to do, but return false to indicate this.
1329       return false;
1330     }
1331
1332     // The caller can be calling the method statically or through an inline
1333     // cache call.
1334     if (!is_osr_method() && !is_not_entrant()) {
1335       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1336                                        SharedRuntime::get_handle_wrong_method_stub());
1337     }
// (listing jumps from line 1337 to 1344 -- lines 1338-1343 are elided)
1344
1345     // If the state is becoming a zombie, signal to unregister the nmethod with
1346     // the heap.
1347     // This nmethod may have already been unloaded during a full GC.
1348     if ((state == zombie) && !is_unloaded()) {
1349       nmethod_needs_unregister = true;
1350     }
1351
1352     // Must happen before state change. Otherwise we have a race condition in
1353     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1354     // transition its state from 'not_entrant' to 'zombie' without having to wait
1355     // for stack scanning.
// Tail of the old revision's make_not_entrant_or_zombie: mark as seen on
// stack (with a store-store fence) before publishing the new _state, log the
// transition, then detach the nmethod from its Method* while still under the
// Patching_lock.
1356     if (state == not_entrant) {
1357       mark_as_seen_on_stack();
1358       OrderAccess::storestore();
1359     }
1360
1361     // Change state
1362     _state = state;
1363
1364     // Log the transition once
1365     log_state_change();
1366
1367     // Remove nmethod from method.
1368     // We need to check if both the _code and _from_compiled_code_entry_point
1369     // refer to this nmethod because there is a race in setting these two fields
1370     // in Method* as seen in bugid 4947125.
1371     // If the vep() points to the zombie nmethod, the memory for the nmethod
1372     // could be flushed and the compiler and vtable stubs could still call
1373     // through it.
1374     if (method() != NULL && (method()->code() == this ||
1375                              method()->from_compiled_entry() == verified_entry_point())) {
1376       HandleMark hm;
1377       method()->clear_code();
1378     }
1379   } // leave critical region under Patching_lock
1380
1381   // When the nmethod becomes zombie it is no longer alive so the
1382   // dependencies must be flushed. nmethods in the not_entrant
1383   // state will be flushed later when the transition to zombie
1384   // happens or they get unloaded.
1385   if (state == zombie) {
// ==== '|' below separates the OLD (left) and NEW (right) revisions of this
// ==== side-by-side listing; the new revision restarts at listed line 941.
|
941 LOG_OFFSET(xtty, consts);
942 LOG_OFFSET(xtty, insts);
943 LOG_OFFSET(xtty, stub);
944 LOG_OFFSET(xtty, scopes_data);
945 LOG_OFFSET(xtty, scopes_pcs);
946 LOG_OFFSET(xtty, dependencies);
947 LOG_OFFSET(xtty, handler_table);
948 LOG_OFFSET(xtty, nul_chk_table);
949 LOG_OFFSET(xtty, oops);
950
951 xtty->method(method());
952 xtty->stamp();
953 xtty->end_elem();
954 }
955 }
956
957 #undef LOG_OFFSET
958
959
960 // Print out more verbose output usually for a newly created nmethod.
// NEW revision of print_on: compared with the old revision, it takes an extra
// 'int entry_bci' parameter which is threaded through to
// CompileTask::print_compilation().  (Callers and the declaration are outside
// this chunk -- confirm a default/overload keeps old call sites compiling.)
961 void nmethod::print_on(outputStream* st, const char* msg, int entry_bci) const {
962   if (st != NULL) {
963     ttyLocker ttyl;
964     if (WizardMode) {
965       CompileTask::print_compilation(st, this, entry_bci, msg, /*short_form:*/ true);
966       st->print_cr(" (" INTPTR_FORMAT ")", this);
967     } else {
968       CompileTask::print_compilation(st, this, entry_bci, msg, /*short_form:*/ false);
969     }
970   }
971 }
972
973
974 void nmethod::print_nmethod(bool printmethod) {
975   ttyLocker ttyl;  // keep the following output all in one block
976   if (xtty != NULL) {
977     xtty->begin_head("print_nmethod");
978     xtty->stamp();
979     xtty->end_head();
980   }
981   // print the header part first
982   print();
983   // then print the requested information
984   if (printmethod) {
985     print_code();
986     print_pcs();
987     if (oop_maps()) {
988       oop_maps()->print();
// (listing jumps from line 988 to 1252 -- the intervening lines are not shown)
1252 // Log the unloading.
1253   log_state_change();
1254
1255   // The Method* is gone at this point
1256   assert(_method == NULL, "Tautology");
1257
1258   set_osr_link(NULL);
1259   //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
1260   NMethodSweeper::report_state_change(this);
1261 }
1262
1263 void nmethod::invalidate_osr_method() {
1264   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1265   // Remove from list of active nmethods
1266   if (method() != NULL)
1267     method()->method_holder()->remove_osr_nmethod(this);
1268   // Set entry as invalid
1269   _entry_bci = InvalidOSREntryBci;
1270 }
1271
// NEW revision of log_state_change: now takes the (possibly saved) entry_bci
// so the PrintCompilation line can show it even after invalidate_osr_method()
// has poisoned _entry_bci.
1272 void nmethod::log_state_change(int entry_bci) const {
1273   if (LogCompilation) {
1274     if (xtty != NULL) {
1275       ttyLocker ttyl;  // keep the following output all in one block
1276       if (_state == unloaded) {
1277         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1278                          os::current_thread_id());
1279       } else {
1280         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1281                          os::current_thread_id(),
1282                          (_state == zombie ?
" zombie='1'" : ""));
1283       }
1284       log_identity(xtty);
1285       xtty->stamp();
1286       xtty->end_elem();
1287     }
1288   }
1289   if (PrintCompilation && _state != unloaded) {
1290     print_on(tty, _state == zombie ? "made zombie" : "made not entrant", entry_bci);
1291   }
1292 }
1293
1294 /**
1295  * Common functionality for both make_not_entrant and make_zombie
1296  */
// Returns false when the nmethod is already in the requested state (i.e.
// another thread won the race to perform this transition).
1297 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
1298   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1299   assert(!is_zombie(), "should not already be a zombie");
1300
1301   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1302   nmethodLocker nml(this);
1303   methodHandle the_method(method());
1304   No_Safepoint_Verifier nsv;
1305
1306   // during patching, depending on the nmethod state we must notify the GC that
1307   // code has been unloaded, unregistering it. We cannot do this right while
1308   // holding the Patching_lock because we need to use the CodeCache_lock. This
1309   // would be prone to deadlocks.
1310   // This flag is used to remember whether we need to later lock and unregister.
1311   bool nmethod_needs_unregister = false;
1312
1313   {
1314     // invalidate osr nmethod before acquiring the patching lock since
1315     // they both acquire leaf locks and we don't want a deadlock.
1316     // This logic is equivalent to the logic below for patching the
1317     // verified entry point of regular methods.
// NEW in this revision: _entry_bci is captured *before* invalidate_osr_method()
// overwrites it with InvalidOSREntryBci, so the logged value is the real BCI.
1318     // Save entry bci for logging before invalidating the index.
1319     int entry_bci = _entry_bci;
1320     if (is_osr_method()) {
1321       // this effectively makes the osr nmethod not entrant
1322       invalidate_osr_method();
1323     }
1324
1325     // Enter critical section. Does not block for safepoint.
1326     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1327
1328     if (_state == state) {
1329       // another thread already performed this transition so nothing
1330       // to do, but return false to indicate this.
1331       return false;
1332     }
1333
1334     // The caller can be calling the method statically or through an inline
1335     // cache call.
1336     if (!is_osr_method() && !is_not_entrant()) {
1337       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1338                                        SharedRuntime::get_handle_wrong_method_stub());
1339     }
// (listing jumps from line 1339 to 1346 -- lines 1340-1345 are elided)
1346
1347     // If the state is becoming a zombie, signal to unregister the nmethod with
1348     // the heap.
1349     // This nmethod may have already been unloaded during a full GC.
1350     if ((state == zombie) && !is_unloaded()) {
1351       nmethod_needs_unregister = true;
1352     }
1353
1354     // Must happen before state change. Otherwise we have a race condition in
1355     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1356     // transition its state from 'not_entrant' to 'zombie' without having to wait
1357     // for stack scanning.
1358     if (state == not_entrant) {
1359       mark_as_seen_on_stack();
1360       OrderAccess::storestore();
1361     }
1362
1363     // Change state
1364     _state = state;
1365
// NEW in this revision: the saved entry_bci is passed to log_state_change()
// (the old revision called it with no arguments).
1366     // Log the transition once (print saved entry bci for osr nmethods)
1367     log_state_change(entry_bci);
1368
1369     // Remove nmethod from method.
1370     // We need to check if both the _code and _from_compiled_code_entry_point
1371     // refer to this nmethod because there is a race in setting these two fields
1372     // in Method* as seen in bugid 4947125.
1373     // If the vep() points to the zombie nmethod, the memory for the nmethod
1374     // could be flushed and the compiler and vtable stubs could still call
1375     // through it.
1376     if (method() != NULL && (method()->code() == this ||
1377                              method()->from_compiled_entry() == verified_entry_point())) {
1378       HandleMark hm;
1379       method()->clear_code();
1380     }
1381   } // leave critical region under Patching_lock
1382
1383   // When the nmethod becomes zombie it is no longer alive so the
1384   // dependencies must be flushed.
nmethods in the not_entrant
1385   // state will be flushed later when the transition to zombie
1386   // happens or they get unloaded.
1387   if (state == zombie) {
// NOTE(review): the listing (and the new revision's make_not_entrant_or_zombie)
// is truncated here; the trailing '|' closes the side-by-side column.
|