      log_identity(xtty);
      xtty->stamp();
      xtty->end_elem();
    }
  }
  if (PrintCompilation && _state != unloaded) {
    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
  }
}

// Common functionality for both make_not_entrant and make_zombie
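// An nmethod normally steps through the states
// in_use -> not_entrant -> zombie before it can be flushed from the code
// cache; this routine performs the invalidating transitions into
// not_entrant and zombie.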
bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
  assert(!is_zombie(), "should not already be a zombie");

  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in the code below.
  nmethodLocker nml(this);
  methodHandle the_method(method());
  No_Safepoint_Verifier nsv;

  // During patching, depending on the nmethod state, we must notify the GC
  // that code has been unloaded by unregistering the nmethod. We cannot do
  // that while holding the Patching_lock, because we would also need the
  // CodeCache_lock, which would be prone to deadlocks.
  // This flag remembers whether we need to lock and unregister later.
  bool nmethod_needs_unregister = false;

  {
    // Invalidate the osr nmethod before acquiring the patching lock since
    // they both acquire leaf locks and we don't want a deadlock.
    // This logic is equivalent to the logic below for patching the
    // verified entry point of regular methods.
    if (is_osr_method()) {
      // This effectively makes the osr nmethod not entrant.
      invalidate_osr_method();
    }
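    // (invalidate_osr_method() unlinks the nmethod from its holder klass's
    // list of OSR nmethods, so OSR entry lookups can no longer find it.)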

    // Enter critical section. Does not block for safepoint.
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);

    if (_state == state) {
      // Another thread already performed this transition, so there is
      // nothing to do; return false to indicate this.
      return false;
    }

    // The caller can be calling the method statically or through an inline
    // cache call.
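    // Patching the verified entry point reroutes callers that still jump
    // directly into this code: the patched jump lands in the
    // handle_wrong_method stub, which re-resolves the call so execution
    // continues in the interpreter or in a valid compiled entry point.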
    if (!is_osr_method() && !is_not_entrant()) {
      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
                                       SharedRuntime::get_handle_wrong_method_stub());
    }

    if (is_in_use()) {
      // It's a true state change, so mark the method as decompiled.
      // Do it only for the transition from alive.
      inc_decompile_count();
    }

    // If the state is becoming a zombie, signal that the nmethod must be
    // unregistered with the heap.
    // This nmethod may have already been unloaded during a full GC.
    if ((state == zombie) && !is_unloaded()) {
      nmethod_needs_unregister = true;
    }

    // Change state
    _state = state;

    // Log the transition once
    log_state_change();

    // Remove the nmethod from its method.
    // We must check both _code and _from_compiled_code_entry_point, because
    // there is a race in setting these two fields in Method*, as seen in
    // bugid 4947125.
    // If the verified entry point (vep()) still points to the zombie nmethod,
    // the memory for the nmethod could be flushed while the compiler and
    // vtable stubs still call through it.
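    // clear_code() detaches the Method* from this nmethod, so future calls
    // re-enter through the interpreter (via the adapter's c2i entry) until a
    // new nmethod is installed.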
    if (method() != NULL && (method()->code() == this ||
                             method()->from_compiled_entry() == verified_entry_point())) {
      HandleMark hm;
      method()->clear_code();
    }

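    // A not-entrant nmethod may still have activations on thread stacks.
    // Marking it as seen on stack records the current sweeper traversal, so
    // the sweeper converts it to a zombie only after stack scanning shows
    // that no activations remain.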
    if (state == not_entrant) {
      mark_as_seen_on_stack();
    }

  } // leave critical region under Patching_lock

  // When the nmethod becomes zombie it is no longer alive, so the
  // dependencies must be flushed. nmethods in the not_entrant
  // state will be flushed later, when the transition to zombie
  // happens or they get unloaded.
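  // (flush_dependencies() walks this nmethod's dependency records and removes
  // it from the dependent-nmethod lists of the classes it depends on, so
  // later dependency checks no longer see it.)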
  if (state == zombie) {
    {
      // Flushing dependencies must be done before any possible
      // safepoint can sneak in, otherwise the oops used by the
      // dependency logic could have become stale.
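      // This is also the point where the deferred unregistration happens:
      // with the Patching_lock released, taking the CodeCache_lock is safe,
      // and the GC can be notified that this nmethod no longer needs to be
      // scanned.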
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      if (nmethod_needs_unregister) {
        Universe::heap()->unregister_nmethod(this);
      }
      flush_dependencies(NULL);
    }

    // Zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
    // event and it hasn't already been reported for this nmethod, report it
    // now. (The event may have been reported earlier if the GC marked the
    // nmethod for unloading.) JvmtiDeferredEventQueue support means we no
    // longer go to a safepoint here.
    post_compiled_method_unload();

#ifdef ASSERT
    // It's no longer safe to access the oops section since zombie
    // nmethods aren't scanned for GC.
    _oops_are_stale = true;
#endif
  } else {
    assert(state == not_entrant, "other cases may need to be handled differently");
  }

  if (TraceCreateZombies) {

// ... (intervening code elided) ...

    f(md);
  }

  // Visit the Method* itself; it is not embedded in the other places.
  if (_method != NULL) f(_method);
}


// This method is called twice during GC -- once while
// tracing the "active" nmethods on thread stacks during
// the (strong) marking phase, and then again when walking
// the code cache contents during the weak roots processing
// phase. The two uses are distinguished by means of the
// 'do_strong_roots_only' flag, which is true in the first
// case. We want to walk the weak roots in the nmethod
// only in the second case. The weak roots in the nmethod
// are the oops in the ExceptionCache and the InlineCache oops.
void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
  // Make sure the oops are ready to receive visitors.
  assert(NMethodSweeper::is_sweeping(this) || is_marked_for_deoptimization() ||
         (!is_zombie() && !is_unloaded()),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

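  // Start the relocation walk at low_boundary so that oop relocations
  // overwritten by the not-entrant jump are never visited.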
  RelocIterator iter(this, low_boundary);

  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* r = iter.oop_reloc();
      // In this loop, we must only follow those oops directly embedded in
      // the code. Other oops (oop_index > 0) are seen as part of scopes_oops.