1163 CodeBlob* cb = CodeCache::find_blob_unsafe(csc->destination());
1164 if (cb != NULL && cb->is_nmethod()) {
1165 nmethod* nm = (nmethod*)cb;
1166 // Clean inline caches pointing to both zombie and not_entrant methods
1167 if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
1168 }
1169 break;
1170 }
1171 }
1172 }
1173 }
1174
1175 // This is a private interface with the sweeper.
1176 void nmethod::mark_as_seen_on_stack() {
1177 assert(is_not_entrant(), "must be a non-entrant method");
1178 // Set the traversal mark to ensure that the sweeper does 2
1179 // cleaning passes before moving to zombie.
1180 set_stack_traversal_mark(NMethodSweeper::traversal_count());
1181 }
1182
1183 // Tells whether a non-entrant method can be converted to a zombie (i.e., there are no activations on the stack)
1184 bool nmethod::can_not_entrant_be_converted() {
1185 assert(is_not_entrant(), "must be a non-entrant method");
1186
1187 // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
1188 // count can be greater than the stack traversal count before it hits the
1189 // nmethod for the second time.
1190 return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
1191 }
1192
1193 void nmethod::inc_decompile_count() {
1194 if (!is_compiled_by_c2()) return;
1195 // Could be gated by ProfileTraps, but do not bother...
1196 methodOop m = method();
1197 if (m == NULL) return;
1198 methodDataOop mdo = m->method_data();
1199 if (mdo == NULL) return;
1200 // There is a benign race here. See comments in methodDataOop.hpp.
1201 mdo->inc_decompile_count();
1202 }
1203
1204 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
1205
1206 post_compiled_method_unload();
1207
1208 // Since this nmethod is being unloaded, make sure that dependencies
1209 // recorded in instanceKlasses get flushed, and pass a non-NULL closure to
1210 // indicate that this work is being done during a GC.
1277 os::current_thread_id());
1278 } else {
1279 xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1280 os::current_thread_id(),
1281 (_state == zombie ? " zombie='1'" : ""));
1282 }
1283 log_identity(xtty);
1284 xtty->stamp();
1285 xtty->end_elem();
1286 }
1287 }
1288 if (PrintCompilation && _state != unloaded) {
1289 print_on(tty, _state == zombie ? "made zombie " : "made not entrant ");
1290 tty->cr();
1291 }
1292 }
1293
1294 // Common functionality for both make_not_entrant and make_zombie
1295 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
1296 assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1297
1298 // Make sure neither the nmethod nor the method is flushed in case of a safepoint in the code below.
1299 nmethodLocker nml(this);
1300 methodHandle the_method(method());
1301 No_Safepoint_Verifier nsv;
1302
1303 {
1304 // If the method is already a zombie there is nothing to do
1305 if (is_zombie()) {
1306 return false;
1307 }
1308
1309 // Invalidate the osr nmethod before acquiring the patching lock, since
1310 // both operations acquire leaf locks and we don't want a deadlock.
1311 // This logic is equivalent to the logic below for patching the
1312 // verified entry point of regular methods.
1313 if (is_osr_method()) {
1314 // this effectively makes the osr nmethod not entrant
1315 invalidate_osr_method();
1316 }
1317
1318 // Enter critical section. Does not block for safepoint.
1319 MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1320
1321 if (_state == state) {
1322 // Another thread already performed this transition, so there is
1323 // nothing to do; return false to indicate this.
1324 return false;
1325 }
1326
1327 // The caller can be calling the method statically or through an inline
1328 // cache call.
1358
1359 if (state == not_entrant) {
1360 mark_as_seen_on_stack();
1361 }
1362
1363 } // leave critical region under Patching_lock
1364
1365 // When the nmethod becomes a zombie it is no longer alive, so its
1366 // dependencies must be flushed. nmethods in the not_entrant
1367 // state will be flushed later when the transition to zombie
1368 // happens or they get unloaded.
1369 if (state == zombie) {
1370 {
1371 // Flushing dependencies must be done before any possible
1372 // safepoint can sneak in, otherwise the oops used by the
1373 // dependency logic could have become stale.
1374 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1375 flush_dependencies(NULL);
1376 }
1377
1378 {
1379 // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
1380 // and it hasn't already been reported for this nmethod, then report it now.
1381 // (The event may have been reported earlier if the GC marked it for unloading.)
1382 Pause_No_Safepoint_Verifier pnsv(&nsv);
1383 post_compiled_method_unload();
1384 }
1385
1386 #ifdef ASSERT
1387 // It's no longer safe to access the oops section since zombie
1388 // nmethods aren't scanned for GC.
1389 _oops_are_stale = true;
1390 #endif
1391 } else {
1392 assert(state == not_entrant, "other cases may need to be handled differently");
1393 }
1394
1395 if (TraceCreateZombies) {
1396 tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
1397 }
1398
1399 // Make the sweeper aware that there is a zombie method that needs to be removed
1400 NMethodSweeper::notify(this);
1401
1402 return true;
1403 }
1404
1549 if (unload_reported()) {
1550 // During unloading we transition to unloaded and then to zombie
1551 // and the unloading is reported during the first transition.
1552 return;
1553 }
1554
1555 assert(_method != NULL && !is_unloaded(), "just checking");
1556 DTRACE_METHOD_UNLOAD_PROBE(method());
1557
1558 // If a JVMTI agent has enabled the CompiledMethodUnload event then
1559 // post the event. Sometime later this nmethod will be made a zombie
1560 // by the sweeper but the methodOop will not be valid at that point.
1561 // If the _jmethod_id is null then no load event was ever requested
1562 // so don't bother posting the unload. The main reason for this is
1563 // that the jmethodID is a weak reference to the methodOop so if
1564 // it's being unloaded there's no way to look it up since the weak
1565 // ref will have been cleared.
1566 if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
1567 assert(!unload_reported(), "already unloaded");
1568 JvmtiDeferredEvent event =
1569 JvmtiDeferredEvent::compiled_method_unload_event(
1570 _jmethod_id, insts_begin());
1571 if (SafepointSynchronize::is_at_safepoint()) {
1572 // Don't want to take the queueing lock. Add it as pending and
1573 // it will get enqueued later.
1574 JvmtiDeferredEventQueue::add_pending_event(event);
1575 } else {
1576 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1577 JvmtiDeferredEventQueue::enqueue(event);
1578 }
1579 }
1580
1581 // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1582 // any time. As the nmethod is being unloaded now, we mark it as
1583 // having the unload event reported - this will ensure that we don't
1584 // attempt to report the event in the unlikely scenario where the
1585 // event is enabled at the time the nmethod is made a zombie.
1586 set_unload_reported();
1587 }
1588
1589 // This is called at the end of the strong tracing/marking phase of a
2154
2155
2156
2157 void nmethod_init() {
2158 // make sure you didn't forget to adjust the filler fields
2159 assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2160 }
2161
2162
2163 //-------------------------------------------------------------------------------------------
2164
2165
2166 // QQQ might we make this work from a frame??
2167 nmethodLocker::nmethodLocker(address pc) {
2168 CodeBlob* cb = CodeCache::find_blob(pc);
2169 guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
2170 _nm = (nmethod*)cb;
2171 lock_nmethod(_nm);
2172 }
2173
2174 void nmethodLocker::lock_nmethod(nmethod* nm) {
2175 if (nm == NULL) return;
2176 Atomic::inc(&nm->_lock_count);
2177 guarantee(!nm->is_zombie(), "cannot lock a zombie method");
2178 }
2179
2180 void nmethodLocker::unlock_nmethod(nmethod* nm) {
2181 if (nm == NULL) return;
2182 Atomic::dec(&nm->_lock_count);
2183 guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
2184 }
2185
2186
2187 // -----------------------------------------------------------------------------
2188 // nmethod::get_deopt_original_pc
2189 //
2190 // Return the original PC for the given PC if:
2191 // (a) the given PC belongs to a nmethod and
2192 // (b) it is a deopt PC
2193 address nmethod::get_deopt_original_pc(const frame* fr) {
2194 if (fr->cb() == NULL) return NULL;
2195
2196 nmethod* nm = fr->cb()->as_nmethod_or_null();
2197 if (nm != NULL && nm->is_deopt_pc(fr->pc()))
//-------------------------------------------------------------------------------------------
// The same region after the patch; the revised version, with updated line numbers, follows.
1163 CodeBlob* cb = CodeCache::find_blob_unsafe(csc->destination());
1164 if (cb != NULL && cb->is_nmethod()) {
1165 nmethod* nm = (nmethod*)cb;
1166 // Clean inline caches pointing to both zombie and not_entrant methods
1167 if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
1168 }
1169 break;
1170 }
1171 }
1172 }
1173 }
1174
1175 // This is a private interface with the sweeper.
1176 void nmethod::mark_as_seen_on_stack() {
1177 assert(is_not_entrant(), "must be a non-entrant method");
1178 // Set the traversal mark to ensure that the sweeper does 2
1179 // cleaning passes before moving to zombie.
1180 set_stack_traversal_mark(NMethodSweeper::traversal_count());
1181 }
1182
1183 // Tells whether a non-entrant method can be converted to a zombie (i.e.,
1184 // there are no activations on the stack, it is not in use by the VM,
1185 // and it is not in use by the ServiceThread)
1186 bool nmethod::can_not_entrant_be_converted() {
1187 assert(is_not_entrant(), "must be a non-entrant method");
1188
1189 // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
1190 // count can be greater than the stack traversal count before it hits the
1191 // nmethod for the second time.
1192 return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
1193 !is_locked_by_vm();
1194 }
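
// A minimal standalone model (plain C++, invented names, not part of nmethod.cpp)
// of the two-pass handshake implemented by mark_as_seen_on_stack() and the
// stack_traversal_mark()+1 < traversal_count() test above: conversion is only
// allowed once two full sweeps have completed since the nmethod was last seen
// on a stack.
#include <cassert>

struct SweepModel {
  long traversal_count;       // incremented once per completed sweep
  long stack_traversal_mark;  // set when the nmethod is seen on a stack

  void mark_as_seen_on_stack() { stack_traversal_mark = traversal_count; }
  bool can_convert() const { return stack_traversal_mark + 1 < traversal_count; }
};

int main() {
  SweepModel m{5, 0};
  m.mark_as_seen_on_stack();  // mark == 5
  assert(!m.can_convert());   // 5 + 1 < 5 is false: same sweep
  ++m.traversal_count;        // sweep 6 completes
  assert(!m.can_convert());   // 5 + 1 < 6 is false: only one clean pass so far
  ++m.traversal_count;        // sweep 7 completes
  assert(m.can_convert());    // 5 + 1 < 7 is true: two clean passes have elapsed
  return 0;
}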
1195
1196 void nmethod::inc_decompile_count() {
1197 if (!is_compiled_by_c2()) return;
1198 // Could be gated by ProfileTraps, but do not bother...
1199 methodOop m = method();
1200 if (m == NULL) return;
1201 methodDataOop mdo = m->method_data();
1202 if (mdo == NULL) return;
1203 // There is a benign race here. See comments in methodDataOop.hpp.
1204 mdo->inc_decompile_count();
1205 }
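
// Sketch of the "benign race" noted above, in portable C++ (illustrative, not
// the HotSpot implementation). HotSpot does a plain increment and tolerates a
// lost update because the counter only feeds a heuristic; in standard C++ an
// unsynchronized increment would be a data race, so a relaxed atomic is the
// closest legal analogue: no locking, no ordering, just an approximate count.
#include <atomic>

std::atomic<int> decompile_count{0};  // hypothetical stand-in for the mdo counter

void inc_decompile_count_model() {
  decompile_count.fetch_add(1, std::memory_order_relaxed);
}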
1206
1207 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
1208
1209 post_compiled_method_unload();
1210
1211 // Since this nmethod is being unloaded, make sure that dependencies
1212 // recorded in instanceKlasses get flushed, and pass a non-NULL closure to
1213 // indicate that this work is being done during a GC.
1280 os::current_thread_id());
1281 } else {
1282 xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1283 os::current_thread_id(),
1284 (_state == zombie ? " zombie='1'" : ""));
1285 }
1286 log_identity(xtty);
1287 xtty->stamp();
1288 xtty->end_elem();
1289 }
1290 }
1291 if (PrintCompilation && _state != unloaded) {
1292 print_on(tty, _state == zombie ? "made zombie " : "made not entrant ");
1293 tty->cr();
1294 }
1295 }
1296
1297 // Common functionality for both make_not_entrant and make_zombie
1298 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
1299 assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1300 assert(!is_zombie(), "should not already be a zombie");
1301
1302 // Make sure neither the nmethod nor the method is flushed in case of a safepoint in the code below.
1303 nmethodLocker nml(this);
1304 methodHandle the_method(method());
1305 No_Safepoint_Verifier nsv;
1306
1307 {
1308 // Invalidate the osr nmethod before acquiring the patching lock, since
1309 // both operations acquire leaf locks and we don't want a deadlock.
1310 // This logic is equivalent to the logic below for patching the
1311 // verified entry point of regular methods.
1312 if (is_osr_method()) {
1313 // this effectively makes the osr nmethod not entrant
1314 invalidate_osr_method();
1315 }
1316
1317 // Enter critical section. Does not block for safepoint.
1318 MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1319
1320 if (_state == state) {
1321 // Another thread already performed this transition, so there is
1322 // nothing to do; return false to indicate this.
1323 return false;
1324 }
1325
1326 // The caller can be calling the method statically or through an inline
1327 // cache call.
1357
1358 if (state == not_entrant) {
1359 mark_as_seen_on_stack();
1360 }
1361
1362 } // leave critical region under Patching_lock
1363
1364 // When the nmethod becomes a zombie it is no longer alive, so its
1365 // dependencies must be flushed. nmethods in the not_entrant
1366 // state will be flushed later when the transition to zombie
1367 // happens or they get unloaded.
1368 if (state == zombie) {
1369 {
1370 // Flushing dependencies must be done before any possible
1371 // safepoint can sneak in, otherwise the oops used by the
1372 // dependency logic could have become stale.
1373 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1374 flush_dependencies(NULL);
1375 }
1376
1377 // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1378 // event and it hasn't already been reported for this nmethod, then
1379 // report it now. (The event may have been reported earlier if the GC
1380 // marked it for unloading.) JvmtiDeferredEventQueue support means
1381 // we no longer need to go to a safepoint here.
1382 post_compiled_method_unload();
1383
1384 #ifdef ASSERT
1385 // It's no longer safe to access the oops section since zombie
1386 // nmethods aren't scanned for GC.
1387 _oops_are_stale = true;
1388 #endif
1389 } else {
1390 assert(state == not_entrant, "other cases may need to be handled differently");
1391 }
1392
1393 if (TraceCreateZombies) {
1394 tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
1395 }
1396
1397 // Make the sweeper aware that there is a zombie method that needs to be removed
1398 NMethodSweeper::notify(this);
1399
1400 return true;
1401 }
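
// Illustrative standalone sketch (invented names) of the transition protocol
// above: take the patching lock, and if another thread already moved the
// method to the requested state, return false so that exactly one caller
// performs the follow-up work (flushing dependencies, posting events).
#include <mutex>

enum State { in_use, not_entrant, zombie };

struct MethodModel {
  std::mutex patching_lock;
  State state = in_use;

  bool make(State target) {
    std::lock_guard<std::mutex> guard(patching_lock);
    if (state == target) return false;  // lost the race: already transitioned
    state = target;
    return true;  // this caller owns the side effects of the transition
  }
};

int main() {
  MethodModel m;
  bool first  = m.make(not_entrant);  // true: performed the transition
  bool second = m.make(not_entrant);  // false: nothing left to do
  return (first && !second) ? 0 : 1;
}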
1402
1547 if (unload_reported()) {
1548 // During unloading we transition to unloaded and then to zombie
1549 // and the unloading is reported during the first transition.
1550 return;
1551 }
1552
1553 assert(_method != NULL && !is_unloaded(), "just checking");
1554 DTRACE_METHOD_UNLOAD_PROBE(method());
1555
1556 // If a JVMTI agent has enabled the CompiledMethodUnload event then
1557 // post the event. Sometime later this nmethod will be made a zombie
1558 // by the sweeper but the methodOop will not be valid at that point.
1559 // If the _jmethod_id is null then no load event was ever requested
1560 // so don't bother posting the unload. The main reason for this is
1561 // that the jmethodID is a weak reference to the methodOop so if
1562 // it's being unloaded there's no way to look it up since the weak
1563 // ref will have been cleared.
1564 if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
1565 assert(!unload_reported(), "already unloaded");
1566 JvmtiDeferredEvent event =
1567 JvmtiDeferredEvent::compiled_method_unload_event(this,
1568 _jmethod_id, insts_begin());
1569 if (SafepointSynchronize::is_at_safepoint()) {
1570 // Don't want to take the queueing lock. Add it as pending and
1571 // it will get enqueued later.
1572 JvmtiDeferredEventQueue::add_pending_event(event);
1573 } else {
1574 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1575 JvmtiDeferredEventQueue::enqueue(event);
1576 }
1577 }
1578
1579 // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1580 // any time. As the nmethod is being unloaded now, we mark it as
1581 // having the unload event reported - this will ensure that we don't
1582 // attempt to report the event in the unlikely scenario where the
1583 // event is enabled at the time the nmethod is made a zombie.
1584 set_unload_reported();
1585 }
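
// Standalone sketch of the deferral pattern above (illustrative names, not the
// JVMTI implementation): at a safepoint the poster must not take the service
// lock, so the event is parked on a pending list that the service thread picks
// up later; outside a safepoint it is enqueued directly under the lock.
#include <mutex>
#include <vector>

struct Event { int id; };

struct DeferredQueue {
  std::mutex lock;
  std::vector<Event> queue;    // drained by a service thread (not shown)
  std::vector<Event> pending;  // filled at "safepoints" without taking the lock

  void post(const Event& e, bool at_safepoint) {
    if (at_safepoint) {
      pending.push_back(e);    // model assumes no concurrent posters at a safepoint
    } else {
      std::lock_guard<std::mutex> g(lock);
      queue.push_back(e);
    }
  }
};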
1586
1587 // This is called at the end of the strong tracing/marking phase of a
2152
2153
2154
2155 void nmethod_init() {
2156 // make sure you didn't forget to adjust the filler fields
2157 assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2158 }
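
// The same word-alignment invariant expressed as a compile-time check in
// portable C++ (illustrative type; HotSpot's oopSize is effectively the
// machine word size):
struct Padded { char tag; void* body; };  // hypothetical word-aligned layout
static_assert(sizeof(Padded) % sizeof(void*) == 0,
              "size must be a multiple of a word");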
2159
2160
2161 //-------------------------------------------------------------------------------------------
2162
2163
2164 // QQQ might we make this work from a frame??
2165 nmethodLocker::nmethodLocker(address pc) {
2166 CodeBlob* cb = CodeCache::find_blob(pc);
2167 guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
2168 _nm = (nmethod*)cb;
2169 lock_nmethod(_nm);
2170 }
2171
2172 // Only JvmtiDeferredEvent::compiled_method_unload_event()
2173 // should pass zombie_ok == true.
2174 void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
2175 if (nm == NULL) return;
2176 Atomic::inc(&nm->_lock_count);
2177 guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
2178 }
2179
2180 void nmethodLocker::unlock_nmethod(nmethod* nm) {
2181 if (nm == NULL) return;
2182 Atomic::dec(&nm->_lock_count);
2183 guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
2184 }
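
// Minimal standalone model (invented names) of the counting-lock idiom above:
// lock/unlock adjust an atomic count, the count is bumped before the zombie
// check, and only the one caller that legitimately holds a zombie (the
// deferred CompiledMethodUnload event) passes zombie_ok == true.
#include <atomic>
#include <cassert>

struct NMethodModel {
  std::atomic<int> lock_count{0};
  bool zombie = false;
};

void lock_nm(NMethodModel* nm, bool zombie_ok = false) {
  if (nm == nullptr) return;
  nm->lock_count.fetch_add(1);       // count first, then check, mirroring the code above
  assert(zombie_ok || !nm->zombie);  // stands in for the guarantee()
}

void unlock_nm(NMethodModel* nm) {
  if (nm == nullptr) return;
  int old = nm->lock_count.fetch_sub(1);
  assert(old >= 1);                  // unmatched lock/unlock otherwise
}

int main() {
  NMethodModel nm;
  lock_nm(&nm);    // e.g., from nmethodLocker's constructor
  unlock_nm(&nm);  // e.g., from its destructor
  return 0;
}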
2185
2186
2187 // -----------------------------------------------------------------------------
2188 // nmethod::get_deopt_original_pc
2189 //
2190 // Return the original PC for the given PC if:
2191 // (a) the given PC belongs to a nmethod and
2192 // (b) it is a deopt PC
2193 address nmethod::get_deopt_original_pc(const frame* fr) {
2194 if (fr->cb() == NULL) return NULL;
2195
2196 nmethod* nm = fr->cb()->as_nmethod_or_null();
2197 if (nm != NULL && nm->is_deopt_pc(fr->pc()))