src/share/vm/code/nmethod.cpp

*** 1178,1195 ****
    // Set the traversal mark to ensure that the sweeper does 2
    // cleaning passes before moving to zombie.
    set_stack_traversal_mark(NMethodSweeper::traversal_count());
  }
  
! // Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
  bool nmethod::can_not_entrant_be_converted() {
    assert(is_not_entrant(), "must be a non-entrant method");
  
    // Since the nmethod sweeper only does partial sweep the sweeper's traversal
    // count can be greater than the stack traversal count before it hits the
    // nmethod for the second time.
!   return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
  }
  
  void nmethod::inc_decompile_count() {
    if (!is_compiled_by_c2()) return;
    // Could be gated by ProfileTraps, but do not bother...
--- 1178,1198 ----
    // Set the traversal mark to ensure that the sweeper does 2
    // cleaning passes before moving to zombie.
    set_stack_traversal_mark(NMethodSweeper::traversal_count());
  }
  
! // Tell if a non-entrant method can be converted to a zombie (i.e.,
! // there are no activations on the stack, not in use by the VM,
! // and not in use by the ServiceThread)
  bool nmethod::can_not_entrant_be_converted() {
    assert(is_not_entrant(), "must be a non-entrant method");
  
    // Since the nmethod sweeper only does partial sweep the sweeper's traversal
    // count can be greater than the stack traversal count before it hits the
    // nmethod for the second time.
!   return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
!          !is_locked_by_vm();
  }
  
  void nmethod::inc_decompile_count() {
    if (!is_compiled_by_c2()) return;
    // Could be gated by ProfileTraps, but do not bother...

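The new !is_locked_by_vm() guard is not defined in this hunk. A minimal sketch of what it presumably tests, assuming it simply reads the same _lock_count that nmethodLocker::lock_nmethod()/unlock_nmethod() adjust in the last hunk of this page:

  // Hedged sketch (not shown in this webrev): an nmethod counts as "locked
  // by the VM" while any nmethodLocker -- including the one held by a pending
  // JvmtiDeferredEvent -- keeps its _lock_count above zero.
  bool nmethod::is_locked_by_vm() const { return _lock_count > 0; }
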
*** 1292,1313 ****
  }
  
  // Common functionality for both make_not_entrant and make_zombie
  bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
    assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
  
    // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
    nmethodLocker nml(this);
    methodHandle the_method(method());
    No_Safepoint_Verifier nsv;
  
    {
-     // If the method is already zombie there is nothing to do
-     if (is_zombie()) {
-       return false;
-     }
- 
      // invalidate osr nmethod before acquiring the patching lock since
      // they both acquire leaf locks and we don't want a deadlock.
      // This logic is equivalent to the logic below for patching the
      // verified entry point of regular methods.
      if (is_osr_method()) {
--- 1295,1312 ----
  }
  
  // Common functionality for both make_not_entrant and make_zombie
  bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
    assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
+   assert(!is_zombie(), "should not already be a zombie");
  
    // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
    nmethodLocker nml(this);
    methodHandle the_method(method());
    No_Safepoint_Verifier nsv;
  
    {
      // invalidate osr nmethod before acquiring the patching lock since
      // they both acquire leaf locks and we don't want a deadlock.
      // This logic is equivalent to the logic below for patching the
      // verified entry point of regular methods.
      if (is_osr_method()) {

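With the early "already zombie" return gone, callers are now expected not to request a transition on an nmethod that is already a zombie. A hypothetical caller-side pattern (the sweeper is not part of this file, so treat the snippet as an assumption, not the actual sweeper code):

  // Assumed caller-side check: only non-entrant nmethods that can be
  // converted are handed to make_zombie(), so make_not_entrant_or_zombie()
  // can assert(!is_zombie()) instead of silently returning false.
  if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
    nm->make_zombie();   // ends up in make_not_entrant_or_zombie(zombie)
  }
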
*** 1373,1389 ****
      // dependency logic could have become stale.
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      flush_dependencies(NULL);
    }
  
!   {
!     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
!     // and it hasn't already been reported for this nmethod then report it now.
!     // (the event may have been reported earilier if the GC marked it for unloading).
!     Pause_No_Safepoint_Verifier pnsv(&nsv);
      post_compiled_method_unload();
-   }
  
  #ifdef ASSERT
    // It's no longer safe to access the oops section since zombie
    // nmethods aren't scanned for GC.
    _oops_are_stale = true;
--- 1372,1387 ----
      // dependency logic could have become stale.
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      flush_dependencies(NULL);
    }
  
!   // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
!   // event and it hasn't already been reported for this nmethod then
!   // report it now. (The event may have been reported earlier if the GC
!   // marked it for unloading.) JvmtiDeferredEventQueue support means
!   // we no longer go to a safepoint here.
    post_compiled_method_unload();
  
  #ifdef ASSERT
    // It's no longer safe to access the oops section since zombie
    // nmethods aren't scanned for GC.
    _oops_are_stale = true;

*** 1564,1574 ****
    // it's being unloaded there's no way to look it up since the weak
    // ref will have been cleared.
    if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
      assert(!unload_reported(), "already unloaded");
      JvmtiDeferredEvent event =
!       JvmtiDeferredEvent::compiled_method_unload_event(
          _jmethod_id, insts_begin());
      if (SafepointSynchronize::is_at_safepoint()) {
        // Don't want to take the queueing lock. Add it as pending and
        // it will get enqueued later.
        JvmtiDeferredEventQueue::add_pending_event(event);
--- 1562,1572 ----
    // it's being unloaded there's no way to look it up since the weak
    // ref will have been cleared.
    if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
      assert(!unload_reported(), "already unloaded");
      JvmtiDeferredEvent event =
!       JvmtiDeferredEvent::compiled_method_unload_event(this,
          _jmethod_id, insts_begin());
      if (SafepointSynchronize::is_at_safepoint()) {
        // Don't want to take the queueing lock. Add it as pending and
        // it will get enqueued later.
        JvmtiDeferredEventQueue::add_pending_event(event);

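Passing this into the factory is what lets the deferred event keep the nmethod from being flushed until the ServiceThread has posted the unload. A sketch of that factory, assuming it lives in jvmtiImpl.cpp and uses the zombie_ok variant of lock_nmethod() shown in the next hunk; the type tag and union field names are illustrative, not verbatim:

  // Illustrative sketch, not the verbatim implementation: the event records
  // the nmethod and locks it (zombie_ok == true) so the sweeper cannot flush
  // it before the ServiceThread posts the CompiledMethodUnload event.
  JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
      nmethod* nm, jmethodID id, const void* code_begin) {
    JvmtiDeferredEvent event(TYPE_COMPILED_METHOD_UNLOAD);        // assumed type tag
    event._event_data.compiled_method_unload.nm         = nm;     // assumed field names
    event._event_data.compiled_method_unload.method_id  = id;
    event._event_data.compiled_method_unload.code_begin = code_begin;
    nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
    return event;
  }
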
*** 2169,2182 ****
    guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
    _nm = (nmethod*)cb;
    lock_nmethod(_nm);
  }
  
! void nmethodLocker::lock_nmethod(nmethod* nm) {
    if (nm == NULL) return;
    Atomic::inc(&nm->_lock_count);
!   guarantee(!nm->is_zombie(), "cannot lock a zombie method");
  }
  
  void nmethodLocker::unlock_nmethod(nmethod* nm) {
    if (nm == NULL) return;
    Atomic::dec(&nm->_lock_count);
--- 2167,2182 ----
    guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
    _nm = (nmethod*)cb;
    lock_nmethod(_nm);
  }
  
! // Only JvmtiDeferredEvent::compiled_method_unload_event()
! // should pass zombie_ok == true.
! void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
    if (nm == NULL) return;
    Atomic::inc(&nm->_lock_count);
!   guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
  }
  
  void nmethodLocker::unlock_nmethod(nmethod* nm) {
    if (nm == NULL) return;
    Atomic::dec(&nm->_lock_count);
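
The lock taken with zombie_ok == true has to be released once the deferred event has been delivered, otherwise the nmethod could never be flushed. A hypothetical ServiceThread-side helper showing the intended pairing (the helper name and call site are assumptions; only unlock_nmethod() itself appears above):

  // Assumed pairing: after the ServiceThread posts CompiledMethodUnload,
  // drop the extra _lock_count reference taken at enqueue time so the
  // sweeper may eventually flush the (possibly already zombie) nmethod.
  static void post_deferred_unload(nmethod* nm, jmethodID id, const void* code_begin) {
    JvmtiExport::post_compiled_method_unload(id, code_begin);
    nmethodLocker::unlock_nmethod(nm);  // balances lock_nmethod(nm, true)
  }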