--- old/src/share/vm/code/nmethod.cpp	2016-03-16 17:16:55.852560964 +0100
+++ new/src/share/vm/code/nmethod.cpp	2016-03-16 17:16:55.784560962 +0100
@@ -1331,7 +1331,7 @@
       cause->klass()->print_on(log);
     }
   }
   // Unlink the osr method, so we do not look this up again
-  if (is_osr_method()) {
+  if (is_osr_method() && is_in_use()) {
     invalidate_osr_method();
   }
   // If _method is already NULL the Method* is about to be unloaded,
@@ -1387,6 +1387,8 @@
 
 void nmethod::invalidate_osr_method() {
   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
+  // Make sure this is invoked only once
+  assert(is_in_use(), "osr nmethod should be in use");
   // Remove from list of active nmethods
   if (method() != NULL)
     method()->method_holder()->remove_osr_nmethod(this);
@@ -1438,7 +1440,7 @@
     // they both acquire leaf locks and we don't want a deadlock.
     // This logic is equivalent to the logic below for patching the
     // verified entry point of regular methods.
-    if (is_osr_method()) {
+    if (is_osr_method() && is_in_use()) {
       // this effectively makes the osr nmethod not entrant
       invalidate_osr_method();
     }
@@ -1510,7 +1512,7 @@
   // happens or they get unloaded.
   if (state == zombie) {
     {
-      // Flushing dependecies must be done before any possible
+      // Flushing dependencies must be done before any possible
      // safepoint can sneak in, otherwise the oops used by the
      // dependency logic could have become stale.
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@@ -1526,7 +1528,7 @@
 
     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
     // event and it hasn't already been reported for this nmethod then
-    // report it now. The event may have been reported earilier if the GC
+    // report it now. The event may have been reported earlier if the GC
     // marked it for unloading). JvmtiDeferredEventQueue support means
     // we no longer go to a safepoint here.
     post_compiled_method_unload();
@@ -1554,8 +1556,10 @@
 
 void nmethod::flush() {
   // Note that there are no valid oops in the nmethod anymore.
-  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
-  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
+  assert(!is_osr_method() || is_unloaded() || is_zombie(),
+         "osr nmethod must be unloaded or zombie before flushing");
+  assert(is_zombie() || is_osr_method(), "must be a zombie method");
+  assert(is_marked_for_reclamation() || is_osr_method(), "must be marked for reclamation");
 
   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
   assert_locked_or_safepoint(CodeCache_lock);
@@ -1563,9 +1567,9 @@
   // completely deallocate this method
   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
   if (PrintMethodFlushing) {
-    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
+    tty->print_cr("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
                   "/Free CodeCache:" SIZE_FORMAT "Kb",
-                  _compile_id, p2i(this), CodeCache::blob_count(),
+                  is_osr_method() ? "osr" : "", _compile_id, p2i(this), CodeCache::blob_count(),
                   CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
   }
@@ -2917,10 +2921,7 @@
     tty->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
     tty->print(" for method " INTPTR_FORMAT , p2i(method()));
     tty->print(" { ");
-    if (is_in_use())      tty->print("in_use ");
-    if (is_not_entrant()) tty->print("not_entrant ");
-    if (is_zombie())      tty->print("zombie ");
-    if (is_unloaded())    tty->print("unloaded ");
+    tty->print("%s ", state());
     if (on_scavenge_root_list())  tty->print("scavenge_root ");
     tty->print_cr("}:");
   }
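
For context, the nmethod lifecycle that the changes above rely on can be sketched as a small standalone model. This is an illustrative sketch, not HotSpot code: the enum values and the transition table below are assumptions made for the example. It demonstrates the new invariant asserted in nmethod::flush(): a regular nmethod is flushed only as a zombie, while an OSR nmethod may now also be flushed directly from the unloaded state.

    // Illustrative model only; names mirror the patch, not the real sources.
    #include <cassert>

    enum State { in_use, not_entrant, zombie, unloaded, flushed };

    // Returns true if an nmethod may move from 'from' to 'to'.
    static bool can_transition(State from, State to, bool is_osr) {
      switch (to) {
        case not_entrant: return from == in_use;                       // invalidation
        case unloaded:    return from == in_use || from == not_entrant; // GC unloading
        case zombie:      return from == not_entrant || from == unloaded;
        case flushed:
          // Regular nmethods are flushed only as zombies; OSR nmethods may
          // also be flushed once unloaded, since no inline caches point to them.
          return from == zombie || (is_osr && from == unloaded);
        default:          return false;
      }
    }

    int main() {
      assert(can_transition(unloaded, flushed, /*is_osr=*/true));   // new OSR path
      assert(!can_transition(in_use, flushed, /*is_osr=*/false));   // must die first
      return 0;
    }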
"osr" : "",_compile_id, p2i(this), CodeCache::blob_count(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024); } @@ -2917,10 +2921,7 @@ tty->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this)); tty->print(" for method " INTPTR_FORMAT , p2i(method())); tty->print(" { "); - if (is_in_use()) tty->print("in_use "); - if (is_not_entrant()) tty->print("not_entrant "); - if (is_zombie()) tty->print("zombie "); - if (is_unloaded()) tty->print("unloaded "); + tty->print_cr("%s ", state()); if (on_scavenge_root_list()) tty->print("scavenge_root "); tty->print_cr("}:"); } --- old/src/share/vm/code/nmethod.hpp 2016-03-16 17:16:56.172560974 +0100 +++ new/src/share/vm/code/nmethod.hpp 2016-03-16 17:16:56.096560971 +0100 @@ -207,7 +207,7 @@ unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints // Protected by Patching_lock - volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded} + volatile unsigned char _state; // {in_use, not_entrant, zombie, unloaded} volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod @@ -438,7 +438,20 @@ bool is_alive() const { return _state == in_use || _state == not_entrant; } bool is_not_entrant() const { return _state == not_entrant; } bool is_zombie() const { return _state == zombie; } - bool is_unloaded() const { return _state == unloaded; } + bool is_unloaded() const { return _state == unloaded; } + + // returns a string version of the nmethod state + const char* state() const { + switch(_state) { + case in_use: return "in use"; + case not_entrant: return "not_entrant"; + case zombie: return "zombie"; + case unloaded: return "unloaded"; + default: + ShouldNotReachHere(); + return NULL; + } + } #if INCLUDE_RTM_OPT // rtm state accessing and manipulating --- old/src/share/vm/runtime/sweeper.cpp 2016-03-16 17:16:56.600560986 +0100 +++ new/src/share/vm/runtime/sweeper.cpp 2016-03-16 17:16:56.488560983 +0100 @@ -355,8 +355,8 @@ bool forced = _force_sweep; // Force stack scanning if there is only 10% free space in the code cache. - // We force stack scanning only non-profiled code heap gets full, since critical - // allocation go to the non-profiled heap and we must be make sure that there is + // We force stack scanning only if the non-profiled code heap gets full, since critical + // allocations go to the non-profiled heap and we must be make sure that there is // enough space. 
--- old/src/share/vm/runtime/sweeper.cpp	2016-03-16 17:16:56.600560986 +0100
+++ new/src/share/vm/runtime/sweeper.cpp	2016-03-16 17:16:56.488560983 +0100
@@ -355,8 +355,8 @@
   bool forced = _force_sweep;
 
   // Force stack scanning if there is only 10% free space in the code cache.
-  // We force stack scanning only non-profiled code heap gets full, since critical
-  // allocation go to the non-profiled heap and we must be make sure that there is
+  // We force stack scanning only if the non-profiled code heap gets full, since critical
+  // allocations go to the non-profiled heap and we must make sure that there is
   // enough space.
   double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
   if (free_percent <= StartAggressiveSweepingAt) {
@@ -423,12 +423,19 @@
       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        // Save information before potentially flushing the nmethod
         int size = nm->total_size();
         bool is_c2_method = nm->is_compiled_by_c2();
+        bool is_osr = nm->is_osr_method();
+        int compile_id = nm->compile_id();
+        intptr_t address = p2i(nm);
+        const char* state_before = nm->state();
+        const char* state_after = "";
+
         MethodStateChange type = process_nmethod(nm);
         switch (type) {
           case Flushed:
+            state_after = "flushed";
             freed_memory += size;
             ++flushed_count;
             if (is_c2_method) {
@@ -436,9 +443,11 @@
             }
             break;
           case MarkedForReclamation:
+            state_after = "marked for reclamation";
            ++marked_for_reclamation_count;
            break;
          case MadeZombie:
+            state_after = "made zombie";
            ++zombified_count;
            break;
          case None:
@@ -446,7 +455,11 @@
             break;
           default:
             ShouldNotReachHere();
         }
+        if (PrintMethodFlushing && Verbose && type != None) {
+          tty->print_cr("### %s nmethod %3d/" PTR_FORMAT " (%s) %s", is_osr ? "osr" : "", compile_id, address, state_before, state_after);
+        }
       }
+      _seen++;
       handle_safepoint_request();
     }
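
The hunk above copies compile_id, address, and state out of the nmethod before calling process_nmethod(), because a Flushed result means the nmethod's memory may already be freed and must not be touched when logging. A self-contained sketch of that capture-before-processing pattern, with hypothetical types standing in for the sweeper's:

    #include <cstdio>
    #include <cstdint>

    struct Blob { int id; const char* state; };
    enum Change { None, Flushed };

    // Stands in for process_nmethod(): may free the object it is given.
    static Change process(Blob* b) {
      delete b;
      return Flushed;
    }

    int main() {
      Blob* b = new Blob{7, "zombie"};
      int id            = b->id;              // save before 'b' can go away
      uintptr_t address = (uintptr_t)b;
      const char* state = b->state;
      Change c = process(b);                  // 'b' is dangling if Flushed
      if (c == Flushed) {
        // Log only from the saved copies, never through 'b'.
        printf("### nmethod %3d/0x%lx (%s) flushed\n", id, (unsigned long)address, state);
      }
      return 0;
    }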
@@ -533,7 +546,7 @@
   NMethodMarker(nmethod* nm) {
     JavaThread* current = JavaThread::current();
     assert (current->is_Code_cache_sweeper_thread(), "Must be");
-    _thread = (CodeCacheSweeperThread*)JavaThread::current();
+    _thread = (CodeCacheSweeperThread*)current;
     if (!nm->is_zombie() && !nm->is_unloaded()) {
       // Only expose live nmethods for scanning
       _thread->set_scanned_nmethod(nm);
@@ -545,6 +558,10 @@
 };
 
 void NMethodSweeper::release_nmethod(nmethod* nm) {
+  // Make sure the released nmethod is no longer referenced by the sweeper thread
+  CodeCacheSweeperThread* thread = (CodeCacheSweeperThread*)JavaThread::current();
+  thread->set_scanned_nmethod(NULL);
+
   // Clean up any CompiledICHolders
   {
     ResourceMark rm;
@@ -575,7 +592,7 @@
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
-      // Clean inline caches that point to zombie/non-entrant methods
+      // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       SWEEP(nm);
@@ -589,42 +606,44 @@
     // there are no inline caches that refer to it.
     if (nm->is_marked_for_reclamation()) {
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
-      if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), p2i(nm));
-      }
       release_nmethod(nm);
       assert(result == None, "sanity");
       result = Flushed;
     } else {
-      if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), p2i(nm));
-      }
       nm->mark_for_reclamation();
       // Keep track of code cache state change
       _bytes_changed += nm->total_size();
       SWEEP(nm);
       assert(result == None, "sanity");
       result = MarkedForReclamation;
+      assert(nm->is_marked_for_reclamation(), "nmethod must be marked for reclamation");
     }
   } else if (nm->is_not_entrant()) {
     // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_convert_to_zombie()) {
-      // Clear ICStubs to prevent back patching stubs of zombie or unloaded
+      // Clear ICStubs to prevent back patching stubs of zombie or flushed
       // nmethods during the next safepoint (see ICStub::finalize).
       {
         MutexLocker cl(CompiledIC_lock);
         nm->clear_ic_stubs();
       }
-      if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), p2i(nm));
-      }
       // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
       SWEEP(nm);
-      assert(result == None, "sanity");
-      result = MadeZombie;
-      assert(nm->is_zombie(), "nmethod must be zombie");
+      if (nm->is_osr_method()) {
+        // No inline caches will ever point to osr methods, so we can just remove it.
+        // Make sure that we unregistered the nmethod with the heap and flushed all
+        // dependencies before removing the nmethod (done in make_zombie()).
+        assert(nm->is_zombie(), "nmethod must be unregistered");
+        release_nmethod(nm);
+        assert(result == None, "sanity");
+        result = Flushed;
+      } else {
+        assert(result == None, "sanity");
+        result = MadeZombie;
+        assert(nm->is_zombie(), "nmethod must be zombie");
+      }
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
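
The release_nmethod() change above clears the sweeper thread's scanned-nmethod reference before the memory is freed, so the thread never holds a dangling pointer across the release. A sketch of that publish-then-clear discipline, with hypothetical types (SweeperThread, Marker) in place of CodeCacheSweeperThread and NMethodMarker:

    #include <cassert>
    #include <cstddef>

    struct SweeperThread {
      void* _scanned = nullptr;
      void set_scanned(void* p) { _scanned = p; }
    };

    static SweeperThread sweeper;   // stands in for the current sweeper thread

    struct Marker {                 // analogous to NMethodMarker
      explicit Marker(void* p) { sweeper.set_scanned(p); }
      ~Marker()                { sweeper.set_scanned(nullptr); }
    };

    static void release(char* p) {
      sweeper.set_scanned(nullptr); // drop the reference first...
      delete[] p;                   // ...then free, so nothing dangles
    }

    int main() {
      char* blob = new char[16];
      Marker m(blob);
      release(blob);
      assert(sweeper._scanned == nullptr);
      return 0;
    }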
@@ -632,9 +651,13 @@
       SWEEP(nm);
     }
   } else if (nm->is_unloaded()) {
-    // Unloaded code, just make it a zombie
-    if (PrintMethodFlushing && Verbose) {
-      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), p2i(nm));
+    // Code is unloaded, so there are no activations on the stack.
+    // Convert the nmethod to zombie or flush it directly in the OSR case.
+    {
+      // Clean ICs of unloaded nmethods as well because they may reference other
+      // unloaded nmethods that may be flushed earlier in the sweeper cycle.
+      MutexLocker cl(CompiledIC_lock);
+      nm->cleanup_inline_caches();
     }
     if (nm->is_osr_method()) {
       SWEEP(nm);
@@ -643,12 +666,6 @@
       assert(result == None, "sanity");
       result = Flushed;
     } else {
-      {
-        // Clean ICs of unloaded nmethods as well because they may reference other
-        // unloaded nmethods that may be flushed earlier in the sweeper cycle.
-        MutexLocker cl(CompiledIC_lock);
-        nm->cleanup_inline_caches();
-      }
       // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
       SWEEP(nm);
@@ -657,7 +674,7 @@
     }
   } else {
     possibly_flush(nm);
-    // Clean-up all inline caches that point to zombie/non-reentrant methods
+    // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
     MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
     SWEEP(nm);
@@ -668,10 +685,10 @@
 
 void NMethodSweeper::possibly_flush(nmethod* nm) {
   if (UseCodeCacheFlushing) {
-    if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
+    if (!nm->is_locked_by_vm() && !nm->is_native_method()) {
       bool make_not_entrant = false;
 
-      // Do not make native methods and OSR-methods not-entrant
+      // Do not make native methods not-entrant
       nm->dec_hotness_counter();
       // Get the initial value of the hotness counter. This value depends on the
       // ReservedCodeCacheSize
--- old/src/share/vm/runtime/sweeper.hpp	2016-03-16 17:16:56.908560996 +0100
+++ new/src/share/vm/runtime/sweeper.hpp	2016-03-16 17:16:56.816560993 +0100
@@ -45,12 +45,12 @@
 //   and sweep_code_cache() cannot execute at the same time.
 // To reclaim memory, nmethods are first marked as 'not-entrant'. Methods can
 // be made not-entrant by (i) the sweeper, (ii) deoptimization, (iii) dependency
-// invalidation, and (iv) being replaced be a different method version (tiered
-// compilation). Not-entrant nmethod cannot be called by Java threads, but they
-// can still be active on the stack. To ensure that active nmethod are not reclaimed,
+// invalidation, and (iv) being replaced by a different method version (tiered
+// compilation). Not-entrant nmethods cannot be called by Java threads, but they
+// can still be active on the stack. To ensure that active nmethods are not reclaimed,
 // we have to wait until the next marking phase has completed. If a not-entrant
 // nmethod was NOT marked as active, it can be converted to 'zombie' state. To safely
-// remove the nmethod, all inline caches (IC) that point to the the nmethod must be
+// remove the nmethod, all inline caches (IC) that point to the nmethod must be
 // cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
 // state change happens during separate sweeps. It may take at least 3 sweeps before an
 // nmethod's space is freed.
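
The sweeper.hpp comment above notes that it may take at least 3 sweeps before an nmethod's space is freed. A tiny simulation of that claim, under the simplifying assumption of exactly one state change per sweep (the real sweeper also waits for stack scanning and IC cleaning, so it can take longer):

    #include <cstdio>

    enum State { not_entrant, zombie, marked_for_reclamation, freed };

    // One state change per sweep, as described in the comment above.
    static State sweep_once(State s) {
      switch (s) {
        case not_entrant:            return zombie;  // no activations remain
        case zombie:                 return marked_for_reclamation;
        case marked_for_reclamation: return freed;   // ICs cleared, flushable
        default:                     return s;
      }
    }

    int main() {
      State s = not_entrant;
      int sweeps = 0;
      while (s != freed) {
        s = sweep_once(s);
        ++sweeps;
      }
      printf("freed after %d sweeps\n", sweeps);     // prints 3
      return 0;
    }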