--- old/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	2019-02-20 22:12:19.456374827 +0100
+++ new/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	2019-02-20 22:12:18.928365862 +0100
@@ -27,12 +27,12 @@
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zLock.inline.hpp"
 #include "gc/z/zOopClosures.hpp"
-#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethod.hpp"
 #include "gc/z/zThreadLocalData.hpp"
 #include "logging/log.hpp"
 
 bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
-  ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));
+  ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
   log_trace(nmethod, barrier)("Entered critical zone for %p", nm);
 
   if (!is_armed(nm)) {
--- old/src/hotspot/share/gc/z/zCollectedHeap.cpp	2019-02-20 22:12:20.812397850 +0100
+++ new/src/hotspot/share/gc/z/zCollectedHeap.cpp	2019-02-20 22:12:20.240388138 +0100
@@ -27,7 +27,7 @@
 #include "gc/z/zCollectedHeap.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
-#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethod.hpp"
 #include "gc/z/zServiceability.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zUtils.inline.hpp"
@@ -255,11 +255,11 @@
 }
 
 void ZCollectedHeap::register_nmethod(nmethod* nm) {
-  ZNMethodTable::register_nmethod(nm);
+  ZNMethod::register_nmethod(nm);
 }
 
 void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
-  ZNMethodTable::unregister_nmethod(nm);
+  ZNMethod::unregister_nmethod(nm);
 }
 
 void ZCollectedHeap::verify_nmethod(nmethod* nm) {
--- old/src/hotspot/share/gc/z/zNMethodTable.cpp	2019-02-20 22:12:22.112419922 +0100
+++ new/src/hotspot/share/gc/z/zNMethodTable.cpp	2019-02-20 22:12:21.580410889 +0100
@@ -34,6 +34,7 @@
 #include "gc/z/zNMethodClosure.hpp"
 #include "gc/z/zNMethodData.hpp"
 #include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethodTableEntry.hpp"
 #include "gc/z/zNMethodTableIteration.hpp"
 #include "gc/z/zOopClosures.inline.hpp"
 #include "gc/z/zTask.hpp"
@@ -51,14 +52,6 @@
 size_t ZNMethodTable::_nunregistered = 0;
 ZNMethodTableIteration ZNMethodTable::_iteration;
 
-static ZNMethodData* gc_data(const nmethod* nm) {
-  return nm->gc_data();
-}
-
-static void set_gc_data(nmethod* nm, ZNMethodData* data) {
-  return nm->set_gc_data(data);
-}
-
 ZNMethodTableEntry* ZNMethodTable::create(size_t size) {
   void* const mem = ZNMethodAllocator::allocate(size * sizeof(ZNMethodTableEntry));
   return ::new (mem) ZNMethodTableEntry[size];
@@ -68,61 +61,6 @@
   ZNMethodAllocator::free(table);
 }
 
-void ZNMethodTable::attach_gc_data(nmethod* nm) {
-  GrowableArray<oop*> immediate_oops;
-  bool non_immediate_oops = false;
-
-  // Find all oops relocations
-  RelocIterator iter(nm);
-  while (iter.next()) {
-    if (iter.type() != relocInfo::oop_type) {
-      // Not an oop
-      continue;
-    }
-
-    oop_Relocation* r = iter.oop_reloc();
-
-    if (!r->oop_is_immediate()) {
-      // Non-immediate oop found
-      non_immediate_oops = true;
-      continue;
-    }
-
-    if (r->oop_value() != NULL) {
-      // Non-NULL immediate oop found. NULL oops can safely be
-      // ignored since the method will be re-registered if they
-      // are later patched to be non-NULL.
-      immediate_oops.push(r->oop_addr());
-    }
-  }
-
-  // Attach GC data to nmethod
-  ZNMethodData* data = gc_data(nm);
-  if (data == NULL) {
-    data = ZNMethodData::create(nm);
-    set_gc_data(nm, data);
-  }
-
-  // Attach oops in GC data
-  ZNMethodDataOops* const new_oops = ZNMethodDataOops::create(immediate_oops, non_immediate_oops);
-  ZNMethodDataOops* const old_oops = data->swap_oops(new_oops);
-  ZNMethodDataOops::destroy(old_oops);
-}
-
-void ZNMethodTable::detach_gc_data(nmethod* nm) {
-  // Destroy GC data
-  ZNMethodData::destroy(gc_data(nm));
-  set_gc_data(nm, NULL);
-}
-
-ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
-  ZNMethodData* const data = gc_data(nm);
-  if (data == NULL) {
-    return NULL;
-  }
-  return data->lock();
-}
-
 size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
   assert(is_power_of_2(size), "Invalid size");
   const size_t mask = size - 1;
@@ -238,60 +176,6 @@
   }
 }
 
-void ZNMethodTable::log_register(const nmethod* nm) {
-  LogTarget(Trace, gc, nmethod) log;
-  if (!log.is_enabled()) {
-    return;
-  }
-
-  const ZNMethodDataOops* const oops = gc_data(nm)->oops();
-
-  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
-            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
-            nm->method()->method_holder()->external_name(),
-            nm->method()->name()->as_C_string(),
-            p2i(nm),
-            nm->compiler_name(),
-            nm->oops_count() - 1,
-            oops->immediates_count(),
-            oops->has_non_immediates() ? "Yes" : "No");
-
-  LogTarget(Trace, gc, nmethod, oops) log_oops;
-  if (!log_oops.is_enabled()) {
-    return;
-  }
-
-  // Print nmethod oops table
-  oop* const begin = nm->oops_begin();
-  oop* const end = nm->oops_end();
-  for (oop* p = begin; p < end; p++) {
-    log_oops.print("  Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
-                   (p - begin), p2i(*p), (*p)->klass()->external_name());
-  }
-
-  // Print nmethod immediate oops
-  if (oops->immediates_count() > 0) {
-    oop** const begin = oops->immediates_begin();
-    oop** const end = oops->immediates_end();
-    for (oop** p = begin; p < end; p++) {
-      log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
-                     (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
-    }
-  }
-}
-
-void ZNMethodTable::log_unregister(const nmethod* nm) {
-  LogTarget(Debug, gc, nmethod) log;
-  if (!log.is_enabled()) {
-    return;
-  }
-
-  log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
-            nm->method()->method_holder()->external_name(),
-            nm->method()->name()->as_C_string(),
-            p2i(nm));
-}
-
 size_t ZNMethodTable::registered_nmethods() {
   return _nregistered;
 }
@@ -302,16 +186,10 @@
 
 void ZNMethodTable::register_nmethod(nmethod* nm) {
   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
-  ResourceMark rm;
 
   // Grow/Shrink/Prune table if needed
   rebuild_if_needed();
 
-  // Create and attach gc data
-  attach_gc_data(nm);
-
-  log_register(nm);
-
   // Insert new entry
   if (register_entry(_table, _size, nm)) {
     // New entry registered. When register_entry() instead returns
@@ -319,9 +197,6 @@
     // to increase number of registered entries in that case.
     _nregistered++;
   }
-
-  // Disarm nmethod entry barrier
-  disarm_nmethod(nm);
 }
 
 void ZNMethodTable::wait_until_iteration_done() {
@@ -335,29 +210,10 @@
 void ZNMethodTable::unregister_nmethod(nmethod* nm) {
   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 
-  if (Thread::current()->is_Code_cache_sweeper_thread()) {
-    // The sweeper must wait for any ongoing iteration to complete
-    // before it can unregister an nmethod.
-    ZNMethodTable::wait_until_iteration_done();
-  }
-
-  ResourceMark rm;
-
-  log_unregister(nm);
-
   // Remove entry
   unregister_entry(_table, _size, nm);
   _nunregistered++;
   _nregistered--;
-
-  detach_gc_data(nm);
-}
-
-void ZNMethodTable::disarm_nmethod(nmethod* nm) {
-  BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
-  if (bs != NULL) {
-    bs->disarm(nm);
-  }
 }
 
 void ZNMethodTable::nmethods_do_begin() {
@@ -383,187 +239,6 @@
   CodeCache_lock->notify_all();
 }
 
-void ZNMethodTable::oops_do(nmethod* nm, OopClosure* cl) {
-  // Process oops table
-  oop* const begin = nm->oops_begin();
-  oop* const end = nm->oops_end();
-  for (oop* p = begin; p < end; p++) {
-    if (*p != Universe::non_oop_word()) {
-      cl->do_oop(p);
-    }
-  }
-
-  ZNMethodDataOops* const oops = gc_data(nm)->oops();
-
-  // Process immediate oops
-  if (oops->immediates_count() > 0) {
-    oop** const begin = oops->immediates_begin();
-    oop** const end = oops->immediates_end();
-    for (oop** p = begin; p < end; p++) {
-      if (**p != Universe::non_oop_word()) {
-        cl->do_oop(*p);
-      }
-    }
-  }
-
-  // Process non-immediate oops
-  if (oops->has_non_immediates()) {
-    nm->fix_oop_relocations();
-  }
-}
-
-class ZNMethodToOopsDo : public ZNMethodClosure {
-private:
-  OopClosure* _cl;
-
-public:
-  ZNMethodToOopsDo(OopClosure* cl) :
-      _cl(cl) {}
-
-  void do_nmethod(nmethod* nm) {
-    ZNMethodTable::oops_do(nm, _cl);
-  }
-};
-
-void ZNMethodTable::oops_do(OopClosure* cl) {
-  ZNMethodToOopsDo nm_cl(cl);
-  nmethods_do(&nm_cl);
-}
-
 void ZNMethodTable::nmethods_do(ZNMethodClosure* cl) {
   _iteration.nmethods_do(cl);
 }
-
-class ZNMethodTableUnlinkClosure : public ZNMethodClosure {
-private:
-  bool          _unloading_occurred;
-  volatile bool _failed;
-
-  void set_failed() {
-    Atomic::store(true, &_failed);
-  }
-
-public:
-  ZNMethodTableUnlinkClosure(bool unloading_occurred) :
-      _unloading_occurred(unloading_occurred),
-      _failed(false) {}
-
-  virtual void do_nmethod(nmethod* nm) {
-    if (failed()) {
-      return;
-    }
-
-    if (!nm->is_alive()) {
-      return;
-    }
-
-    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));
-
-    if (nm->is_unloading()) {
-      // Unlinking of the dependencies must happen before the
-      // handshake separating unlink and purge.
-      nm->flush_dependencies(false /* delete_immediately */);
-
-      // We don't need to take the lock when unlinking nmethods from
-      // the Method, because it is only concurrently unlinked by
-      // the entry barrier, which acquires the per nmethod lock.
-      nm->unlink_from_method(false /* acquire_lock */);
-      return;
-    }
-
-    // Heal oops and disarm
-    ZNMethodOopClosure cl;
-    ZNMethodTable::oops_do(nm, &cl);
-    ZNMethodTable::disarm_nmethod(nm);
-
-    // Clear compiled ICs and exception caches
-    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
-      set_failed();
-    }
-  }
-
-  bool failed() const {
-    return Atomic::load(&_failed);
-  }
-};
-
-class ZNMethodTableUnlinkTask : public ZTask {
-private:
-  ZNMethodTableUnlinkClosure _cl;
-  ICRefillVerifier*          _verifier;
-
-public:
-  ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
-      ZTask("ZNMethodTableUnlinkTask"),
-      _cl(unloading_occurred),
-      _verifier(verifier) {
-    ZNMethodTable::nmethods_do_begin();
-  }
-
-  ~ZNMethodTableUnlinkTask() {
-    ZNMethodTable::nmethods_do_end();
-  }
-
-  virtual void work() {
-    ICRefillVerifierMark mark(_verifier);
-    ZNMethodTable::nmethods_do(&_cl);
-  }
-
-  bool success() const {
-    return !_cl.failed();
-  }
-};
-
-void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
-  for (;;) {
-    ICRefillVerifier verifier;
-
-    {
-      ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
-      workers->run_concurrent(&task);
-      if (task.success()) {
-        return;
-      }
-    }
-
-    // Cleaning failed because we ran out of transitional IC stubs,
-    // so we have to refill and try again. Refilling requires taking
-    // a safepoint, so we temporarily leave the suspendible thread set.
-    SuspendibleThreadSetLeaver sts;
-    InlineCacheBuffer::refill_ic_stubs();
-  }
-}
-
-class ZNMethodTablePurgeClosure : public ZNMethodClosure {
-public:
-  virtual void do_nmethod(nmethod* nm) {
-    if (nm->is_alive() && nm->is_unloading()) {
-      nm->make_unloaded();
-    }
-  }
-};
-
-class ZNMethodTablePurgeTask : public ZTask {
-private:
-  ZNMethodTablePurgeClosure _cl;
-
-public:
-  ZNMethodTablePurgeTask() :
-      ZTask("ZNMethodTablePurgeTask"),
-      _cl() {
-    ZNMethodTable::nmethods_do_begin();
-  }
-
-  ~ZNMethodTablePurgeTask() {
-    ZNMethodTable::nmethods_do_end();
-  }
-
-  virtual void work() {
-    ZNMethodTable::nmethods_do(&_cl);
-  }
-};
-
-void ZNMethodTable::purge(ZWorkers* workers) {
-  ZNMethodTablePurgeTask task;
-  workers->run_concurrent(&task);
-}
--- old/src/hotspot/share/gc/z/zNMethodTable.hpp	2019-02-20 22:12:23.420442130 +0100
+++ new/src/hotspot/share/gc/z/zNMethodTable.hpp	2019-02-20 22:12:22.892433165 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,17 +24,12 @@
 #ifndef SHARE_GC_Z_ZNMETHODTABLE_HPP
 #define SHARE_GC_Z_ZNMETHODTABLE_HPP
 
-#include "gc/z/zArray.hpp"
-#include "gc/z/zGlobals.hpp"
-#include "gc/z/zLock.hpp"
-#include "gc/z/zNMethodTableEntry.hpp"
 #include "gc/z/zNMethodTableIteration.hpp"
 #include "memory/allocation.hpp"
 
 class nmethod;
 class ZNMethodClosure;
-class ZNMethodData;
-class ZNMethodDataOops;
+class ZNMethodTableEntry;
 class ZWorkers;
 
 class ZNMethodTable : public AllStatic {
@@ -48,36 +43,23 @@
   static ZNMethodTableEntry* create(size_t size);
   static void destroy(ZNMethodTableEntry* table);
 
-  static void attach_gc_data(nmethod* nm);
-  static void detach_gc_data(nmethod* nm);
-
   static size_t first_index(const nmethod* nm, size_t size);
   static size_t next_index(size_t prev_index, size_t size);
 
-  static void wait_until_iteration_done();
-
   static bool register_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm);
   static void unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm);
 
   static void rebuild(size_t new_size);
   static void rebuild_if_needed();
 
-  static void log_register(const nmethod* nm);
-  static void log_unregister(const nmethod* nm);
-
 public:
   static size_t registered_nmethods();
   static size_t unregistered_nmethods();
 
   static void register_nmethod(nmethod* nm);
   static void unregister_nmethod(nmethod* nm);
-  static void disarm_nmethod(nmethod* nm);
 
-  static ZReentrantLock* lock_for_nmethod(nmethod* nm);
-
-  static void oops_do(OopClosure* cl);
-
-  static void oops_do(nmethod* nm, OopClosure* cl);
+  static void wait_until_iteration_done();
 
   static void nmethods_do_begin();
   static void nmethods_do_end();
--- old/src/hotspot/share/gc/z/zRootsIterator.cpp	2019-02-20 22:12:24.772465085 +0100
+++ new/src/hotspot/share/gc/z/zRootsIterator.cpp	2019-02-20 22:12:24.236455984 +0100
@@ -33,7 +33,7 @@
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zBarrierSetNMethod.hpp"
 #include "gc/z/zGlobals.hpp"
-#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethod.hpp"
 #include "gc/z/zOopClosures.inline.hpp"
 #include "gc/z/zRootsIterator.hpp"
 #include "gc/z/zStat.hpp"
@@ -175,7 +175,7 @@
   if (ClassUnloading) {
     nmethod::oops_do_marking_prologue();
   } else {
-    ZNMethodTable::nmethods_do_begin();
+    ZNMethod::oops_do_begin();
   }
 }
 
@@ -185,7 +185,7 @@
   if (ClassUnloading) {
     nmethod::oops_do_marking_epilogue();
   } else {
-    ZNMethodTable::nmethods_do_end();
+    ZNMethod::oops_do_end();
   }
 
   JvmtiExport::gc_epilogue();
@@ -232,7 +232,7 @@
 
 void ZRootsIterator::do_code_cache(ZRootsIteratorClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseRootsCodeCache);
-  ZNMethodTable::oops_do(cl);
+  ZNMethod::oops_do(cl);
 }
 
 void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl, bool visit_jvmti_weak_export) {
--- old/src/hotspot/share/gc/z/zUnload.cpp	2019-02-20 22:12:26.160488651 +0100
+++ new/src/hotspot/share/gc/z/zUnload.cpp	2019-02-20 22:12:25.632479687 +0100
@@ -30,7 +30,7 @@
 #include "gc/shared/gcBehaviours.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zLock.inline.hpp"
-#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethod.hpp"
 #include "gc/z/zOopClosures.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zUnload.hpp"
@@ -75,7 +75,7 @@
 public:
   virtual bool is_unloading(CompiledMethod* method) const {
     nmethod* const nm = method->as_nmethod();
-    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
     if (lock == NULL) {
       return is_unloading(nm);
     } else {
@@ -89,7 +89,7 @@
 public:
   virtual bool lock(CompiledMethod* method) {
     nmethod* const nm = method->as_nmethod();
-    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
     if (lock != NULL) {
       lock->lock();
     }
@@ -98,7 +98,7 @@
 
   virtual void unlock(CompiledMethod* method) {
     nmethod* const nm = method->as_nmethod();
-    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
     if (lock != NULL) {
       lock->unlock();
     }
@@ -110,7 +110,7 @@
     }
 
     nmethod* const nm = method->as_nmethod();
-    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
     return lock == NULL || lock->is_owned();
   }
 };
@@ -149,7 +149,7 @@
 
   Klass::clean_weak_klass_links(unloading_occurred);
 
-  ZNMethodTable::unlink(_workers, unloading_occurred);
+  ZNMethod::unlink(_workers, unloading_occurred);
 
   DependencyContext::cleaning_end();
 }
@@ -157,7 +157,7 @@
 void ZUnload::purge() {
   {
     SuspendibleThreadSetJoiner sts;
-    ZNMethodTable::purge(_workers);
+    ZNMethod::purge(_workers);
   }
 
   ClassLoaderDataGraph::purge();
--- /dev/null	2019-02-14 11:31:37.500399000 +0100
+++ new/src/hotspot/share/gc/z/zNMethod.cpp	2019-02-20 22:12:27.012503117 +0100
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "code/relocInfo.hpp"
+#include "code/nmethod.hpp"
+#include "code/icBuffer.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetNMethod.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zNMethod.hpp"
+#include "gc/z/zNMethodClosure.hpp"
+#include "gc/z/zNMethodData.hpp"
+#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zTask.hpp"
+#include "gc/z/zWorkers.hpp"
+#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "utilities/debug.hpp"
+
+static ZNMethodData* gc_data(const nmethod* nm) {
+  return nm->gc_data();
+}
+
+static void set_gc_data(nmethod* nm, ZNMethodData* data) {
+  return nm->set_gc_data(data);
+}
+
+void ZNMethod::attach_gc_data(nmethod* nm) {
+  GrowableArray<oop*> immediate_oops;
+  bool non_immediate_oops = false;
+
+  // Find all oops relocations
+  RelocIterator iter(nm);
+  while (iter.next()) {
+    if (iter.type() != relocInfo::oop_type) {
+      // Not an oop
+      continue;
+    }
+
+    oop_Relocation* r = iter.oop_reloc();
+
+    if (!r->oop_is_immediate()) {
+      // Non-immediate oop found
+      non_immediate_oops = true;
+      continue;
+    }
+
+    if (r->oop_value() != NULL) {
+      // Non-NULL immediate oop found. NULL oops can safely be
+      // ignored since the method will be re-registered if they
+      // are later patched to be non-NULL.
+      immediate_oops.push(r->oop_addr());
+    }
+  }
+
+  // Attach GC data to nmethod
+  ZNMethodData* data = gc_data(nm);
+  if (data == NULL) {
+    data = ZNMethodData::create(nm);
+    set_gc_data(nm, data);
+  }
+
+  // Attach oops in GC data
+  ZNMethodDataOops* const new_oops = ZNMethodDataOops::create(immediate_oops, non_immediate_oops);
+  ZNMethodDataOops* const old_oops = data->swap_oops(new_oops);
+  ZNMethodDataOops::destroy(old_oops);
+}
+
+void ZNMethod::detach_gc_data(nmethod* nm) {
+  // Destroy GC data
+  ZNMethodData::destroy(gc_data(nm));
+  set_gc_data(nm, NULL);
+}
+
+ZReentrantLock* ZNMethod::lock_for_nmethod(nmethod* nm) {
+  ZNMethodData* const data = gc_data(nm);
+  if (data == NULL) {
+    return NULL;
+  }
+  return data->lock();
+}
+
+void ZNMethod::log_register(const nmethod* nm) {
+  LogTarget(Trace, gc, nmethod) log;
+  if (!log.is_enabled()) {
+    return;
+  }
+
+  const ZNMethodDataOops* const oops = gc_data(nm)->oops();
+
+  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
+            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
+            nm->method()->method_holder()->external_name(),
+            nm->method()->name()->as_C_string(),
+            p2i(nm),
+            nm->compiler_name(),
+            nm->oops_count() - 1,
+            oops->immediates_count(),
+            oops->has_non_immediates() ? "Yes" : "No");
+
+  LogTarget(Trace, gc, nmethod, oops) log_oops;
+  if (!log_oops.is_enabled()) {
+    return;
+  }
+
+  // Print nmethod oops table
+  oop* const begin = nm->oops_begin();
+  oop* const end = nm->oops_end();
+  for (oop* p = begin; p < end; p++) {
+    log_oops.print("  Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
+                   (p - begin), p2i(*p), (*p)->klass()->external_name());
+  }
+
+  // Print nmethod immediate oops
+  if (oops->immediates_count() > 0) {
+    oop** const begin = oops->immediates_begin();
+    oop** const end = oops->immediates_end();
+    for (oop** p = begin; p < end; p++) {
+      log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
+                     (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
+    }
+  }
+}
+
+void ZNMethod::log_unregister(const nmethod* nm) {
+  LogTarget(Debug, gc, nmethod) log;
+  if (!log.is_enabled()) {
+    return;
+  }
+
+  log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
+            nm->method()->method_holder()->external_name(),
+            nm->method()->name()->as_C_string(),
+            p2i(nm));
+}
+
+void ZNMethod::register_nmethod(nmethod* nm) {
+  ResourceMark rm;
+
+  // Create and attach gc data
+  attach_gc_data(nm);
+
+  log_register(nm);
+
+  ZNMethodTable::register_nmethod(nm);
+
+  // Disarm nmethod entry barrier
+  disarm_nmethod(nm);
+}
+
+void ZNMethod::unregister_nmethod(nmethod* nm) {
+  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
+
+  if (Thread::current()->is_Code_cache_sweeper_thread()) {
+    // The sweeper must wait for any ongoing iteration to complete
+    // before it can unregister an nmethod.
+    ZNMethodTable::wait_until_iteration_done();
+  }
+
+  ResourceMark rm;
+
+  log_unregister(nm);
+
+  ZNMethodTable::unregister_nmethod(nm);
+
+  // Destroy and detach gc data
+  detach_gc_data(nm);
+}
+
+void ZNMethod::disarm_nmethod(nmethod* nm) {
+  BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
+  if (bs != NULL) {
+    bs->disarm(nm);
+  }
+}
+
+void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) {
+  // Process oops table
+  oop* const begin = nm->oops_begin();
+  oop* const end = nm->oops_end();
+  for (oop* p = begin; p < end; p++) {
+    if (*p != Universe::non_oop_word()) {
+      cl->do_oop(p);
+    }
+  }
+
+  ZNMethodDataOops* const oops = gc_data(nm)->oops();
+
+  // Process immediate oops
+  if (oops->immediates_count() > 0) {
+    oop** const begin = oops->immediates_begin();
+    oop** const end = oops->immediates_end();
+    for (oop** p = begin; p < end; p++) {
+      if (**p != Universe::non_oop_word()) {
+        cl->do_oop(*p);
+      }
+    }
+  }
+
+  // Process non-immediate oops
+  if (oops->has_non_immediates()) {
+    nm->fix_oop_relocations();
+  }
+}
+
+class ZNMethodToOopsDo : public ZNMethodClosure {
+private:
+  OopClosure* _cl;
+
+public:
+  ZNMethodToOopsDo(OopClosure* cl) :
+      _cl(cl) {}
+
+  void do_nmethod(nmethod* nm) {
+    ZNMethod::nmethod_oops_do(nm, _cl);
+  }
+};
+
+void ZNMethod::oops_do_begin() {
+  ZNMethodTable::nmethods_do_begin();
+}
+
+void ZNMethod::oops_do_end() {
+  ZNMethodTable::nmethods_do_end();
+}
+
+void ZNMethod::oops_do(OopClosure* cl) {
+  ZNMethodToOopsDo nmethod_cl(cl);
+  ZNMethodTable::nmethods_do(&nmethod_cl);
+}
+
+class ZNMethodUnlinkClosure : public ZNMethodClosure {
+private:
+  bool          _unloading_occurred;
+  volatile bool _failed;
+
+  void set_failed() {
+    Atomic::store(true, &_failed);
+  }
+
+public:
+  ZNMethodUnlinkClosure(bool unloading_occurred) :
+      _unloading_occurred(unloading_occurred),
+      _failed(false) {}
+
+  virtual void do_nmethod(nmethod* nm) {
+    if (failed()) {
+      return;
+    }
+
+    if (!nm->is_alive()) {
+      return;
+    }
+
+    ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
+
+    if (nm->is_unloading()) {
+      // Unlinking of the dependencies must happen before the
+      // handshake separating unlink and purge.
+      nm->flush_dependencies(false /* delete_immediately */);
+
+      // We don't need to take the lock when unlinking nmethods from
+      // the Method, because it is only concurrently unlinked by
+      // the entry barrier, which acquires the per nmethod lock.
+      nm->unlink_from_method(false /* acquire_lock */);
+      return;
+    }
+
+    // Heal oops and disarm
+    ZNMethodOopClosure cl;
+    ZNMethod::nmethod_oops_do(nm, &cl);
+    ZNMethod::disarm_nmethod(nm);
+
+    // Clear compiled ICs and exception caches
+    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
+      set_failed();
+    }
+  }
+
+  bool failed() const {
+    return Atomic::load(&_failed);
+  }
+};
+
+class ZNMethodUnlinkTask : public ZTask {
+private:
+  ZNMethodUnlinkClosure _cl;
+  ICRefillVerifier*     _verifier;
+
+public:
+  ZNMethodUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
+      ZTask("ZNMethodUnlinkTask"),
+      _cl(unloading_occurred),
+      _verifier(verifier) {
+    ZNMethodTable::nmethods_do_begin();
+  }
+
+  ~ZNMethodUnlinkTask() {
+    ZNMethodTable::nmethods_do_end();
+  }
+
+  virtual void work() {
+    ICRefillVerifierMark mark(_verifier);
+    ZNMethodTable::nmethods_do(&_cl);
+  }
+
+  bool success() const {
+    return !_cl.failed();
+  }
+};
+
+void ZNMethod::unlink(ZWorkers* workers, bool unloading_occurred) {
+  for (;;) {
+    ICRefillVerifier verifier;
+
+    {
+      ZNMethodUnlinkTask task(unloading_occurred, &verifier);
+      workers->run_concurrent(&task);
+      if (task.success()) {
+        return;
+      }
+    }
+
+    // Cleaning failed because we ran out of transitional IC stubs,
+    // so we have to refill and try again. Refilling requires taking
+    // a safepoint, so we temporarily leave the suspendible thread set.
+    SuspendibleThreadSetLeaver sts;
+    InlineCacheBuffer::refill_ic_stubs();
+  }
+}
+
+class ZNMethodPurgeClosure : public ZNMethodClosure {
+public:
+  virtual void do_nmethod(nmethod* nm) {
+    if (nm->is_alive() && nm->is_unloading()) {
+      nm->make_unloaded();
+    }
+  }
+};
+
+class ZNMethodPurgeTask : public ZTask {
+private:
+  ZNMethodPurgeClosure _cl;
+
+public:
+  ZNMethodPurgeTask() :
+      ZTask("ZNMethodPurgeTask"),
+      _cl() {
+    ZNMethodTable::nmethods_do_begin();
+  }
+
+  ~ZNMethodPurgeTask() {
+    ZNMethodTable::nmethods_do_end();
+  }
+
+  virtual void work() {
+    ZNMethodTable::nmethods_do(&_cl);
+  }
+};
+
+void ZNMethod::purge(ZWorkers* workers) {
+  ZNMethodPurgeTask task;
+  workers->run_concurrent(&task);
+}
--- /dev/null	2019-02-14 11:31:37.500399000 +0100
+++ new/src/hotspot/share/gc/z/zNMethod.hpp	2019-02-20 22:12:28.368526140 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZNMETHOD_HPP
+#define SHARE_GC_Z_ZNMETHOD_HPP
+
+#include "memory/allocation.hpp"
+
+class nmethod;
+class OopClosure;
+class ZReentrantLock;
+class ZWorkers;
+
+class ZNMethod : public AllStatic {
+private:
+  static void attach_gc_data(nmethod* nm);
+  static void detach_gc_data(nmethod* nm);
+
+  static void log_register(const nmethod* nm);
+  static void log_unregister(const nmethod* nm);
+
+public:
+  static void register_nmethod(nmethod* nm);
+  static void unregister_nmethod(nmethod* nm);
+
+  static void disarm_nmethod(nmethod* nm);
+
+  static void nmethod_oops_do(nmethod* nm, OopClosure* cl);
+
+  static void oops_do_begin();
+  static void oops_do_end();
+  static void oops_do(OopClosure* cl);
+
+  static ZReentrantLock* lock_for_nmethod(nmethod* nm);
+
+  static void unlink(ZWorkers* workers, bool unloading_occurred);
+  static void purge(ZWorkers* workers);
+};
+
+#endif // SHARE_GC_Z_ZNMETHOD_HPP