src/hotspot/share/code/nmethod.cpp
Concurrent class unloading
basic_lock_sp_offset, oop_maps);
NOT_PRODUCT(if (nm != NULL) native_nmethod_stats.note_native_nmethod(nm));
}
if (nm != NULL) {
- nmethodBarrier::initialize(nm);
// verify nmethod
debug_only(nm->verify();) // might block
nm->log_new_nmethod();
nm->make_in_use();
NOT_PRODUCT(if (nm != NULL) note_java_nmethod(nm));
}
}
// Do verification and logging outside CodeCache_lock.
if (nm != NULL) {
- nmethodBarrier::initialize(nm);
// Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
DEBUG_ONLY(nm->verify();)
nm->log_new_nmethod();
}
return nm;
return nmethodBarrier::is_armed(this);
}
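The barrier hooks here (nmethodBarrier::is_armed above, nmethod::disarm_barrier below, and the initialize calls dropped from the factory methods) are the nmethod entry barrier used for concurrent unloading: each nmethod carries a guard checked on entry, an armed nmethod takes a slow path that processes its oops and metadata, and it is then disarmed for the current cycle. The following is a minimal standalone sketch of that arm/check/disarm idea only; the names (current_unloading_cycle, GuardedMethod) are illustrative and not the nmethodBarrier implementation in this patch.

#include <atomic>

// Illustrative sketch only; layout and names are not HotSpot's nmethodBarrier.
static std::atomic<int> current_unloading_cycle{1};  // bumping this arms every method at once

struct GuardedMethod {
  std::atomic<int> guard{0};  // cycle for which this method was last disarmed

  bool is_armed() const {
    // Armed = the guard is stale with respect to the current unloading cycle.
    return guard.load(std::memory_order_acquire) !=
           current_unloading_cycle.load(std::memory_order_relaxed);
  }

  void disarm() {
    // Record that this method's oops/metadata have been processed for this cycle.
    guard.store(current_unloading_cycle.load(std::memory_order_relaxed),
                std::memory_order_release);
  }

  void on_entry() {
    if (is_armed()) {
      // Slow path: fix up oops/metadata (elided here), then disarm before
      // execution of the compiled code continues.
      disarm();
    }
  }
};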
void nmethod::disarm_barrier() {
- if (!is_in_use()) {
- return;
- }
-
if (method()->is_method_handle_intrinsic()) {
return;
}
if (!is_native_method() && !is_compiled_by_c2() && !is_compiled_by_c1()) {
assert(is_not_entrant(), "must be a non-entrant method");
// Since the nmethod sweeper only does partial sweep the sweeper's traversal
// count can be greater than the stack traversal count before it hits the
// nmethod for the second time.
- return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
- !is_locked_by_vm();
+ return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() &&
+ !is_locked_by_vm() && !is_unloading();
}
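For the condition above: a not-entrant nmethod may only be converted once the sweeper has completed at least one full stack traversal after the nmethod was last seen on a stack, it is not locked by the VM, and, with this change, it is not still being unloaded concurrently. A small sketch of the traversal-count arithmetic, with illustrative names rather than the real NMethodSweeper interface:

#include <cstdint>

// Illustrative counters; not the real NMethodSweeper state.
static uint64_t sweeper_traversal_count = 1;  // incremented when a new full stack scan starts

struct SweptMethodSketch {
  uint64_t stack_traversal_mark = 0;  // traversal during which this method was last seen on a stack
  bool locked_by_vm = false;
  bool unloading = false;

  // Record an activation found during the current scan.
  void mark_seen_on_stack() { stack_traversal_mark = sweeper_traversal_count; }

  // Convertible only after a complete traversal has passed without the method
  // appearing on any stack, and nothing else still holds on to it.
  bool can_convert() const {
    return stack_traversal_mark + 1 < sweeper_traversal_count &&
           !locked_by_vm &&
           !unloading;
  }
};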
void nmethod::inc_decompile_count() {
if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
// Could be gated by ProfileTraps, but do not bother...
// This nmethod is being unloaded, make sure that dependencies
// recorded in instanceKlasses get flushed.
// Since this work is being done during a GC, defer deleting dependencies from the
// InstanceKlass.
- assert(Universe::heap()->is_gc_active(), "should only be called during gc");
+ assert(UseZGC || Universe::heap()->is_gc_active(), "should only be called during gc");
flush_dependencies(/*delete_immediately*/false);
// Break cycle between nmethod & method
LogTarget(Trace, class, unload, nmethod) lt;
if (lt.is_enabled()) {
}
_method = NULL; // Clear the method of this dead nmethod
}
// Make the class unloaded - i.e., change state and notify sweeper
- assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+ assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
// Unregister must be done before the state change
Universe::heap()->unregister_nmethod(this);
_state = unloaded;
}
// If the state is becoming a zombie, signal to unregister the nmethod with
// the heap.
// This nmethod may have already been unloaded during a full GC.
- if ((state == zombie) && !is_unloaded()) {
+ if (state == zombie) {
nmethod_needs_unregister = true;
}
// Must happen before state change. Otherwise we have a race condition in
// nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
// traversing the dependency information in unsafe. In that case this
// function is called with a boolean argument and this function only
// notifies instanceKlasses that are reachable
void nmethod::flush_dependencies(bool delete_immediately) {
+ MutexLockerEx m(SafepointSynchronize::is_at_safepoint() ||
+ !CodeCache_lock->owned_by_self() ? CodeCache_lock : NULL,
+ Mutex::_no_safepoint_check_flag);
assert_locked_or_safepoint(CodeCache_lock);
- assert(Universe::heap()->is_gc_active() != delete_immediately,
+ assert(UseZGC || Universe::heap()->is_gc_active() != delete_immediately,
"delete_immediately is false if and only if we are called during GC");
if (!has_flushed_dependencies()) {
set_has_flushed_dependencies();
for (Dependencies::DepStream deps(this); deps.next(); ) {
if (deps.type() == Dependencies::call_site_target_value) {
// marked for deoptimization. A particular dependency is only checked once.
NMethodIterator iter;
while(iter.next()) {
nmethod* nm = iter.method();
// Only notify for live nmethods
- if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
+ if (nm->is_alive() && !(nm->is_marked_for_deoptimization() || nm->is_unloading())) {
for (Dependencies::DepStream deps(nm); deps.next(); ) {
// Construct abstraction of a dependency.
DependencySignature* current_sig = new DependencySignature(deps);
// Determine if dependency is already checked. table->put(...) returns
_speculation_log = NULL;
}
}
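flush_dependencies above and maybe_invalidate_installed_code below both adopt the same idiom: the MutexLockerEx is handed either the real lock or NULL, so the lock is only acquired when the calling context (a concurrent unloading thread, a safepoint, or a caller that already owns it) actually requires it, and the existing asserts continue to hold. A standalone sketch of that conditional-guard pattern using std::mutex instead of HotSpot's Mutex/MutexLockerEx; the predicate names are illustrative stand-ins:

#include <mutex>

// Guard that does nothing for a null mutex, mirroring MutexLockerEx's
// treatment of a NULL Mutex* as "no locking required".
class ConditionalLocker {
  std::mutex* _m;
 public:
  explicit ConditionalLocker(std::mutex* m) : _m(m) { if (_m != nullptr) _m->lock(); }
  ~ConditionalLocker() { if (_m != nullptr) _m->unlock(); }
};

std::mutex patching_lock;

// Trivial stand-ins for SafepointSynchronize::is_at_safepoint() and Mutex::owned_by_self().
bool at_safepoint()       { return false; }
bool owns_patching_lock() { return false; }

void maybe_invalidate_sketch() {
  // Acquire the lock only when we are neither at a safepoint nor already holding it;
  // otherwise pass nullptr and rely on the safepoint's or caller's guarantees.
  ConditionalLocker guard(!at_safepoint() && !owns_patching_lock() ? &patching_lock : nullptr);
  // ... body that must be consistent under the lock ...
}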
void nmethod::maybe_invalidate_installed_code() {
+ MutexLockerEx m(!SafepointSynchronize::is_at_safepoint() &&
+ !Patching_lock->owned_by_self() ? Patching_lock : NULL,
+ Mutex::_no_safepoint_check_flag);
assert(Patching_lock->is_locked() ||
SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
if (installed_code != NULL) {
// Update the values in the InstalledCode instance if it still refers to this nmethod
nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
if (nm == this) {
- if (!is_alive()) {
+ if (!is_alive() || is_unloading()) {
// Break the link between nmethod and InstalledCode such that the nmethod
// can subsequently be flushed safely. The link must be maintained while
// the method could have live activations since invalidateInstalledCode
// might want to invalidate all existing activations.
InstalledCode::set_address(installed_code, 0);
// be invalidated.
InstalledCode::set_entryPoint(installed_code, 0);
}
}
}
- if (!is_alive()) {
+ if (!is_alive() || is_unloading()) {
// Clear these out after the nmethod has been unregistered and any
// updates to the InstalledCode instance have been performed.
clear_jvmci_installed_code();
clear_speculation_log();
}
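The recurring check in this function, !is_alive() || is_unloading(), treats an nmethod claimed by concurrent unloading the same as a dead one: the link to its InstalledCode mirror is severed so the code can later be flushed, while an nmethod that is alive but merely not entrant keeps its back-link (so existing activations can still be invalidated) and only loses its entry point. A standalone sketch of that link-breaking logic with illustrative types, not the real JVMCI InstalledCode API:

#include <cstdint>

// Illustrative stand-ins; not the real JVMCI InstalledCode / nmethod types.
struct InstalledCodeMirror {
  uint64_t address = 0;      // back-pointer to the compiled code, 0 once unlinked
  uint64_t entry_point = 0;  // callable entry, 0 once the code must not be entered
};

struct CompiledCodeSketch {
  bool alive = true;      // is_alive()
  bool unloading = false; // is_unloading(): claimed by concurrent unloading
  bool entrant = true;    // !is_not_entrant()
  InstalledCodeMirror* mirror = nullptr;

  void maybe_invalidate_mirror() {
    if (mirror == nullptr) return;
    if (!alive || unloading) {
      // Dead or being unloaded concurrently: sever the link completely so the
      // code can be flushed without the mirror still referring to it.
      mirror->address = 0;
      mirror->entry_point = 0;
    } else if (!entrant) {
      // Alive but not entrant: keep 'address' so existing activations can still
      // be invalidated, but clear the entry point so no new calls enter the code.
      mirror->entry_point = 0;
    }
  }
};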
nmethodLocker nml(nm);
#ifdef ASSERT
{
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
// This relationship can only be checked safely under a lock
- assert(!nm->is_alive() || nm->jvmci_installed_code() == installedCode(), "sanity check");
+ assert(!nm->is_alive() || nm->is_unloading() || nm->jvmci_installed_code() == installedCode(), "sanity check");
}
#endif
if (nm->is_alive()) {
// Invalidating the InstalledCode means we want the nmethod