< prev index next >
src/hotspot/share/classfile/classLoaderData.cpp
Concurrent class unloading
}
return false;
}
#endif // PRODUCT
// Old (pre-concurrent-unloading) claim protocol: _claimed is a one-shot flag.
// Each GC traversal claims a CLD exactly once; only the caller whose CAS
// transitions _claimed 0 -> 1 gets true, everyone else sees it already set.
! bool ClassLoaderData::claim() {
// Fast path: already claimed, no need to CAS.
! if (_claimed == 1) {
! return false;
  }
-
- return (int) Atomic::cmpxchg(1, &_claimed, 0) == 0;
}
// Anonymous classes have their own ClassLoaderData that is marked to keep alive
// while the class is being parsed, and if the class appears on the module fixup list.
// Due to the uniqueness that no other class shares the anonymous class' name or
}
return false;
}
#endif // PRODUCT
// New claim protocol for concurrent class unloading: _claimed is now a small
// bit set instead of a boolean, so a CLD can be claimed at two strengths.
// A "finalizable" claim sets only bit 0 (mask 1); a strong claim sets bits
// 0 and 1 (mask 3), and therefore subsumes a finalizable claim.
// NOTE(review): bit-strength semantics inferred from the masks used below --
// confirm against the _claimed declaration in classLoaderData.hpp.
! bool ClassLoaderData::claim(bool finalizable) {
// Classic CAS retry loop: reload, test, try to publish the merged bits.
! for (;;) {
! int old_claim = Atomic::load(&_claimed);
! int claim_mask = finalizable ? 1 : 3;
// Already claimed at (at least) the requested strength: report failure so
// the caller does not traverse this CLD again.
! if ((old_claim & claim_mask) == claim_mask) {
! return false;
! }
! int new_claim = old_claim | claim_mask;
// Only the thread whose cmpxchg succeeds wins the claim; a losing racer
// retries and will then observe the bits as already set above.
! if (Atomic::cmpxchg(new_claim, &_claimed, old_claim) == old_claim) {
! return true;
! }
  }
}
// Anonymous classes have their own ClassLoaderData that is marked to keep alive
// while the class is being parsed, and if the class appears on the module fixup list.
// Due to the uniqueness that no other class shares the anonymous class' name or
***************
// Hand out the next InstanceKlass from the global static class iterator.
// Pure delegation; yields whatever the iterator produces (NULL-on-exhaustion
// behavior is the iterator's contract, not decided here).
InstanceKlass* ClassLoaderDataGraph::try_get_next_class() {
  InstanceKlass* next_class = static_klass_iterator.try_get_next_class();
  return next_class;
}
-
// Record the holder oop (class loader, or the mirror for loader-less CLDs)
// in a WeakHandle so the GC can observe the holder's liveness.
// A NULL holder is ignored; a holder may be set at most once.
void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
  if (loader_or_mirror() == NULL) {
    return;  // Nothing to record.
  }
  assert(_holder.is_null(), "never replace holders");
  _holder = WeakHandle<vm_class_loader_data>::create(loader_or_mirror);
}
// Remove a klass from the _klasses list for scratch_class during redefinition
// or parsed class in the case of an error.
void ClassLoaderData::remove_class(Klass* scratch_class) {
! assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
// Adjust global class iterator.
static_klass_iterator.adjust_saved_class(scratch_class);
Klass* prev = NULL;
// Delegate to the global static class iterator for the next InstanceKlass.
InstanceKlass* ClassLoaderDataGraph::try_get_next_class() {
return static_klass_iterator.try_get_next_class();
}
// Stash the holder oop (loader or mirror) in a WeakHandle for liveness
// tracking.  NULL holders are skipped; a holder is only ever set once
// (asserted), never replaced.
void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
if (loader_or_mirror() != NULL) {
assert(_holder.is_null(), "never replace holders");
_holder = WeakHandle<vm_class_loader_data>::create(loader_or_mirror);
}
}
// Remove a klass from the _klasses list for scratch_class during redefinition
// or parsed class in the case of an error.
void ClassLoaderData::remove_class(Klass* scratch_class) {
! assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
// Adjust global class iterator.
static_klass_iterator.adjust_saved_class(scratch_class);
Klass* prev = NULL;
***************
}
ShouldNotReachHere(); // should have found this class!!
}
void ClassLoaderData::unload() {
+ MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
_unloading = true;
LogTarget(Trace, class, loader, data) lt;
if (lt.is_enabled()) {
ResourceMark rm;
***************
} else {
return NULL;
}
}
// Read the holder oop without keeping it alive: peek() inspects the
// WeakHandle without the keep-alive semantics of a full resolve, so the
// returned oop must not be used to extend the holder's lifetime.
// NOTE(review): "no keepalive" contract inferred from peek() vs resolve()
// on WeakHandle -- confirm against weakHandle.hpp.
+ oop ClassLoaderData::holder_no_keepalive() const {
+ if (!_holder.is_null()) { // NULL class_loader
+ return _holder.peek();
+ } else {
+ return NULL;
+ }
+ }
+
// Unloading support
bool ClassLoaderData::is_alive() const {
bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
|| (_holder.peek() != NULL); // and not cleaned by the GC weak handle processing.
***************
}
}
};
ClassLoaderData::~ClassLoaderData() {
! // Release C heap structures for all the classes.
! ReleaseKlassClosure cl;
! classes_do(&cl);
! ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
! ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());
// Release the WeakHandle
_holder.release();
// Release C heap allocated hashtable for all the packages.
}
}
};
ClassLoaderData::~ClassLoaderData() {
! {
! MutexLockerEx m(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
! Mutex::_no_safepoint_check_flag);
! // Release C heap structures for all the classes.
! ReleaseKlassClosure cl;
! classes_do(&cl);
! ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
! ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());
! }
// Release the WeakHandle
_holder.release();
// Release C heap allocated hashtable for all the packages.
***************
}
}
// Deallocate free metadata on the free list. How useful the PermGen was!
void ClassLoaderData::free_deallocate_list() {
! // Don't need lock, at safepoint
! assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
if (_deallocate_list == NULL) {
return;
}
// Go backwards because this removes entries that are freed.
}
}
// Deallocate free metadata on the free list. How useful the PermGen was!
void ClassLoaderData::free_deallocate_list() {
! MutexLockerEx ml(SafepointSynchronize::is_at_safepoint() ? NULL : metaspace_lock(),
! Mutex::_no_safepoint_check_flag);
assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
if (_deallocate_list == NULL) {
return;
}
// Go backwards because this removes entries that are freed.
***************
// scratch or error classes so that unloading events aren't triggered for these
// classes. The metadata is removed with the unloading metaspace.
// There isn't C heap memory allocated for methods, so nothing is done for them.
void ClassLoaderData::free_deallocate_list_C_heap_structures() {
// Don't need lock, at safepoint
! assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
assert(is_unloading(), "only called for ClassLoaderData that are unloading");
if (_deallocate_list == NULL) {
return;
}
// Go backwards because this removes entries that are freed.
// scratch or error classes so that unloading events aren't triggered for these
// classes. The metadata is removed with the unloading metaspace.
// There isn't C heap memory allocated for methods, so nothing is done for them.
void ClassLoaderData::free_deallocate_list_C_heap_structures() {
// Don't need lock, at safepoint
! assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
assert(is_unloading(), "only called for ClassLoaderData that are unloading");
if (_deallocate_list == NULL) {
return;
}
// Go backwards because this removes entries that are freed.
***************
event.set_definingClassLoader(k->class_loader_data());
event.commit();
}
static void post_class_unload_events() {
! assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
if (Jfr::is_enabled()) {
if (EventClassUnload::is_enabled()) {
class_unload_time = Ticks::now();
ClassLoaderDataGraph::classes_unloading_do(&post_class_unload_event);
}
event.set_definingClassLoader(k->class_loader_data());
event.commit();
}
static void post_class_unload_events() {
! assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
if (Jfr::is_enabled()) {
if (EventClassUnload::is_enabled()) {
class_unload_time = Ticks::now();
ClassLoaderDataGraph::classes_unloading_do(&post_class_unload_event);
}
***************
// lists to determine if there are modules on those lists that are now
// dead and should be removed. A module's life cycle is equivalent
// to its defining class loader's life cycle. Since a module is
// considered dead if its class loader is dead, these walks must
// occur after each class loader's aliveness is determined.
if (data->packages() != NULL) {
data->packages()->purge_all_package_exports();
}
if (data->modules_defined()) {
data->modules()->purge_all_module_reads();
}
data = data->next();
}
! SymbolTable::do_check_concurrent_work();
JFR_ONLY(post_class_unload_events();)
}
log_debug(class, loader, data)("do_unloading: loaders processed %u, loaders removed %u", loaders_processed, loaders_removed);
// lists to determine if there are modules on those lists that are now
// dead and should be removed. A module's life cycle is equivalent
// to its defining class loader's life cycle. Since a module is
// considered dead if its class loader is dead, these walks must
// occur after each class loader's aliveness is determined.
+ MutexLockerEx ml(UseZGC ? Module_lock : NULL);
if (data->packages() != NULL) {
data->packages()->purge_all_package_exports();
}
if (data->modules_defined()) {
data->modules()->purge_all_module_reads();
}
data = data->next();
}
! if (!UseZGC) {
! SymbolTable::do_check_concurrent_work();
! }
JFR_ONLY(post_class_unload_events();)
}
log_debug(class, loader, data)("do_unloading: loaders processed %u, loaders removed %u", loaders_processed, loaders_removed);
***************
// lists to determine if there are modules on those lists that are now
// dead and should be removed. A module's life cycle is equivalent
// to its defining class loader's life cycle. Since a module is
// considered dead if its class loader is dead, these walks must
// occur after each class loader's aliveness is determined.
+ MutexLockerEx ml(UseZGC ? Module_lock : NULL);
if (data->packages() != NULL) {
data->packages()->purge_all_package_exports();
}
if (data->modules_defined()) {
data->modules()->purge_all_module_reads();
***************
data = data->next();
}
}
void ClassLoaderDataGraph::purge() {
! assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
ClassLoaderData* list = _unloading;
_unloading = NULL;
ClassLoaderData* next = list;
bool classes_unloaded = false;
while (next != NULL) {
data = data->next();
}
}
void ClassLoaderDataGraph::purge() {
! assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
ClassLoaderData* list = _unloading;
_unloading = NULL;
ClassLoaderData* next = list;
bool classes_unloaded = false;
while (next != NULL) {
< prev index next >