
src/hotspot/share/classfile/classLoaderData.cpp

Concurrent class unloading

*** 277,292 ****
    }
    return false;
  }
  #endif // PRODUCT

! bool ClassLoaderData::claim() {
!   if (_claimed == 1) {
!     return false;
    }
- 
-   return (int) Atomic::cmpxchg(1, &_claimed, 0) == 0;
  }

  // Anonymous classes have their own ClassLoaderData that is marked to keep alive
  // while the class is being parsed, and if the class appears on the module fixup list.
  // Due to the uniqueness that no other class shares the anonymous class' name or
--- 277,298 ----
    }
    return false;
  }
  #endif // PRODUCT

! bool ClassLoaderData::claim(bool finalizable) {
!   for (;;) {
!     int old_claim = Atomic::load(&_claimed);
!     int claim_mask = finalizable ? 1 : 3;
!     if ((old_claim & claim_mask) == claim_mask) {
!       return false;
!     }
!     int new_claim = old_claim | claim_mask;
!     if (Atomic::cmpxchg(new_claim, &_claimed, old_claim) == old_claim) {
!       return true;
!     }
    }
  }

  // Anonymous classes have their own ClassLoaderData that is marked to keep alive
  // while the class is being parsed, and if the class appears on the module fixup list.
  // Due to the uniqueness that no other class shares the anonymous class' name or
***************
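Note on the hunk above: claim() now sets claim bits with a retry loop around a compare-and-swap. As the masks suggest, a finalizable claim sets only the low bit while a full claim sets both bits, so a CLD that was claimed for finalizable marking can still be claimed again with full strength later. Below is a minimal standalone sketch of that retry pattern using std::atomic in place of HotSpot's Atomic wrapper; the ClaimWord type is illustrative only and simply mirrors the masks in the hunk.

    #include <atomic>

    // Illustrative stand-in for the CLD claim word; not HotSpot code.
    struct ClaimWord {
      std::atomic<int> _claimed{0};

      // Mirrors claim(bool finalizable) above: a finalizable claim sets bit 0,
      // a full claim sets bits 0 and 1, so a full claim can still succeed
      // after an earlier finalizable-only claim.
      bool claim(bool finalizable) {
        const int claim_mask = finalizable ? 1 : 3;
        int old_claim = _claimed.load();
        for (;;) {
          if ((old_claim & claim_mask) == claim_mask) {
            return false;  // all requested bits already set by an earlier visitor
          }
          const int new_claim = old_claim | claim_mask;
          // On failure, compare_exchange_weak reloads old_claim and we retry.
          if (_claimed.compare_exchange_weak(old_claim, new_claim)) {
            return true;
          }
        }
      }
    };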
*** 550,571 ****
  InstanceKlass* ClassLoaderDataGraph::try_get_next_class() {
    return static_klass_iterator.try_get_next_class();
  }

- 
  void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
    if (loader_or_mirror() != NULL) {
      assert(_holder.is_null(), "never replace holders");
      _holder = WeakHandle<vm_class_loader_data>::create(loader_or_mirror);
    }
  }

  // Remove a klass from the _klasses list for scratch_class during redefinition
  // or parsed class in the case of an error.
  void ClassLoaderData::remove_class(Klass* scratch_class) {
!   assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");

    // Adjust global class iterator.
    static_klass_iterator.adjust_saved_class(scratch_class);

    Klass* prev = NULL;
--- 556,576 ----
  InstanceKlass* ClassLoaderDataGraph::try_get_next_class() {
    return static_klass_iterator.try_get_next_class();
  }

  void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
    if (loader_or_mirror() != NULL) {
      assert(_holder.is_null(), "never replace holders");
      _holder = WeakHandle<vm_class_loader_data>::create(loader_or_mirror);
    }
  }

  // Remove a klass from the _klasses list for scratch_class during redefinition
  // or parsed class in the case of an error.
  void ClassLoaderData::remove_class(Klass* scratch_class) {
!   assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "only called at safepoint");

    // Adjust global class iterator.
    static_klass_iterator.adjust_saved_class(scratch_class);

    Klass* prev = NULL;
***************
*** 591,600 ****
--- 596,606 ----
    }
    ShouldNotReachHere();   // should have found this class!!
  }

  void ClassLoaderData::unload() {
+   MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
    _unloading = true;

    LogTarget(Trace, class, loader, data) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
***************
*** 673,682 ****
--- 679,696 ----
    } else {
      return NULL;
    }
  }

+ oop ClassLoaderData::holder_no_keepalive() const {
+   if (!_holder.is_null()) {  // NULL class_loader
+     return _holder.peek();
+   } else {
+     return NULL;
+   }
+ }
+ 
  // Unloading support
  bool ClassLoaderData::is_alive() const {
    bool alive = keep_alive()         // null class loader and incomplete anonymous klasses.
        || (_holder.peek() != NULL);  // and not cleaned by the GC weak handle processing.
***************
*** 703,718 ****
      }
    }
  };

  ClassLoaderData::~ClassLoaderData() {
!   // Release C heap structures for all the classes.
!   ReleaseKlassClosure cl;
!   classes_do(&cl);
!   ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
!   ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());

    // Release the WeakHandle
    _holder.release();

    // Release C heap allocated hashtable for all the packages.
--- 717,736 ----
      }
    }
  };

  ClassLoaderData::~ClassLoaderData() {
!   {
!     MutexLockerEx m(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
!                     Mutex::_no_safepoint_check_flag);
!     // Release C heap structures for all the classes.
!     ReleaseKlassClosure cl;
!     classes_do(&cl);
!     ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
!     ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());
!   }

    // Release the WeakHandle
    _holder.release();

    // Release C heap allocated hashtable for all the packages.
***************
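Note on the hunk above: the destructor (like free_deallocate_list below) now takes the relevant lock only when it is not already running inside a safepoint; passing a NULL mutex to MutexLockerEx makes the locker a no-op on the safepoint path. A rough standalone equivalent of that "lock only on the concurrent path" pattern, with a hypothetical at_safepoint() placeholder standing in for SafepointSynchronize::is_at_safepoint() and a plain std::mutex standing in for CodeCache_lock, could look like this:

    #include <mutex>

    std::mutex code_cache_mutex;             // stand-in for HotSpot's CodeCache_lock

    // Hypothetical placeholder for SafepointSynchronize::is_at_safepoint().
    bool at_safepoint() { return false; }

    void release_classes() {
      // Lock only on the concurrent path; at a safepoint no other thread can be
      // mutating the data, so the guard stays disengaged (like MutexLockerEx(NULL)).
      std::unique_lock<std::mutex> guard(code_cache_mutex, std::defer_lock);
      if (!at_safepoint()) {
        guard.lock();
      }
      // ... release per-class C heap structures here ...
    }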
*** 863,874 ****
    }
  }

  // Deallocate free metadata on the free list. How useful the PermGen was!
  void ClassLoaderData::free_deallocate_list() {
!   // Don't need lock, at safepoint
!   assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
    assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
    if (_deallocate_list == NULL) {
      return;
    }
    // Go backwards because this removes entries that are freed.
--- 881,892 ----
    }
  }

  // Deallocate free metadata on the free list. How useful the PermGen was!
  void ClassLoaderData::free_deallocate_list() {
!   MutexLockerEx ml(SafepointSynchronize::is_at_safepoint() ? NULL : metaspace_lock(),
!                    Mutex::_no_safepoint_check_flag);
    assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
    if (_deallocate_list == NULL) {
      return;
    }
    // Go backwards because this removes entries that are freed.
***************
*** 938,948 ****
  // scratch or error classes so that unloading events aren't triggered for these
  // classes. The metadata is removed with the unloading metaspace.
  // There isn't C heap memory allocated for methods, so nothing is done for them.
  void ClassLoaderData::free_deallocate_list_C_heap_structures() {
    // Don't need lock, at safepoint
!   assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
    assert(is_unloading(), "only called for ClassLoaderData that are unloading");
    if (_deallocate_list == NULL) {
      return;
    }
    // Go backwards because this removes entries that are freed.
--- 956,966 ----
  // scratch or error classes so that unloading events aren't triggered for these
  // classes. The metadata is removed with the unloading metaspace.
  // There isn't C heap memory allocated for methods, so nothing is done for them.
  void ClassLoaderData::free_deallocate_list_C_heap_structures() {
    // Don't need lock, at safepoint
!   assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
    assert(is_unloading(), "only called for ClassLoaderData that are unloading");
    if (_deallocate_list == NULL) {
      return;
    }
    // Go backwards because this removes entries that are freed.
***************
*** 1361,1371 ****
    event.set_definingClassLoader(k->class_loader_data());
    event.commit();
  }

  static void post_class_unload_events() {
!   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
    if (Jfr::is_enabled()) {
      if (EventClassUnload::is_enabled()) {
        class_unload_time = Ticks::now();
        ClassLoaderDataGraph::classes_unloading_do(&post_class_unload_event);
      }
--- 1379,1389 ----
    event.set_definingClassLoader(k->class_loader_data());
    event.commit();
  }

  static void post_class_unload_events() {
!   assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
    if (Jfr::is_enabled()) {
      if (EventClassUnload::is_enabled()) {
        class_unload_time = Ticks::now();
        ClassLoaderDataGraph::classes_unloading_do(&post_class_unload_event);
      }
***************
*** 1429,1447 ****
        // lists to determine if there are modules on those lists that are now
        // dead and should be removed. A module's life cycle is equivalent
        // to its defining class loader's life cycle. Since a module is
        // considered dead if its class loader is dead, these walks must
        // occur after each class loader's aliveness is determined.
        if (data->packages() != NULL) {
          data->packages()->purge_all_package_exports();
        }
        if (data->modules_defined()) {
          data->modules()->purge_all_module_reads();
        }
        data = data->next();
      }
!     SymbolTable::do_check_concurrent_work();
      JFR_ONLY(post_class_unload_events();)
    }

    log_debug(class, loader, data)("do_unloading: loaders processed %u, loaders removed %u",
                                   loaders_processed, loaders_removed);
--- 1447,1468 ----
        // lists to determine if there are modules on those lists that are now
        // dead and should be removed. A module's life cycle is equivalent
        // to its defining class loader's life cycle. Since a module is
        // considered dead if its class loader is dead, these walks must
        // occur after each class loader's aliveness is determined.
+       MutexLockerEx ml(UseZGC ? Module_lock : NULL);
        if (data->packages() != NULL) {
          data->packages()->purge_all_package_exports();
        }
        if (data->modules_defined()) {
          data->modules()->purge_all_module_reads();
        }
        data = data->next();
      }
!     if (!UseZGC) {
!       SymbolTable::do_check_concurrent_work();
!     }
      JFR_ONLY(post_class_unload_events();)
    }

    log_debug(class, loader, data)("do_unloading: loaders processed %u, loaders removed %u",
                                   loaders_processed, loaders_removed);
***************
*** 1462,1471 ****
--- 1483,1493 ----
      // lists to determine if there are modules on those lists that are now
      // dead and should be removed. A module's life cycle is equivalent
      // to its defining class loader's life cycle. Since a module is
      // considered dead if its class loader is dead, these walks must
      // occur after each class loader's aliveness is determined.
+     MutexLockerEx ml(UseZGC ? Module_lock : NULL);
      if (data->packages() != NULL) {
        data->packages()->purge_all_package_exports();
      }
      if (data->modules_defined()) {
        data->modules()->purge_all_module_reads();
***************
*** 1473,1483 ****
      data = data->next();
    }
  }

  void ClassLoaderDataGraph::purge() {
!   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
    ClassLoaderData* list = _unloading;
    _unloading = NULL;
    ClassLoaderData* next = list;
    bool classes_unloaded = false;
    while (next != NULL) {
--- 1495,1505 ----
      data = data->next();
    }
  }

  void ClassLoaderDataGraph::purge() {
!   assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
    ClassLoaderData* list = _unloading;
    _unloading = NULL;
    ClassLoaderData* next = list;
    bool classes_unloaded = false;
    while (next != NULL) {