261
// Returns true if the oop p is stored as a handle anywhere in this chunked
// list. Implemented as a full scan: oops_do visits every handle and the
// closure records whether p was seen.
262 bool ClassLoaderData::ChunkedHandleList::contains(oop p) {
263 VerifyContainsOopClosure cl(p);
264 oops_do(&cl);
265 return cl.found();
266 }
267
268 #ifndef PRODUCT
// Debug-only check: returns true if oop_handle points into the in-use
// storage of one of this list's chunks, i.e. this list owns the handle.
// Walks the chunk chain, bounds-checking against [&_data[0], &_data[_size]).
269 bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
270 Chunk* chunk = _head;
271 while (chunk != NULL) {
272 if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[chunk->_size])) {
273 return true;
274 }
275 chunk = chunk->_next;
276 }
277 return false;
278 }
279 #endif // PRODUCT
280
// Claim this CLD for the current traversal of the ClassLoaderDataGraph.
// Exactly one thread wins: the one whose cmpxchg flips _claimed from 0 to 1
// returns true; any thread that sees _claimed already set (either via the
// fast-path read or a failed cmpxchg) returns false.
281 bool ClassLoaderData::claim() {
282 if (_claimed == 1) {
283 return false;
284 }
285
286 return (int) Atomic::cmpxchg(1, &_claimed, 0) == 0;
287 }
288
289 // Anonymous classes have their own ClassLoaderData that is marked to keep alive
290 // while the class is being parsed, and if the class appears on the module fixup list.
291 // Due to the uniqueness that no other class shares the anonymous class' name or
292 // ClassLoaderData, no other non-GC thread has knowledge of the anonymous class while
293 // it is being defined, therefore _keep_alive is not volatile or atomic.
// Bump the keep-alive refcount. No-op for non-anonymous CLDs, whose lifetime
// is tied to their holder instead of this counter.
294 void ClassLoaderData::inc_keep_alive() {
295 if (is_anonymous()) {
296 assert(_keep_alive >= 0, "Invalid keep alive increment count");
297 _keep_alive++;
298 }
299 }
300
301 void ClassLoaderData::dec_keep_alive() {
302 if (is_anonymous()) {
303 assert(_keep_alive > 0, "Invalid keep alive decrement count");
304 _keep_alive--;
305 }
|
261
// Returns true if the oop p is stored as a handle anywhere in this chunked
// list; linear scan via oops_do with a matching closure.
262 bool ClassLoaderData::ChunkedHandleList::contains(oop p) {
263 VerifyContainsOopClosure cl(p);
264 oops_do(&cl);
265 return cl.found();
266 }
267
268 #ifndef PRODUCT
// Debug-only check: true if oop_handle lies inside the in-use range of any
// chunk owned by this list.
269 bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
270 Chunk* chunk = _head;
271 while (chunk != NULL) {
272 if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[chunk->_size])) {
273 return true;
274 }
275 chunk = chunk->_next;
276 }
277 return false;
278 }
279 #endif // PRODUCT
280
// Claim this CLD for graph traversal using a bit mask rather than a single
// flag: a finalizable claim requests only bit 0 (mask 1); a strong claim
// requests bits 0 and 1 (mask 3). Returns false when every requested bit is
// already set (someone else holds an equal-or-stronger claim); otherwise
// CAS-loops until this thread merges the mask into _claimed and returns true.
// NOTE(review): mask-bit semantics (why finalizable == 1 and strong == 3)
// are inferred from this code alone — confirm against the claim constants
// used by the callers.
281 bool ClassLoaderData::claim(bool finalizable) {
282 for (;;) {
283 int old_claim = Atomic::load(&_claimed);
284 int claim_mask = finalizable ? 1 : 3;
285 if ((old_claim & claim_mask) == claim_mask) {
286 return false;
287 }
288 int new_claim = old_claim | claim_mask;
289 if (Atomic::cmpxchg(new_claim, &_claimed, old_claim) == old_claim) {
290 return true;
291 }
292 }
293 }
294
295 // Anonymous classes have their own ClassLoaderData that is marked to keep alive
296 // while the class is being parsed, and if the class appears on the module fixup list.
297 // Due to the uniqueness that no other class shares the anonymous class' name or
298 // ClassLoaderData, no other non-GC thread has knowledge of the anonymous class while
299 // it is being defined, therefore _keep_alive is not volatile or atomic.
// Bump the keep-alive refcount; no-op for non-anonymous CLDs.
300 void ClassLoaderData::inc_keep_alive() {
301 if (is_anonymous()) {
302 assert(_keep_alive >= 0, "Invalid keep alive increment count");
303 _keep_alive++;
304 }
305 }
306
307 void ClassLoaderData::dec_keep_alive() {
308 if (is_anonymous()) {
309 assert(_keep_alive > 0, "Invalid keep alive decrement count");
310 _keep_alive--;
311 }
|
534 _current_loader_data = cld->next();
535 if (_current_loader_data != NULL) {
536 _current_class_entry = _current_loader_data->klasses();
537 } // else try_get_next_class will start at the head
538 }
539 }
540
// If the iterator's saved cursor is the klass being removed, advance it to
// the next klass so try_get_next_class never dereferences a freed Klass.
541 void adjust_saved_class(Klass* klass) {
542 if (_current_class_entry == klass) {
543 _current_class_entry = klass->next_link();
544 }
545 }
546 };
547
548 static ClassLoaderDataGraphKlassIteratorStatic static_klass_iterator;
549
// Forward to the file-static iterator that holds the incremental
// class-walk position across calls.
550 InstanceKlass* ClassLoaderDataGraph::try_get_next_class() {
551 return static_klass_iterator.try_get_next_class();
552 }
553
554
// Record the holder oop as a GC weak handle; the GC clears it when the
// holder dies (see is_alive / holder_phantom). A non-null holder may be set
// at most once (asserted). A NULL argument leaves _holder empty.
555 void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
556 if (loader_or_mirror() != NULL) {
557 assert(_holder.is_null(), "never replace holders");
558 _holder = WeakHandle<vm_class_loader_data>::create(loader_or_mirror);
559 }
560 }
561
562 // Remove a klass from the _klasses list for scratch_class during redefinition
563 // or parsed class in the case of an error.
564 void ClassLoaderData::remove_class(Klass* scratch_class) {
565 assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
566
567 // Adjust global class iterator.
568 static_klass_iterator.adjust_saved_class(scratch_class);
569
// Unlink scratch_class from the singly-linked _klasses list.
570 Klass* prev = NULL;
571 for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
572 if (k == scratch_class) {
573 if (prev == NULL) {
574 _klasses = k->next_link();
575 } else {
576 Klass* next = k->next_link();
577 prev->set_next_link(next);
578 }
579
// Keep the graph-wide class counters in sync with the unlink.
580 if (k->is_array_klass()) {
581 ClassLoaderDataGraph::dec_array_classes(1);
582 } else {
583 ClassLoaderDataGraph::dec_instance_classes(1);
584 }
585
586 return;
587 }
588 prev = k;
589 assert(k != k->next_link(), "no loops!");
590 }
591 ShouldNotReachHere(); // should have found this class!!
592 }
593
594 void ClassLoaderData::unload() {
595 _unloading = true;
596
597 LogTarget(Trace, class, loader, data) lt;
598 if (lt.is_enabled()) {
599 ResourceMark rm;
600 LogStream ls(lt);
601 ls.print("unload");
602 print_value_on(&ls);
603 ls.cr();
604 }
605
606 // Some items on the _deallocate_list need to free their C heap structures
607 // if they are not already on the _klasses list.
608 free_deallocate_list_C_heap_structures();
609
610 // Tell serviceability tools these classes are unloading
611 // after erroneous classes are released.
612 classes_do(InstanceKlass::notify_unload_class);
613
|
540 _current_loader_data = cld->next();
541 if (_current_loader_data != NULL) {
542 _current_class_entry = _current_loader_data->klasses();
543 } // else try_get_next_class will start at the head
544 }
545 }
546
// Advance the iterator's saved cursor past a klass that is being removed,
// so the next try_get_next_class does not touch freed memory.
547 void adjust_saved_class(Klass* klass) {
548 if (_current_class_entry == klass) {
549 _current_class_entry = klass->next_link();
550 }
551 }
552 };
553
554 static ClassLoaderDataGraphKlassIteratorStatic static_klass_iterator;
555
// Forward to the file-static iterator holding the class-walk position.
556 InstanceKlass* ClassLoaderDataGraph::try_get_next_class() {
557 return static_klass_iterator.try_get_next_class();
558 }
559
// Record the holder oop as a GC weak handle, at most once (asserted);
// a NULL argument leaves _holder empty.
560 void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
561 if (loader_or_mirror() != NULL) {
562 assert(_holder.is_null(), "never replace holders");
563 _holder = WeakHandle<vm_class_loader_data>::create(loader_or_mirror);
564 }
565 }
566
567 // Remove a klass from the _klasses list for scratch_class during redefinition
568 // or parsed class in the case of an error.
// NOTE(review): the UseZGC exemption below presumably allows this to run
// from ZGC's concurrent class-unloading path outside a safepoint — confirm
// that the caller then provides equivalent mutual exclusion for _klasses.
569 void ClassLoaderData::remove_class(Klass* scratch_class) {
570 assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
571
572 // Adjust global class iterator.
573 static_klass_iterator.adjust_saved_class(scratch_class);
574
// Unlink scratch_class from the singly-linked _klasses list.
575 Klass* prev = NULL;
576 for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
577 if (k == scratch_class) {
578 if (prev == NULL) {
579 _klasses = k->next_link();
580 } else {
581 Klass* next = k->next_link();
582 prev->set_next_link(next);
583 }
584
// Keep the graph-wide class counters in sync with the unlink.
585 if (k->is_array_klass()) {
586 ClassLoaderDataGraph::dec_array_classes(1);
587 } else {
588 ClassLoaderDataGraph::dec_instance_classes(1);
589 }
590
591 return;
592 }
593 prev = k;
594 assert(k != k->next_link(), "no loops!");
595 }
596 ShouldNotReachHere(); // should have found this class!!
597 }
598
599 void ClassLoaderData::unload() {
600 MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
601 _unloading = true;
602
603 LogTarget(Trace, class, loader, data) lt;
604 if (lt.is_enabled()) {
605 ResourceMark rm;
606 LogStream ls(lt);
607 ls.print("unload");
608 print_value_on(&ls);
609 ls.cr();
610 }
611
612 // Some items on the _deallocate_list need to free their C heap structures
613 // if they are not already on the _klasses list.
614 free_deallocate_list_C_heap_structures();
615
616 // Tell serviceability tools these classes are unloading
617 // after erroneous classes are released.
618 classes_do(InstanceKlass::notify_unload_class);
619
|
657 if (!DynamicallyResizeSystemDictionaries || DumpSharedSpaces || UseSharedSpaces) {
658 resizable = false;
659 }
660 return new Dictionary(this, size, resizable);
661 }
662
663 // Tell the GC to keep this klass alive while iterating ClassLoaderDataGraph
// Returns the holder oop via resolve(), which (unlike peek, see
// holder_no_keepalive-style uses of _holder.peek()) notifies the GC so a
// concurrently-marked holder is kept alive. NULL if no holder was set.
664 oop ClassLoaderData::holder_phantom() const {
665 // A klass that was previously considered dead can be looked up in the
666 // CLD/SD, and its _java_mirror or _class_loader can be stored in a root
667 // or a reachable object making it alive again. The SATB part of G1 needs
668 // to get notified about this potential resurrection, otherwise the marking
669 // might not find the object.
670 if (!_holder.is_null()) { // NULL class_loader
671 return _holder.resolve();
672 } else {
673 return NULL;
674 }
675 }
676
677 // Unloading support
// A CLD stays alive if it is explicitly kept alive (null class loader or a
// partially-built anonymous class) or its weak holder handle has not been
// cleared by GC weak-reference processing.
678 bool ClassLoaderData::is_alive() const {
679 bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
680 || (_holder.peek() != NULL); // and not cleaned by the GC weak handle processing.
681
682 return alive;
683 }
684
// Closure used by ~ClassLoaderData: releases the C-heap structures of each
// InstanceKlass and counts released instance/array classes so the caller
// can decrement the graph-wide counters in bulk.
685 class ReleaseKlassClosure: public KlassClosure {
686 private:
687 size_t _instance_class_released;
688 size_t _array_class_released;
689 public:
690 ReleaseKlassClosure() : _instance_class_released(0), _array_class_released(0) { }
691
692 size_t instance_class_released() const { return _instance_class_released; }
693 size_t array_class_released() const { return _array_class_released; }
694
695 void do_klass(Klass* k) {
696 if (k->is_array_klass()) {
// Array klasses are only counted; they own no C-heap structures here.
697 _array_class_released ++;
698 } else {
699 assert(k->is_instance_klass(), "Must be");
700 _instance_class_released ++;
701 InstanceKlass::release_C_heap_structures(InstanceKlass::cast(k));
702 }
703 }
704 };
705
706 ClassLoaderData::~ClassLoaderData() {
707 // Release C heap structures for all the classes.
708 ReleaseKlassClosure cl;
709 classes_do(&cl);
710
711 ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
712 ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());
713
714 // Release the WeakHandle
715 _holder.release();
716
717 // Release C heap allocated hashtable for all the packages.
718 if (_packages != NULL) {
719 // Destroy the table itself
720 delete _packages;
721 _packages = NULL;
722 }
723
724 // Release C heap allocated hashtable for all the modules.
725 if (_modules != NULL) {
726 // Destroy the table itself
727 delete _modules;
728 _modules = NULL;
729 }
730
731 // Release C heap allocated hashtable for the dictionary
|
663 if (!DynamicallyResizeSystemDictionaries || DumpSharedSpaces || UseSharedSpaces) {
664 resizable = false;
665 }
666 return new Dictionary(this, size, resizable);
667 }
668
669 // Tell the GC to keep this klass alive while iterating ClassLoaderDataGraph
// Returns the holder oop via resolve(), notifying the GC so the holder is
// kept alive; NULL when no holder was set. Contrast holder_no_keepalive().
670 oop ClassLoaderData::holder_phantom() const {
671 // A klass that was previously considered dead can be looked up in the
672 // CLD/SD, and its _java_mirror or _class_loader can be stored in a root
673 // or a reachable object making it alive again. The SATB part of G1 needs
674 // to get notified about this potential resurrection, otherwise the marking
675 // might not find the object.
676 if (!_holder.is_null()) { // NULL class_loader
677 return _holder.resolve();
678 } else {
679 return NULL;
680 }
681 }
682
// Return the holder oop without keeping it alive: peek() does not notify
// the GC (see holder_phantom for the keep-alive variant), so the result
// must not be used to resurrect a dying holder. NULL if no holder was set.
683 oop ClassLoaderData::holder_no_keepalive() const {
684 if (!_holder.is_null()) { // NULL class_loader
685 return _holder.peek();
686 } else {
687 return NULL;
688 }
689 }
690
691 // Unloading support
// Alive if explicitly kept alive, or if GC weak processing has not yet
// cleared the holder weak handle.
692 bool ClassLoaderData::is_alive() const {
693 bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
694 || (_holder.peek() != NULL); // and not cleaned by the GC weak handle processing.
695
696 return alive;
697 }
698
// Closure used by ~ClassLoaderData: releases each InstanceKlass's C-heap
// structures and tallies instance/array classes for bulk counter updates.
699 class ReleaseKlassClosure: public KlassClosure {
700 private:
701 size_t _instance_class_released;
702 size_t _array_class_released;
703 public:
704 ReleaseKlassClosure() : _instance_class_released(0), _array_class_released(0) { }
705
706 size_t instance_class_released() const { return _instance_class_released; }
707 size_t array_class_released() const { return _array_class_released; }
708
709 void do_klass(Klass* k) {
710 if (k->is_array_klass()) {
// Array klasses are only counted; no C-heap structures to release here.
711 _array_class_released ++;
712 } else {
713 assert(k->is_instance_klass(), "Must be");
714 _instance_class_released ++;
715 InstanceKlass::release_C_heap_structures(InstanceKlass::cast(k));
716 }
717 }
718 };
719
720 ClassLoaderData::~ClassLoaderData() {
721 {
722 MutexLockerEx m(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
723 Mutex::_no_safepoint_check_flag);
724 // Release C heap structures for all the classes.
725 ReleaseKlassClosure cl;
726 classes_do(&cl);
727
728 ClassLoaderDataGraph::dec_array_classes(cl.array_class_released());
729 ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());
730 }
731
732 // Release the WeakHandle
733 _holder.release();
734
735 // Release C heap allocated hashtable for all the packages.
736 if (_packages != NULL) {
737 // Destroy the table itself
738 delete _packages;
739 _packages = NULL;
740 }
741
742 // Release C heap allocated hashtable for all the modules.
743 if (_modules != NULL) {
744 // Destroy the table itself
745 delete _modules;
746 _modules = NULL;
747 }
748
749 // Release C heap allocated hashtable for the dictionary
|
847 }
848 }
849
850 // Add this metadata pointer to be freed when it's safe. This is only during
851 // a safepoint which checks if handles point to this metadata field.
852 void ClassLoaderData::add_to_deallocate_list(Metadata* m) {
853 // Metadata in shared region isn't deleted.
854 if (!m->is_shared()) {
// metaspace_lock guards lazy creation of, and appends to, _deallocate_list.
855 MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
856 if (_deallocate_list == NULL) {
857 _deallocate_list = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(100, true);
858 }
// append_if_missing keeps the list duplicate-free.
859 _deallocate_list->append_if_missing(m);
860 log_debug(class, loader, data)("deallocate added for %s", m->print_value_string());
861 ClassLoaderDataGraph::set_should_clean_deallocate_lists();
862 }
863 }
864
865 // Deallocate free metadata on the free list. How useful the PermGen was!
866 void ClassLoaderData::free_deallocate_list() {
867 // Don't need lock, at safepoint
868 assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
869 assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
870 if (_deallocate_list == NULL) {
871 return;
872 }
873 // Go backwards because this removes entries that are freed.
874 for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
875 Metadata* m = _deallocate_list->at(i);
876 if (!m->on_stack()) {
877 _deallocate_list->remove_at(i);
878 // There are only three types of metadata that we deallocate directly.
879 // Cast them so they can be used by the template function.
880 if (m->is_method()) {
881 MetadataFactory::free_metadata(this, (Method*)m);
882 } else if (m->is_constantPool()) {
883 MetadataFactory::free_metadata(this, (ConstantPool*)m);
884 } else if (m->is_klass()) {
885 MetadataFactory::free_metadata(this, (InstanceKlass*)m);
886 } else {
887 ShouldNotReachHere();
|
865 }
866 }
867
868 // Add this metadata pointer to be freed when it's safe. This is only during
869 // a safepoint which checks if handles point to this metadata field.
870 void ClassLoaderData::add_to_deallocate_list(Metadata* m) {
871 // Metadata in shared region isn't deleted.
872 if (!m->is_shared()) {
// metaspace_lock guards lazy creation of, and appends to, _deallocate_list.
873 MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
874 if (_deallocate_list == NULL) {
875 _deallocate_list = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(100, true);
876 }
// append_if_missing keeps the list duplicate-free.
877 _deallocate_list->append_if_missing(m);
878 log_debug(class, loader, data)("deallocate added for %s", m->print_value_string());
879 ClassLoaderDataGraph::set_should_clean_deallocate_lists();
880 }
881 }
882
883 // Deallocate free metadata on the free list. How useful the PermGen was!
884 void ClassLoaderData::free_deallocate_list() {
885 MutexLockerEx ml(SafepointSynchronize::is_at_safepoint() ? NULL : metaspace_lock(),
886 Mutex::_no_safepoint_check_flag);
887 assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
888 if (_deallocate_list == NULL) {
889 return;
890 }
891 // Go backwards because this removes entries that are freed.
892 for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
893 Metadata* m = _deallocate_list->at(i);
894 if (!m->on_stack()) {
895 _deallocate_list->remove_at(i);
896 // There are only three types of metadata that we deallocate directly.
897 // Cast them so they can be used by the template function.
898 if (m->is_method()) {
899 MetadataFactory::free_metadata(this, (Method*)m);
900 } else if (m->is_constantPool()) {
901 MetadataFactory::free_metadata(this, (ConstantPool*)m);
902 } else if (m->is_klass()) {
903 MetadataFactory::free_metadata(this, (InstanceKlass*)m);
904 } else {
905 ShouldNotReachHere();
|
922
923 // Mark metadata seen on the stack so we can delete unreferenced entries.
924 // Walk all metadata, including the expensive code cache walk, only for class redefinition.
925 // The MetadataOnStackMark walk during redefinition saves previous versions if it finds old methods
926 // on the stack or in the code cache, so we only have to repeat the full walk if
927 // they were found at that time.
928 // TODO: have redefinition clean old methods out of the code cache. They still exist in some places.
929 bool walk_all_metadata = InstanceKlass::has_previous_versions_and_reset();
930
931 MetadataOnStackMark md_on_stack(walk_all_metadata);
932 clean_deallocate_lists(walk_all_metadata);
933 }
934
935 // This is distinct from free_deallocate_list. For class loader data that are
936 // unloading, this frees the C heap memory for items on the list, and unlinks
937 // scratch or error classes so that unloading events aren't triggered for these
938 // classes. The metadata is removed with the unloading metaspace.
939 // There isn't C heap memory allocated for methods, so nothing is done for them.
940 void ClassLoaderData::free_deallocate_list_C_heap_structures() {
941 // Don't need lock, at safepoint
942 assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
943 assert(is_unloading(), "only called for ClassLoaderData that are unloading");
944 if (_deallocate_list == NULL) {
945 return;
946 }
947 // Go backwards because this removes entries that are freed.
948 for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
949 Metadata* m = _deallocate_list->at(i);
950 _deallocate_list->remove_at(i);
951 if (m->is_constantPool()) {
952 ((ConstantPool*)m)->release_C_heap_structures();
953 } else if (m->is_klass()) {
954 InstanceKlass* ik = (InstanceKlass*)m;
955 // also releases ik->constants() C heap memory
956 InstanceKlass::release_C_heap_structures(ik);
957 // Remove the class so unloading events aren't triggered for
958 // this class (scratch or error class) in do_unloading().
959 remove_class(ik);
960 }
961 }
|
940
941 // Mark metadata seen on the stack so we can delete unreferenced entries.
942 // Walk all metadata, including the expensive code cache walk, only for class redefinition.
943 // The MetadataOnStackMark walk during redefinition saves previous versions if it finds old methods
944 // on the stack or in the code cache, so we only have to repeat the full walk if
945 // they were found at that time.
946 // TODO: have redefinition clean old methods out of the code cache. They still exist in some places.
947 bool walk_all_metadata = InstanceKlass::has_previous_versions_and_reset();
948
949 MetadataOnStackMark md_on_stack(walk_all_metadata);
950 clean_deallocate_lists(walk_all_metadata);
951 }
952
953 // This is distinct from free_deallocate_list. For class loader data that are
954 // unloading, this frees the C heap memory for items on the list, and unlinks
955 // scratch or error classes so that unloading events aren't triggered for these
956 // classes. The metadata is removed with the unloading metaspace.
957 // There isn't C heap memory allocated for methods, so nothing is done for them.
958 void ClassLoaderData::free_deallocate_list_C_heap_structures() {
959 // Don't need lock, at safepoint
960 assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
961 assert(is_unloading(), "only called for ClassLoaderData that are unloading");
962 if (_deallocate_list == NULL) {
963 return;
964 }
965 // Go backwards because this removes entries that are freed.
966 for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
967 Metadata* m = _deallocate_list->at(i);
968 _deallocate_list->remove_at(i);
969 if (m->is_constantPool()) {
970 ((ConstantPool*)m)->release_C_heap_structures();
971 } else if (m->is_klass()) {
972 InstanceKlass* ik = (InstanceKlass*)m;
973 // also releases ik->constants() C heap memory
974 InstanceKlass::release_C_heap_structures(ik);
975 // Remove the class so unloading events aren't triggered for
976 // this class (scratch or error class) in do_unloading().
977 remove_class(ik);
978 }
979 }
|
1345 return true;
1346 }
1347 }
1348
1349 return false;
1350 }
1351 #endif // PRODUCT
1352
1353 #if INCLUDE_JFR
1354 static Ticks class_unload_time;
// Emit one JFR ClassUnload event for klass k, back-dated to the shared
// class_unload_time captured once per unloading pass.
355 static void post_class_unload_event(Klass* const k) {
356 assert(k != NULL, "invariant");
357 EventClassUnload event(UNTIMED);
358 event.set_endtime(class_unload_time);
359 event.set_unloadedClass(k);
360 event.set_definingClassLoader(k->class_loader_data());
361 event.commit();
362 }
1363
// Post a JFR ClassUnload event for every class being unloaded, then notify
// JFR of the unloading pass. The timestamp is captured once so all events
// in this pass share the same end time.
364 static void post_class_unload_events() {
365 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
366 if (Jfr::is_enabled()) {
367 if (EventClassUnload::is_enabled()) {
368 class_unload_time = Ticks::now();
369 ClassLoaderDataGraph::classes_unloading_do(&post_class_unload_event);
370 }
371 Jfr::on_unloading_classes();
372 }
373 }
1374 #endif // INCLUDE_JFR
1375
1376 // Move class loader data from main list to the unloaded list for unloading
1377 // and deallocation later.
1378 bool ClassLoaderDataGraph::do_unloading(bool do_cleaning) {
1379
1380 // Indicate whether safepoint cleanup is needed.
1381 _safepoint_cleanup_needed |= do_cleaning;
1382
1383 ClassLoaderData* data = _head;
1384 ClassLoaderData* prev = NULL;
|
1363 return true;
1364 }
1365 }
1366
1367 return false;
1368 }
1369 #endif // PRODUCT
1370
1371 #if INCLUDE_JFR
1372 static Ticks class_unload_time;
// Emit one JFR ClassUnload event for klass k, back-dated to the shared
// class_unload_time captured once per unloading pass.
373 static void post_class_unload_event(Klass* const k) {
374 assert(k != NULL, "invariant");
375 EventClassUnload event(UNTIMED);
376 event.set_endtime(class_unload_time);
377 event.set_unloadedClass(k);
378 event.set_definingClassLoader(k->class_loader_data());
379 event.commit();
380 }
1381
// Post a JFR ClassUnload event for every class being unloaded, then notify
// JFR of the unloading pass.
// NOTE(review): the UseZGC exemption presumably reflects ZGC unloading
// classes concurrently, outside a safepoint — confirm the JFR walk is safe
// against concurrent mutation in that mode.
382 static void post_class_unload_events() {
383 assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
384 if (Jfr::is_enabled()) {
385 if (EventClassUnload::is_enabled()) {
386 class_unload_time = Ticks::now();
387 ClassLoaderDataGraph::classes_unloading_do(&post_class_unload_event);
388 }
389 Jfr::on_unloading_classes();
390 }
391 }
1392 #endif // INCLUDE_JFR
1393
1394 // Move class loader data from main list to the unloaded list for unloading
1395 // and deallocation later.
1396 bool ClassLoaderDataGraph::do_unloading(bool do_cleaning) {
1397
1398 // Indicate whether safepoint cleanup is needed.
1399 _safepoint_cleanup_needed |= do_cleaning;
1400
1401 ClassLoaderData* data = _head;
1402 ClassLoaderData* prev = NULL;
|
1413 _head = data;
1414 }
1415 dead->set_next(_unloading);
1416 _unloading = dead;
1417 }
1418
1419 if (seen_dead_loader) {
1420 data = _head;
1421 while (data != NULL) {
1422 // Remove entries in the dictionary of live class loader that have
1423 // initiated loading classes in a dead class loader.
1424 if (data->dictionary() != NULL) {
1425 data->dictionary()->do_unloading();
1426 }
1427 // Walk a ModuleEntry's reads, and a PackageEntry's exports
1428 // lists to determine if there are modules on those lists that are now
1429 // dead and should be removed. A module's life cycle is equivalent
1430 // to its defining class loader's life cycle. Since a module is
1431 // considered dead if its class loader is dead, these walks must
1432 // occur after each class loader's aliveness is determined.
1433 if (data->packages() != NULL) {
1434 data->packages()->purge_all_package_exports();
1435 }
1436 if (data->modules_defined()) {
1437 data->modules()->purge_all_module_reads();
1438 }
1439 data = data->next();
1440 }
1441 SymbolTable::do_check_concurrent_work();
1442 JFR_ONLY(post_class_unload_events();)
1443 }
1444
1445 log_debug(class, loader, data)("do_unloading: loaders processed %u, loaders removed %u", loaders_processed, loaders_removed);
1446
1447 return seen_dead_loader;
1448 }
1449
1450 // There's at least one dead class loader. Purge references of healthy module
1451 // reads lists and package export lists to modules belonging to dead loaders.
// For every live CLD: drop dictionary entries that initiate-load classes
// from dead loaders, then purge module reads / package exports that refer
// to modules of dead loaders. Must run after loader aliveness is decided.
1452 void ClassLoaderDataGraph::clean_module_and_package_info() {
1453 ClassLoaderData* data = _head;
1454 while (data != NULL) {
1455 // Remove entries in the dictionary of live class loader that have
1456 // initiated loading classes in a dead class loader.
1457 if (data->dictionary() != NULL) {
1458 data->dictionary()->do_unloading();
1459 }
1460 // Walk a ModuleEntry's reads, and a PackageEntry's exports
1461 // lists to determine if there are modules on those lists that are now
1462 // dead and should be removed. A module's life cycle is equivalent
1463 // to its defining class loader's life cycle. Since a module is
1464 // considered dead if its class loader is dead, these walks must
1465 // occur after each class loader's aliveness is determined.
1466 if (data->packages() != NULL) {
1467 data->packages()->purge_all_package_exports();
1468 }
1469 if (data->modules_defined()) {
1470 data->modules()->purge_all_module_reads();
1471 }
1472 data = data->next();
1473 }
1474 }
1475
// Delete every CLD previously moved onto the _unloading list, then purge
// the metaspace and clear the metaspace-OOM flag if anything was freed.
1476 void ClassLoaderDataGraph::purge() {
1477 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
// Detach the whole unloading list first, then walk and delete each node.
1478 ClassLoaderData* list = _unloading;
1479 _unloading = NULL;
1480 ClassLoaderData* next = list;
1481 bool classes_unloaded = false;
1482 while (next != NULL) {
1483 ClassLoaderData* purge_me = next;
1484 next = purge_me->next();
1485 delete purge_me;
1486 classes_unloaded = true;
1487 }
1488 if (classes_unloaded) {
1489 Metaspace::purge();
// Freed metaspace may relieve a previous metaspace OOM condition.
1490 set_metaspace_oom(false);
1491 }
1492 }
1493
1494 int ClassLoaderDataGraph::resize_if_needed() {
1495 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
1496 int resized = 0;
|
1431 _head = data;
1432 }
1433 dead->set_next(_unloading);
1434 _unloading = dead;
1435 }
1436
1437 if (seen_dead_loader) {
1438 data = _head;
1439 while (data != NULL) {
1440 // Remove entries in the dictionary of live class loader that have
1441 // initiated loading classes in a dead class loader.
1442 if (data->dictionary() != NULL) {
1443 data->dictionary()->do_unloading();
1444 }
1445 // Walk a ModuleEntry's reads, and a PackageEntry's exports
1446 // lists to determine if there are modules on those lists that are now
1447 // dead and should be removed. A module's life cycle is equivalent
1448 // to its defining class loader's life cycle. Since a module is
1449 // considered dead if its class loader is dead, these walks must
1450 // occur after each class loader's aliveness is determined.
1451 MutexLockerEx ml(UseZGC ? Module_lock : NULL);
1452 if (data->packages() != NULL) {
1453 data->packages()->purge_all_package_exports();
1454 }
1455 if (data->modules_defined()) {
1456 data->modules()->purge_all_module_reads();
1457 }
1458 data = data->next();
1459 }
1460 if (!UseZGC) {
1461 SymbolTable::do_check_concurrent_work();
1462 }
1463 JFR_ONLY(post_class_unload_events();)
1464 }
1465
1466 log_debug(class, loader, data)("do_unloading: loaders processed %u, loaders removed %u", loaders_processed, loaders_removed);
1467
1468 return seen_dead_loader;
1469 }
1470
1471 // There's at least one dead class loader. Purge references of healthy module
1472 // reads lists and package export lists to modules belonging to dead loaders.
// For every live CLD: drop dictionary entries that initiate-load classes
// from dead loaders, then purge module reads / package exports that refer
// to modules of dead loaders. Must run after loader aliveness is decided.
1473 void ClassLoaderDataGraph::clean_module_and_package_info() {
1474 ClassLoaderData* data = _head;
1475 while (data != NULL) {
1476 // Remove entries in the dictionary of live class loader that have
1477 // initiated loading classes in a dead class loader.
1478 if (data->dictionary() != NULL) {
1479 data->dictionary()->do_unloading();
1480 }
1481 // Walk a ModuleEntry's reads, and a PackageEntry's exports
1482 // lists to determine if there are modules on those lists that are now
1483 // dead and should be removed. A module's life cycle is equivalent
1484 // to its defining class loader's life cycle. Since a module is
1485 // considered dead if its class loader is dead, these walks must
1486 // occur after each class loader's aliveness is determined.
// Under ZGC this runs concurrently, so take Module_lock; a NULL lock is a
// no-op for the at-safepoint case. NOTE(review): the lock is re-acquired
// on every loop iteration — confirm this is intended rather than hoisting
// it above the while loop.
1487 MutexLockerEx ml(UseZGC ? Module_lock : NULL);
1488 if (data->packages() != NULL) {
1489 data->packages()->purge_all_package_exports();
1490 }
1491 if (data->modules_defined()) {
1492 data->modules()->purge_all_module_reads();
1493 }
1494 data = data->next();
1495 }
1496 }
1497
// Delete every CLD previously moved onto the _unloading list, then purge
// the metaspace and clear the metaspace-OOM flag if anything was freed.
// NOTE(review): UseZGC exempts the safepoint assert — presumably because
// ZGC purges concurrently; confirm _unloading is not mutated concurrently
// in that mode.
1498 void ClassLoaderDataGraph::purge() {
1499 assert(UseZGC || SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
// Detach the whole unloading list first, then walk and delete each node.
1500 ClassLoaderData* list = _unloading;
1501 _unloading = NULL;
1502 ClassLoaderData* next = list;
1503 bool classes_unloaded = false;
1504 while (next != NULL) {
1505 ClassLoaderData* purge_me = next;
1506 next = purge_me->next();
1507 delete purge_me;
1508 classes_unloaded = true;
1509 }
1510 if (classes_unloaded) {
1511 Metaspace::purge();
// Freed metaspace may relieve a previous metaspace OOM condition.
1512 set_metaspace_oom(false);
1513 }
1514 }
1515
1516 int ClassLoaderDataGraph::resize_if_needed() {
1517 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
1518 int resized = 0;
|