src/hotspot/share/classfile/classLoaderData.cpp

 170     _dictionary = create_dictionary();
 171   }
 172 
 173   NOT_PRODUCT(_dependency_count = 0); // number of class loader dependencies
 174 
 175   JFR_ONLY(INIT_ID(this);)
 176 }
 177 
 178 ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
 179   Chunk* c = _head;
 180   while (c != NULL) {
 181     Chunk* next = c->_next;
 182     delete c;
 183     c = next;
 184   }
 185 }
 186 
 187 oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
 188   if (_head == NULL || _head->_size == Chunk::CAPACITY) {
 189     Chunk* next = new Chunk(_head);
 190     OrderAccess::release_store(&_head, next);
 191   }
 192   oop* handle = &_head->_data[_head->_size];
 193   NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
 194   OrderAccess::release_store(&_head->_size, _head->_size + 1);
 195   return handle;
 196 }
 197 
 198 int ClassLoaderData::ChunkedHandleList::count() const {
 199   int count = 0;
 200   Chunk* chunk = _head;
 201   while (chunk != NULL) {
 202     count += chunk->_size;
 203     chunk = chunk->_next;
 204   }
 205   return count;
 206 }
 207 
 208 inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chunk* c, const juint size) {
 209   for (juint i = 0; i < size; i++) {
 210     if (c->_data[i] != NULL) {
 211       f->do_oop(&c->_data[i]);
 212     }
 213   }
 214 }
 215 
 216 void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
 217   Chunk* head = OrderAccess::load_acquire(&_head);
 218   if (head != NULL) {
 219     // Only the head chunk can still be growing; read its size with acquire semantics
 220     oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
 221     for (Chunk* c = head->_next; c != NULL; c = c->_next) {
 222       oops_do_chunk(f, c, c->_size);
 223     }
 224   }
 225 }
 226 
 227 class VerifyContainsOopClosure : public OopClosure {
 228   oop  _target;
 229   bool _found;
 230 
 231  public:
 232   VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}
 233 
 234   void do_oop(oop* p) {
 235     if (p != NULL && NativeAccess<AS_NO_KEEPALIVE>::oop_load(p) == _target) {
 236       _found = true;
 237     }
 238   }
 239 
 240   void do_oop(narrowOop* p) {


 309     assert(_keep_alive > 0, "Invalid keep alive decrement count");
 310     _keep_alive--;
 311   }
 312 }
 313 
 314 void ClassLoaderData::oops_do(OopClosure* f, int claim_value, bool clear_mod_oops) {
 315   if (claim_value != ClassLoaderData::_claim_none && !try_claim(claim_value)) {
 316     return;
 317   }
 318 
 319   // Only clear modified_oops after the ClassLoaderData is claimed.
 320   if (clear_mod_oops) {
 321     clear_modified_oops();
 322   }
 323 
 324   _handles.oops_do(f);
 325 }
 326 
 327 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
 328   // Lock-free access requires load_acquire
 329   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 330     klass_closure->do_klass(k);
 331     assert(k != k->next_link(), "no loops!");
 332   }
 333 }
 334 
 335 void ClassLoaderData::classes_do(void f(Klass * const)) {
 336   // Lock-free access requires load_acquire
 337   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 338     f(k);
 339     assert(k != k->next_link(), "no loops!");
 340   }
 341 }
 342 
 343 void ClassLoaderData::methods_do(void f(Method*)) {
 344   // Lock-free access requires load_acquire
 345   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 346     if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
 347       InstanceKlass::cast(k)->methods_do(f);
 348     }
 349   }
 350 }
 351 
 352 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
 353   // Lock-free access requires load_acquire
 354   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 355     // Do not filter ArrayKlass oops here...
 356     if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
 357 #ifdef ASSERT
 358       oop m = k->java_mirror();
 359       assert(m != NULL, "NULL mirror");
 360       assert(m->is_a(SystemDictionary::Class_klass()), "invalid mirror");
 361 #endif
 362       klass_closure->do_klass(k);
 363     }
 364   }
 365 }
 366 
 367 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
 368   // Lock-free access requires load_acquire
 369   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 370     if (k->is_instance_klass()) {
 371       f(InstanceKlass::cast(k));
 372     }
 373     assert(k != k->next_link(), "no loops!");
 374   }
 375 }
 376 
 377 void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
 378   assert_locked_or_safepoint(Module_lock);
 379   if (_unnamed_module != NULL) {
 380     f(_unnamed_module);
 381   }
 382   if (_modules != NULL) {
 383     for (int i = 0; i < _modules->table_size(); i++) {
 384       for (ModuleEntry* entry = _modules->bucket(i);
 385            entry != NULL;
 386            entry = entry->next()) {
 387         f(entry);
 388       }
 389     }


 448       ls.print("adding dependency from ");
 449       print_value_on(&ls);
 450       ls.print(" to ");
 451       to_cld->print_value_on(&ls);
 452       ls.cr();
 453     }
 454     Handle dependency(Thread::current(), to);
 455     add_handle(dependency);
 456     // Added a potentially young gen oop to the ClassLoaderData
 457     record_modified_oops();
 458   }
 459 }
 460 
 461 void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
 462   {
 463     MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
 464     Klass* old_value = _klasses;
 465     k->set_next_link(old_value);
 466     // Link the new item into the list, making sure the linked class is stable
 467     // since the list can be walked without a lock
 468     OrderAccess::release_store(&_klasses, k);
 469     if (k->is_array_klass()) {
 470       ClassLoaderDataGraph::inc_array_classes(1);
 471     } else {
 472       ClassLoaderDataGraph::inc_instance_classes(1);
 473     }
 474   }
 475 
 476   if (publicize) {
 477     LogTarget(Trace, class, loader, data) lt;
 478     if (lt.is_enabled()) {
 479       ResourceMark rm;
 480       LogStream ls(lt);
 481       ls.print("Adding k: " PTR_FORMAT " %s to ", p2i(k), k->external_name());
 482       print_value_on(&ls);
 483       ls.cr();
 484     }
 485   }
 486 }
 487 
 488 void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {


 535     print_value_on(&ls);
 536     ls.cr();
 537   }
 538 
 539   // Some items on the _deallocate_list need to free their C heap structures
 540   // if they are not already on the _klasses list.
 541   free_deallocate_list_C_heap_structures();
 542 
 543   // Clean up class dependencies and tell serviceability tools
 544   // these classes are unloading.  Must be called
 545   // after erroneous classes are released.
 546   classes_do(InstanceKlass::unload_class);
 547 
 548   // Clean up global class iterator for compiler
 549   ClassLoaderDataGraph::adjust_saved_class(this);
 550 }
 551 
 552 ModuleEntryTable* ClassLoaderData::modules() {
 553   // Lazily create the module entry table at first request.
 554   // Lock-free access requires load_acquire.
 555   ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
 556   if (modules == NULL) {
 557     MutexLocker m1(Module_lock);
 558     // Check if _modules got allocated while we were waiting for this lock.
 559     if ((modules = _modules) == NULL) {
 560       modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
 561 
 562       {
 563         MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
 564         // Ensure _modules is stable, since it is examined without a lock
 565         OrderAccess::release_store(&_modules, modules);
 566       }
 567     }
 568   }
 569   return modules;
 570 }
 571 
 572 const int _boot_loader_dictionary_size    = 1009;
 573 const int _default_loader_dictionary_size = 107;
 574 
 575 Dictionary* ClassLoaderData::create_dictionary() {
 576   assert(!is_unsafe_anonymous(), "unsafe anonymous class loader data do not have a dictionary");
 577   int size;
 578   bool resizable = false;
 579   if (_the_null_class_loader_data == NULL) {
 580     size = _boot_loader_dictionary_size;
 581     resizable = true;
 582   } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
 583     size = 1;  // there's only one class in a reflection class loader and no initiated classes
 584   } else if (is_system_class_loader_data()) {
 585     size = _boot_loader_dictionary_size;


 735 // even if its class loader is one of these loaders.
 736 bool ClassLoaderData::is_builtin_class_loader_data() const {
 737   return (is_boot_class_loader_data() ||
 738           SystemDictionary::is_system_class_loader(class_loader()) ||
 739           SystemDictionary::is_platform_class_loader(class_loader()));
 740 }
 741 
 742 // Returns true if this class loader data is a class loader data
 743 // that is not ever freed by a GC.  It must be the CLD for one of the builtin
 744 // class loaders and not the CLD for an unsafe anonymous class.
 745 bool ClassLoaderData::is_permanent_class_loader_data() const {
 746   return is_builtin_class_loader_data() && !is_unsafe_anonymous();
 747 }
 748 
 749 ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
 750   // If the metaspace has not been allocated, create a new one.  We might
 751   // want to create a smaller arena for Reflection class loaders as well.
 752   // The allocation is delayed because some class loaders exist simply to
 753   // delegate and have no metadata of their own.
 754   // Lock-free access requires load_acquire.
 755   ClassLoaderMetaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
 756   if (metaspace == NULL) {
 757     MutexLocker ml(_metaspace_lock,  Mutex::_no_safepoint_check_flag);
 758     // Check if _metaspace got allocated while we were waiting for this lock.
 759     if ((metaspace = _metaspace) == NULL) {
 760       if (this == the_null_class_loader_data()) {
 761         assert (class_loader() == NULL, "Must be");
 762         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
 763       } else if (is_unsafe_anonymous()) {
 764         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::UnsafeAnonymousMetaspaceType);
 765       } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
 766         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
 767       } else {
 768         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
 769       }
 770       // Ensure _metaspace is stable, since it is examined without a lock
 771       OrderAccess::release_store(&_metaspace, metaspace);
 772     }
 773   }
 774   return metaspace;
 775 }
 776 
 777 OopHandle ClassLoaderData::add_handle(Handle h) {
 778   MutexLocker ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
 779   record_modified_oops();
 780   return OopHandle(_handles.add(h()));
 781 }
 782 
 783 void ClassLoaderData::remove_handle(OopHandle h) {
 784   assert(!is_unloading(), "Do not remove a handle for a CLD that is unloading");
 785   oop* ptr = h.ptr_raw();
 786   if (ptr != NULL) {
 787     assert(_handles.owner_of(ptr), "Got unexpected handle " PTR_FORMAT, p2i(ptr));
 788     NativeAccess<>::oop_store(ptr, oop(NULL));
 789   }
 790 }
 791 


 952   assert_locked_or_safepoint(_metaspace_lock);
 953   oop cl = class_loader();
 954 
 955   guarantee(this == class_loader_data(cl) || is_unsafe_anonymous(), "Must be the same");
 956   guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_unsafe_anonymous(), "must be");
 957 
 958   // Verify the integrity of the allocated space.
 959   if (metaspace_or_null() != NULL) {
 960     metaspace_or_null()->verify();
 961   }
 962 
 963   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
 964     guarantee(k->class_loader_data() == this, "Must be the same");
 965     k->verify();
 966     assert(k != k->next_link(), "no loops!");
 967   }
 968 }
 969 
 970 bool ClassLoaderData::contains_klass(Klass* klass) {
 971   // Lock-free access requires load_acquire
 972   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 973     if (k == klass) return true;
 974   }
 975   return false;
 976 }


 170     _dictionary = create_dictionary();
 171   }
 172 
 173   NOT_PRODUCT(_dependency_count = 0); // number of class loader dependencies
 174 
 175   JFR_ONLY(INIT_ID(this);)
 176 }
 177 
 178 ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
 179   Chunk* c = _head;
 180   while (c != NULL) {
 181     Chunk* next = c->_next;
 182     delete c;
 183     c = next;
 184   }
 185 }
 186 
 187 oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
 188   if (_head == NULL || _head->_size == Chunk::CAPACITY) {
 189     Chunk* next = new Chunk(_head);
 190     Atomic::release_store(&_head, next);
 191   }
 192   oop* handle = &_head->_data[_head->_size];
 193   NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
 194   Atomic::release_store(&_head->_size, _head->_size + 1);
 195   return handle;
 196 }
 197 
 198 int ClassLoaderData::ChunkedHandleList::count() const {
 199   int count = 0;
 200   Chunk* chunk = _head;
 201   while (chunk != NULL) {
 202     count += chunk->_size;
 203     chunk = chunk->_next;
 204   }
 205   return count;
 206 }
 207 
 208 inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chunk* c, const juint size) {
 209   for (juint i = 0; i < size; i++) {
 210     if (c->_data[i] != NULL) {
 211       f->do_oop(&c->_data[i]);
 212     }
 213   }
 214 }
 215 
 216 void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
 217   Chunk* head = Atomic::load_acquire(&_head);
 218   if (head != NULL) {
 219     // Only the head chunk can still be growing; read its size with acquire semantics
 220     oops_do_chunk(f, head, Atomic::load_acquire(&head->_size));
 221     for (Chunk* c = head->_next; c != NULL; c = c->_next) {
 222       oops_do_chunk(f, c, c->_size);
 223     }
 224   }
 225 }
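
The add/oops_do pair above is a single-writer, multi-reader publication scheme: add runs under the metaspace lock, fills a slot, and only then release-stores the new _size (or the new _head), while oops_do walks the chunks lock-free, pairing each of those stores with a load_acquire. Only the head chunk can still be growing; the chunks behind it are full and immutable. A minimal standalone sketch of the same idiom, with std::atomic standing in for HotSpot's Atomic wrapper (types and names below are illustrative, not the real ones):

#include <atomic>

struct Chunk {
  static const int CAPACITY = 32;
  int _data[CAPACITY];            // payload slots (oops in the real list)
  std::atomic<int> _size{0};      // count of published slots
  Chunk* _next;                   // older chunks, all full and immutable
  explicit Chunk(Chunk* next) : _next(next) {}
};

struct ChunkedList {
  std::atomic<Chunk*> _head{nullptr};

  // Writer: serialized by an external lock, so plain loads suffice; only
  // the stores that publish data to lock-free readers need release ordering.
  int* add(int v) {
    Chunk* head = _head.load(std::memory_order_relaxed);
    if (head == nullptr ||
        head->_size.load(std::memory_order_relaxed) == Chunk::CAPACITY) {
      head = new Chunk(head);
      _head.store(head, std::memory_order_release);       // publish new chunk
    }
    int i = head->_size.load(std::memory_order_relaxed);
    head->_data[i] = v;                                    // fill slot first,
    head->_size.store(i + 1, std::memory_order_release);   // then publish it
    return &head->_data[i];
  }

  // Reader: lock-free; each acquire pairs with the writer's release store,
  // so a slot is never observed before its contents have been written.
  template <typename F>
  void for_each(F f) {
    Chunk* head = _head.load(std::memory_order_acquire);
    for (Chunk* c = head; c != nullptr; c = c->_next) {
      int n = (c == head)
          ? c->_size.load(std::memory_order_acquire)   // may still be growing
          : c->_size.load(std::memory_order_relaxed);  // full and immutable
      for (int i = 0; i < n; i++) {
        f(c->_data[i]);
      }
    }
  }
};
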
 226 
 227 class VerifyContainsOopClosure : public OopClosure {
 228   oop  _target;
 229   bool _found;
 230 
 231  public:
 232   VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}
 233 
 234   void do_oop(oop* p) {
 235     if (p != NULL && NativeAccess<AS_NO_KEEPALIVE>::oop_load(p) == _target) {
 236       _found = true;
 237     }
 238   }
 239 
 240   void do_oop(narrowOop* p) {


 309     assert(_keep_alive > 0, "Invalid keep alive decrement count");
 310     _keep_alive--;
 311   }
 312 }
 313 
 314 void ClassLoaderData::oops_do(OopClosure* f, int claim_value, bool clear_mod_oops) {
 315   if (claim_value != ClassLoaderData::_claim_none && !try_claim(claim_value)) {
 316     return;
 317   }
 318 
 319   // Only clear modified_oops after the ClassLoaderData is claimed.
 320   if (clear_mod_oops) {
 321     clear_modified_oops();
 322   }
 323 
 324   _handles.oops_do(f);
 325 }
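
try_claim is not part of this hunk, but the guard above implies that each traversal claims a CLD at most once before touching its handles. A hypothetical sketch of such a claim flag as a compare-exchange loop (the real field and bit layout may differ):

#include <atomic>

// Hypothetical claim flag: a traversal atomically ORs its claim bit in and
// processes the CLD only if it is the thread that set the bit.
struct ClaimFlag {
  std::atomic<int> _claim{0};

  bool try_claim(int claim_value) {
    int old_claim = _claim.load(std::memory_order_relaxed);
    for (;;) {
      if ((old_claim & claim_value) == claim_value) {
        return false;                   // already claimed; skip this CLD
      }
      if (_claim.compare_exchange_weak(old_claim, old_claim | claim_value)) {
        return true;                    // this thread won the claim
      }
      // compare_exchange_weak refreshed old_claim on failure; retry.
    }
  }
};
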
 326 
 327 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
 328   // Lock-free access requires load_acquire
 329   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 330     klass_closure->do_klass(k);
 331     assert(k != k->next_link(), "no loops!");
 332   }
 333 }
 334 
 335 void ClassLoaderData::classes_do(void f(Klass * const)) {
 336   // Lock-free access requires load_acquire
 337   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 338     f(k);
 339     assert(k != k->next_link(), "no loops!");
 340   }
 341 }
 342 
 343 void ClassLoaderData::methods_do(void f(Method*)) {
 344   // Lock-free access requires load_acquire
 345   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 346     if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
 347       InstanceKlass::cast(k)->methods_do(f);
 348     }
 349   }
 350 }
 351 
 352 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
 353   // Lock-free access requires load_acquire
 354   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 355     // Do not filter ArrayKlass oops here...
 356     if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
 357 #ifdef ASSERT
 358       oop m = k->java_mirror();
 359       assert(m != NULL, "NULL mirror");
 360       assert(m->is_a(SystemDictionary::Class_klass()), "invalid mirror");
 361 #endif
 362       klass_closure->do_klass(k);
 363     }
 364   }
 365 }
 366 
 367 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
 368   // Lock-free access requires load_acquire
 369   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 370     if (k->is_instance_klass()) {
 371       f(InstanceKlass::cast(k));
 372     }
 373     assert(k != k->next_link(), "no loops!");
 374   }
 375 }
 376 
 377 void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
 378   assert_locked_or_safepoint(Module_lock);
 379   if (_unnamed_module != NULL) {
 380     f(_unnamed_module);
 381   }
 382   if (_modules != NULL) {
 383     for (int i = 0; i < _modules->table_size(); i++) {
 384       for (ModuleEntry* entry = _modules->bucket(i);
 385            entry != NULL;
 386            entry = entry->next()) {
 387         f(entry);
 388       }
 389     }


 448       ls.print("adding dependency from ");
 449       print_value_on(&ls);
 450       ls.print(" to ");
 451       to_cld->print_value_on(&ls);
 452       ls.cr();
 453     }
 454     Handle dependency(Thread::current(), to);
 455     add_handle(dependency);
 456     // Added a potentially young gen oop to the ClassLoaderData
 457     record_modified_oops();
 458   }
 459 }
 460 
 461 void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
 462   {
 463     MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
 464     Klass* old_value = _klasses;
 465     k->set_next_link(old_value);
 466     // Link the new item into the list, making sure the linked class is stable
 467     // since the list can be walked without a lock
 468     Atomic::release_store(&_klasses, k);
 469     if (k->is_array_klass()) {
 470       ClassLoaderDataGraph::inc_array_classes(1);
 471     } else {
 472       ClassLoaderDataGraph::inc_instance_classes(1);
 473     }
 474   }
 475 
 476   if (publicize) {
 477     LogTarget(Trace, class, loader, data) lt;
 478     if (lt.is_enabled()) {
 479       ResourceMark rm;
 480       LogStream ls(lt);
 481       ls.print("Adding k: " PTR_FORMAT " %s to ", p2i(k), k->external_name());
 482       print_value_on(&ls);
 483       ls.cr();
 484     }
 485   }
 486 }
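
add_class is the writer side of the lock-free _klasses list that the classes_do family walks: the new Klass is fully linked via set_next_link before the release store publishes it as the new head, so a reader that load-acquires _klasses sees only fully linked nodes and an immutable tail behind them. A stripped-down sketch of the prepend/walk pair (illustrative names, std::atomic standing in for HotSpot's Atomic):

#include <atomic>

// Prepend-only list: writers serialize on an external lock, readers
// traverse without one.
struct Node {
  int payload;
  Node* _next_link = nullptr;
};

struct PrependOnlyList {
  std::atomic<Node*> _klasses{nullptr};

  void add(Node* n) {                  // caller holds the writer lock
    n->_next_link = _klasses.load(std::memory_order_relaxed);
    _klasses.store(n, std::memory_order_release);  // publish fully linked node
  }

  template <typename F>
  void classes_do(F f) {               // lock-free reader
    for (Node* n = _klasses.load(std::memory_order_acquire);
         n != nullptr; n = n->_next_link) {
      f(n->payload);
    }
  }
};
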
 487 
 488 void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {


 535     print_value_on(&ls);
 536     ls.cr();
 537   }
 538 
 539   // Some items on the _deallocate_list need to free their C heap structures
 540   // if they are not already on the _klasses list.
 541   free_deallocate_list_C_heap_structures();
 542 
 543   // Clean up class dependencies and tell serviceability tools
 544   // these classes are unloading.  Must be called
 545   // after erroneous classes are released.
 546   classes_do(InstanceKlass::unload_class);
 547 
 548   // Clean up global class iterator for compiler
 549   ClassLoaderDataGraph::adjust_saved_class(this);
 550 }
 551 
 552 ModuleEntryTable* ClassLoaderData::modules() {
 553   // Lazily create the module entry table at first request.
 554   // Lock-free access requires load_acquire.
 555   ModuleEntryTable* modules = Atomic::load_acquire(&_modules);
 556   if (modules == NULL) {
 557     MutexLocker m1(Module_lock);
 558     // Check if _modules got allocated while we were waiting for this lock.
 559     if ((modules = _modules) == NULL) {
 560       modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
 561 
 562       {
 563         MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
 564         // Ensure _modules is stable, since it is examined without a lock
 565         Atomic::release_store(&_modules, modules);
 566       }
 567     }
 568   }
 569   return modules;
 570 }
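
modules() above, like metaspace_non_null() further down, is the double-checked locking idiom made safe with acquire/release ordering: a lock-free load_acquire fast path, a re-check under the lock, and a release_store that publishes only a fully constructed table. A self-contained sketch, assuming Atomic::load_acquire/release_store correspond to C++ acquire/release semantics:

#include <atomic>
#include <mutex>

struct Table { /* lazily built contents */ };

struct LazyHolder {
  std::atomic<Table*> _table{nullptr};
  std::mutex _lock;

  Table* get() {
    Table* t = _table.load(std::memory_order_acquire);  // lock-free fast path
    if (t == nullptr) {
      std::lock_guard<std::mutex> g(_lock);
      t = _table.load(std::memory_order_relaxed);       // re-check under lock
      if (t == nullptr) {
        t = new Table();
        _table.store(t, std::memory_order_release);     // publish when complete
      }
    }
    return t;
  }
};
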
 571 
 572 const int _boot_loader_dictionary_size    = 1009;
 573 const int _default_loader_dictionary_size = 107;
 574 
 575 Dictionary* ClassLoaderData::create_dictionary() {
 576   assert(!is_unsafe_anonymous(), "unsafe anonymous class loader data do not have a dictionary");
 577   int size;
 578   bool resizable = false;
 579   if (_the_null_class_loader_data == NULL) {
 580     size = _boot_loader_dictionary_size;
 581     resizable = true;
 582   } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
 583     size = 1;  // there's only one class in a reflection class loader and no initiated classes
 584   } else if (is_system_class_loader_data()) {
 585     size = _boot_loader_dictionary_size;


 735 // even if its class loader is one of these loaders.
 736 bool ClassLoaderData::is_builtin_class_loader_data() const {
 737   return (is_boot_class_loader_data() ||
 738           SystemDictionary::is_system_class_loader(class_loader()) ||
 739           SystemDictionary::is_platform_class_loader(class_loader()));
 740 }
 741 
 742 // Returns true if this class loader data is a class loader data
 743 // that is not ever freed by a GC.  It must be the CLD for one of the builtin
 744 // class loaders and not the CLD for an unsafe anonymous class.
 745 bool ClassLoaderData::is_permanent_class_loader_data() const {
 746   return is_builtin_class_loader_data() && !is_unsafe_anonymous();
 747 }
 748 
 749 ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
 750   // If the metaspace has not been allocated, create a new one.  We might
 751   // want to create a smaller arena for Reflection class loaders as well.
 752   // The allocation is delayed because some class loaders exist simply to
 753   // delegate and have no metadata of their own.
 754   // Lock-free access requires load_acquire.
 755   ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace);
 756   if (metaspace == NULL) {
 757     MutexLocker ml(_metaspace_lock,  Mutex::_no_safepoint_check_flag);
 758     // Check if _metaspace got allocated while we were waiting for this lock.
 759     if ((metaspace = _metaspace) == NULL) {
 760       if (this == the_null_class_loader_data()) {
 761         assert (class_loader() == NULL, "Must be");
 762         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
 763       } else if (is_unsafe_anonymous()) {
 764         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::UnsafeAnonymousMetaspaceType);
 765       } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
 766         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
 767       } else {
 768         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
 769       }
 770       // Ensure _metaspace is stable, since it is examined without a lock
 771       Atomic::release_store(&_metaspace, metaspace);
 772     }
 773   }
 774   return metaspace;
 775 }
 776 
 777 OopHandle ClassLoaderData::add_handle(Handle h) {
 778   MutexLocker ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
 779   record_modified_oops();
 780   return OopHandle(_handles.add(h()));
 781 }
 782 
 783 void ClassLoaderData::remove_handle(OopHandle h) {
 784   assert(!is_unloading(), "Do not remove a handle for a CLD that is unloading");
 785   oop* ptr = h.ptr_raw();
 786   if (ptr != NULL) {
 787     assert(_handles.owner_of(ptr), "Got unexpected handle " PTR_FORMAT, p2i(ptr));
 788     NativeAccess<>::oop_store(ptr, oop(NULL));
 789   }
 790 }
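
Note the design choice in the pair above: remove_handle never unlinks a slot, it only clears it with an ordered NULL store. Chunks are freed solely by the ChunkedHandleList destructor when the CLD itself dies, which is what lets oops_do walk the chunk list concurrently, skipping NULL entries, without ever racing against a deallocation.
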
 791 


 952   assert_locked_or_safepoint(_metaspace_lock);
 953   oop cl = class_loader();
 954 
 955   guarantee(this == class_loader_data(cl) || is_unsafe_anonymous(), "Must be the same");
 956   guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_unsafe_anonymous(), "must be");
 957 
 958   // Verify the integrity of the allocated space.
 959   if (metaspace_or_null() != NULL) {
 960     metaspace_or_null()->verify();
 961   }
 962 
 963   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
 964     guarantee(k->class_loader_data() == this, "Must be the same");
 965     k->verify();
 966     assert(k != k->next_link(), "no loops!");
 967   }
 968 }
 969 
 970 bool ClassLoaderData::contains_klass(Klass* klass) {
 971   // Lock-free access requires load_acquire
 972   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
 973     if (k == klass) return true;
 974   }
 975   return false;
 976 }