src/hotspot/share/classfile/classLoaderData.cpp

*** 185,199 ****
  }
  
  oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
    if (_head == NULL || _head->_size == Chunk::CAPACITY) {
      Chunk* next = new Chunk(_head);
!     OrderAccess::release_store(&_head, next);
    }
    oop* handle = &_head->_data[_head->_size];
    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
!   OrderAccess::release_store(&_head->_size, _head->_size + 1);
    return handle;
  }
  
  int ClassLoaderData::ChunkedHandleList::count() const {
    int count = 0;
--- 185,199 ----
  }
  
  oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
    if (_head == NULL || _head->_size == Chunk::CAPACITY) {
      Chunk* next = new Chunk(_head);
!     Atomic::release_store(&_head, next);
    }
    oop* handle = &_head->_data[_head->_size];
    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
!   Atomic::release_store(&_head->_size, _head->_size + 1);
    return handle;
  }
  
  int ClassLoaderData::ChunkedHandleList::count() const {
    int count = 0;
*** 212,225 ****
      }
    }
  }
  
  void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
!   Chunk* head = OrderAccess::load_acquire(&_head);
    if (head != NULL) {
      // Must be careful when reading size of head
!     oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
      for (Chunk* c = head->_next; c != NULL; c = c->_next) {
        oops_do_chunk(f, c, c->_size);
      }
    }
  }
--- 212,225 ----
      }
    }
  }
  
  void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
!   Chunk* head = Atomic::load_acquire(&_head);
    if (head != NULL) {
      // Must be careful when reading size of head
!     oops_do_chunk(f, head, Atomic::load_acquire(&head->_size));
      for (Chunk* c = head->_next; c != NULL; c = c->_next) {
        oops_do_chunk(f, c, c->_size);
      }
    }
  }
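
The two hunks above form a release/acquire pair: add() release-stores first the new chunk pointer and then the incremented size, so oops_do(), which acquire-loads _head and the head's _size, never observes an unpublished chunk or an unfilled slot. Below is a minimal sketch of the same pattern, using std::atomic as a stand-in for HotSpot's Atomic:: wrappers; the Chunk layout, the CAPACITY value, and the void* payload are illustrative, not the HotSpot definitions.

  #include <atomic>
  #include <cstddef>

  // Sketch of the ChunkedHandleList release/acquire pattern. A single writer
  // (holding the owning lock, as in HotSpot) appends; readers scan lock-free.
  struct Chunk {
    static constexpr std::size_t CAPACITY = 32;   // illustrative size
    void*                    _data[CAPACITY];
    std::atomic<std::size_t> _size;   // release-stored after each slot fill
    Chunk* const             _next;   // immutable after construction
    explicit Chunk(Chunk* next) : _size(0), _next(next) {}
  };

  static std::atomic<Chunk*> _head{nullptr};

  void** add(void* o) {
    Chunk* head = _head.load(std::memory_order_relaxed);    // single writer
    if (head == nullptr ||
        head->_size.load(std::memory_order_relaxed) == Chunk::CAPACITY) {
      head = new Chunk(head);
      // Publish the chunk: its constructed fields must be visible to any
      // reader that observes the new head pointer.
      _head.store(head, std::memory_order_release);
    }
    std::size_t i = head->_size.load(std::memory_order_relaxed);
    void** handle = &head->_data[i];
    *handle = o;
    // Publish the slot: the store to _data[i] becomes visible before the
    // incremented size, so an acquire-reader never sees an unfilled slot.
    head->_size.store(i + 1, std::memory_order_release);
    return handle;
  }

  void scan(void (*f)(void**)) {
    Chunk* head = _head.load(std::memory_order_acquire);    // pairs with add()
    if (head == nullptr) return;
    // Only the head chunk can still grow, so only its size needs acquire.
    std::size_t n = head->_size.load(std::memory_order_acquire);
    for (std::size_t i = 0; i < n; i++) f(&head->_data[i]);
    for (Chunk* c = head->_next; c != nullptr; c = c->_next) {
      std::size_t full = c->_size.load(std::memory_order_relaxed); // chunk is full
      for (std::size_t i = 0; i < full; i++) f(&c->_data[i]);
    }
  }
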
*** 324,359 ****
    _handles.oops_do(f);
  }
  
  void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
    // Lock-free access requires load_acquire
!   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      klass_closure->do_klass(k);
      assert(k != k->next_link(), "no loops!");
    }
  }
  
  void ClassLoaderData::classes_do(void f(Klass * const)) {
    // Lock-free access requires load_acquire
!   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      f(k);
      assert(k != k->next_link(), "no loops!");
    }
  }
  
  void ClassLoaderData::methods_do(void f(Method*)) {
    // Lock-free access requires load_acquire
!   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
        InstanceKlass::cast(k)->methods_do(f);
      }
    }
  }
  
  void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
    // Lock-free access requires load_acquire
!   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      // Do not filter ArrayKlass oops here...
      if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
  #ifdef ASSERT
        oop m = k->java_mirror();
        assert(m != NULL, "NULL mirror");
--- 324,359 ----
    _handles.oops_do(f);
  }
  
  void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
    // Lock-free access requires load_acquire
!   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      klass_closure->do_klass(k);
      assert(k != k->next_link(), "no loops!");
    }
  }
  
  void ClassLoaderData::classes_do(void f(Klass * const)) {
    // Lock-free access requires load_acquire
!   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      f(k);
      assert(k != k->next_link(), "no loops!");
    }
  }
  
  void ClassLoaderData::methods_do(void f(Method*)) {
    // Lock-free access requires load_acquire
!   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
        InstanceKlass::cast(k)->methods_do(f);
      }
    }
  }
  
  void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
    // Lock-free access requires load_acquire
!   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      // Do not filter ArrayKlass oops here...
      if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
  #ifdef ASSERT
        oop m = k->java_mirror();
        assert(m != NULL, "NULL mirror");
*** 364,374 ****
    }
  }
  
  void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
    // Lock-free access requires load_acquire
!   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      if (k->is_instance_klass()) {
        f(InstanceKlass::cast(k));
      }
      assert(k != k->next_link(), "no loops!");
    }
--- 364,374 ----
    }
  }
  
  void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
    // Lock-free access requires load_acquire
!   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      if (k->is_instance_klass()) {
        f(InstanceKlass::cast(k));
      }
      assert(k != k->next_link(), "no loops!");
    }
*** 463,473 ****
    MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
    Klass* old_value = _klasses;
    k->set_next_link(old_value);
    // Link the new item into the list, making sure the linked class is stable
    // since the list can be walked without a lock
!   OrderAccess::release_store(&_klasses, k);
    if (k->is_array_klass()) {
      ClassLoaderDataGraph::inc_array_classes(1);
    } else {
      ClassLoaderDataGraph::inc_instance_classes(1);
    }
--- 463,473 ----
    MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
    Klass* old_value = _klasses;
    k->set_next_link(old_value);
    // Link the new item into the list, making sure the linked class is stable
    // since the list can be walked without a lock
!   Atomic::release_store(&_klasses, k);
    if (k->is_array_klass()) {
      ClassLoaderDataGraph::inc_array_classes(1);
    } else {
      ClassLoaderDataGraph::inc_instance_classes(1);
    }
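
The hunk above is the publication half of the lock-free klass list that the classes_do/methods_do walkers read: set_next_link happens before the release-store of _klasses, so an acquire-load of the head always yields a fully linked node. A minimal sketch of that shape follows; Node is a hypothetical stand-in for Klass, and std::atomic stands in for Atomic::release_store/Atomic::load_acquire.

  #include <atomic>

  // Writer links the new node's next pointer first, then release-stores the
  // head; lock-free readers acquire-load the head and walk next pointers.
  struct Node {
    Node* _next = nullptr;                 // written only before publication
    Node* next_link() const { return _next; }
    void set_next_link(Node* n) { _next = n; }
  };

  static std::atomic<Node*> _klasses{nullptr};

  void add_node(Node* k) {                 // caller holds the owning lock
    k->set_next_link(_klasses.load(std::memory_order_relaxed));
    // Pairs with the acquire load in walk(): all prior stores to *k are
    // visible to any reader that observes k through _klasses.
    _klasses.store(k, std::memory_order_release);
  }

  template <typename F>
  void walk(F f) {                         // lock-free reader
    for (Node* k = _klasses.load(std::memory_order_acquire);
         k != nullptr; k = k->next_link()) {
      f(k);                                // older nodes were published by
    }                                      // earlier release-stores to _klasses
  }
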
*** 550,570 ****
  }
  
  ModuleEntryTable* ClassLoaderData::modules() {
    // Lazily create the module entry table at first request.
    // Lock-free access requires load_acquire.
!   ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
    if (modules == NULL) {
      MutexLocker m1(Module_lock);
      // Check if _modules got allocated while we were waiting for this lock.
      if ((modules = _modules) == NULL) {
        modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
  
        {
          MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
          // Ensure _modules is stable, since it is examined without a lock
!         OrderAccess::release_store(&_modules, modules);
        }
      }
    }
    return modules;
  }
--- 550,570 ----
  }
  
  ModuleEntryTable* ClassLoaderData::modules() {
    // Lazily create the module entry table at first request.
    // Lock-free access requires load_acquire.
!   ModuleEntryTable* modules = Atomic::load_acquire(&_modules);
    if (modules == NULL) {
      MutexLocker m1(Module_lock);
      // Check if _modules got allocated while we were waiting for this lock.
      if ((modules = _modules) == NULL) {
        modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
  
        {
          MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
          // Ensure _modules is stable, since it is examined without a lock
!         Atomic::release_store(&_modules, modules);
        }
      }
    }
    return modules;
  }
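
modules() above (and the metaspace accessor in the next hunks) is classic double-checked locking: a lock-free acquire-load fast path, a re-check under the lock, and a release-store of the fully constructed table. A compact sketch of the same shape, with std::mutex and a placeholder Table standing in for Module_lock and ModuleEntryTable; the nested metaspace_lock scope in the real code is omitted here.

  #include <atomic>
  #include <mutex>

  struct Table { /* module entries */ };

  static std::atomic<Table*> _modules{nullptr};
  static std::mutex          table_lock;

  Table* modules() {
    Table* t = _modules.load(std::memory_order_acquire);  // fast path, no lock
    if (t == nullptr) {
      std::lock_guard<std::mutex> guard(table_lock);
      // Re-check: another thread may have created the table while we waited.
      t = _modules.load(std::memory_order_relaxed);
      if (t == nullptr) {
        t = new Table();
        // Release-store so a lock-free reader that sees the pointer also
        // sees the fully constructed Table.
        _modules.store(t, std::memory_order_release);
      }
    }
    return t;
  }
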
*** 750,760 ****
    // If the metaspace has not been allocated, create a new one.  Might want
    // to create smaller arena for Reflection class loaders also.
    // The reason for the delayed allocation is because some class loaders are
    // simply for delegating with no metadata of their own.
    // Lock-free access requires load_acquire.
!   ClassLoaderMetaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
    if (metaspace == NULL) {
      MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
      // Check if _metaspace got allocated while we were waiting for this lock.
      if ((metaspace = _metaspace) == NULL) {
        if (this == the_null_class_loader_data()) {
--- 750,760 ----
    // If the metaspace has not been allocated, create a new one.  Might want
    // to create smaller arena for Reflection class loaders also.
    // The reason for the delayed allocation is because some class loaders are
    // simply for delegating with no metadata of their own.
    // Lock-free access requires load_acquire.
!   ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace);
    if (metaspace == NULL) {
      MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
      // Check if _metaspace got allocated while we were waiting for this lock.
      if ((metaspace = _metaspace) == NULL) {
        if (this == the_null_class_loader_data()) {
*** 766,776 ****
          metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
        } else {
          metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
        }
        // Ensure _metaspace is stable, since it is examined without a lock
!       OrderAccess::release_store(&_metaspace, metaspace);
      }
    }
    return metaspace;
  }
  
--- 766,776 ----
          metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
        } else {
          metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
        }
        // Ensure _metaspace is stable, since it is examined without a lock
!       Atomic::release_store(&_metaspace, metaspace);
      }
    }
    return metaspace;
  }
  
*** 967,976 ****
    }
  }
  
  bool ClassLoaderData::contains_klass(Klass* klass) {
    // Lock-free access requires load_acquire
!   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      if (k == klass) return true;
    }
    return false;
  }
--- 967,976 ----
    }
  }
  
  bool ClassLoaderData::contains_klass(Klass* klass) {
    // Lock-free access requires load_acquire
!   for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
      if (k == klass) return true;
    }
    return false;
  }