--- old/src/hotspot/share/classfile/classLoaderData.cpp	2019-11-21 11:17:06.727514567 +0100
+++ new/src/hotspot/share/classfile/classLoaderData.cpp	2019-11-21 11:17:06.311507889 +0100
@@ -187,11 +187,11 @@
 oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
   if (_head == NULL || _head->_size == Chunk::CAPACITY) {
     Chunk* next = new Chunk(_head);
-    OrderAccess::release_store(&_head, next);
+    Atomic::release_store(&_head, next);
   }
   oop* handle = &_head->_data[_head->_size];
   NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
-  OrderAccess::release_store(&_head->_size, _head->_size + 1);
+  Atomic::release_store(&_head->_size, _head->_size + 1);
   return handle;
 }
 
@@ -214,10 +214,10 @@
 }
 
 void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
-  Chunk* head = OrderAccess::load_acquire(&_head);
+  Chunk* head = Atomic::load_acquire(&_head);
   if (head != NULL) {
     // Must be careful when reading size of head
-    oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
+    oops_do_chunk(f, head, Atomic::load_acquire(&head->_size));
     for (Chunk* c = head->_next; c != NULL; c = c->_next) {
       oops_do_chunk(f, c, c->_size);
     }
   }
@@ -326,7 +326,7 @@
 
 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
@@ -334,7 +334,7 @@
 
 void ClassLoaderData::classes_do(void f(Klass * const)) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     f(k);
     assert(k != k->next_link(), "no loops!");
   }
@@ -342,7 +342,7 @@
 
 void ClassLoaderData::methods_do(void f(Method*)) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
@@ -351,7 +351,7 @@
 
 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     // Do not filter ArrayKlass oops here...
     if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
 #ifdef ASSERT
@@ -366,7 +366,7 @@
 
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
@@ -465,7 +465,7 @@
     k->set_next_link(old_value);
     // Link the new item into the list, making sure the linked class is stable
     // since the list can be walked without a lock
-    OrderAccess::release_store(&_klasses, k);
+    Atomic::release_store(&_klasses, k);
     if (k->is_array_klass()) {
       ClassLoaderDataGraph::inc_array_classes(1);
     } else {
@@ -552,7 +552,7 @@
 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
   // Lock-free access requires load_acquire.
-  ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
+  ModuleEntryTable* modules = Atomic::load_acquire(&_modules);
   if (modules == NULL) {
     MutexLocker m1(Module_lock);
     // Check if _modules got allocated while we were waiting for this lock.
@@ -562,7 +562,7 @@
       {
         MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
         // Ensure _modules is stable, since it is examined without a lock
-        OrderAccess::release_store(&_modules, modules);
+        Atomic::release_store(&_modules, modules);
       }
     }
   }
@@ -752,7 +752,7 @@
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
   // Lock-free access requires load_acquire.
-  ClassLoaderMetaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
+  ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace);
   if (metaspace == NULL) {
     MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
     // Check if _metaspace got allocated while we were waiting for this lock.
@@ -768,7 +768,7 @@
         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
       }
       // Ensure _metaspace is stable, since it is examined without a lock
-      OrderAccess::release_store(&_metaspace, metaspace);
+      Atomic::release_store(&_metaspace, metaspace);
     }
   }
   return metaspace;
@@ -969,7 +969,7 @@
 
 bool ClassLoaderData::contains_klass(Klass* klass) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k == klass) return true;
   }
   return false;
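
Note: the rename is mechanical, as the diff shows; the acquire/release semantics of the call sites are unchanged. For context, ChunkedHandleList::add() publishes each new chunk and each new slot count with a release store, and oops_do() pairs that with acquire loads, so a reader that observes the incremented _size is guaranteed to also observe the oop written into _data just before it. Below is a minimal standalone sketch of the same protocol using standard C++ atomics in place of HotSpot's Atomic:: wrappers; the names, the int payload, and the CAPACITY value are illustrative, not taken from the patch.

#include <atomic>
#include <cstddef>

struct Chunk {
  static constexpr std::size_t CAPACITY = 32;  // illustrative; not HotSpot's value
  explicit Chunk(Chunk* next) : _next(next), _size(0), _data() {}
  Chunk* const _next;                // older chunks are full and never change
  std::atomic<std::size_t> _size;    // number of initialized slots in this chunk
  int _data[CAPACITY];
};

std::atomic<Chunk*> g_head{nullptr};   // plays the role of _head

// Single writer, called under an external lock (as in ClassLoaderData).
void add(int v) {
  Chunk* h = g_head.load(std::memory_order_relaxed);
  if (h == nullptr || h->_size.load(std::memory_order_relaxed) == Chunk::CAPACITY) {
    h = new Chunk(h);
    g_head.store(h, std::memory_order_release);  // publish the new chunk
  }
  std::size_t i = h->_size.load(std::memory_order_relaxed);
  h->_data[i] = v;
  // Store the data first, then release-store the size: a reader that
  // acquires i + 1 is guaranteed to see _data[i].
  h->_size.store(i + 1, std::memory_order_release);
}

// Lock-free reader: only the head chunk's size can be concurrently bumped,
// so only it needs an acquire load; chunks behind the head are frozen.
template <typename F>
void for_each(F f) {
  Chunk* head = g_head.load(std::memory_order_acquire);
  if (head == nullptr) return;
  std::size_t n = head->_size.load(std::memory_order_acquire);
  for (std::size_t i = 0; i < n; i++) f(head->_data[i]);
  for (Chunk* c = head->_next; c != nullptr; c = c->_next) {
    std::size_t m = c->_size.load(std::memory_order_relaxed);
    for (std::size_t i = 0; i < m; i++) f(c->_data[i]);
  }
}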
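
Similarly, modules() and metaspace_non_null() above are instances of double-checked locking: a lock-free acquire-load fast path, a fallback to a mutex, and a release store to publish the fully constructed object. A sketch of the same idiom in standard C++ follows; Table, g_table, and g_lock are illustrative names, not from the patch.

#include <atomic>
#include <mutex>

struct Table { /* lazily created payload */ };

std::atomic<Table*> g_table{nullptr};
std::mutex g_lock;

Table* table() {
  // Lock-free fast path: the acquire pairs with the release store below,
  // so a non-null pointer implies a fully constructed Table.
  Table* t = g_table.load(std::memory_order_acquire);
  if (t == nullptr) {
    std::lock_guard<std::mutex> guard(g_lock);
    // Check again: another thread may have created it while we waited.
    t = g_table.load(std::memory_order_relaxed);
    if (t == nullptr) {
      t = new Table();
      // Publish only after construction is complete.
      g_table.store(t, std::memory_order_release);
    }
  }
  return t;
}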