--- old/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	2017-10-11 09:29:54.687356701 -0400
+++ new/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	2017-10-11 09:29:54.165774972 -0400
@@ -863,7 +863,7 @@
   //
   // markOop displaced_header = obj->mark().set_unlocked();
   // monitor->lock()->set_displaced_header(displaced_header);
-  // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
   //   // We stored the monitor address into the object's mark word.
   // } else if (THREAD->is_lock_owned((address)displaced_header))
   //   // Simple recursive case.
@@ -901,7 +901,7 @@
   std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
       BasicLock::displaced_header_offset_in_bytes(), monitor);
 
-  // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 
   // Store stack address of the BasicObjectLock (this is monitor) into object.
   addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
@@ -977,7 +977,7 @@
   // if ((displaced_header = monitor->displaced_header()) == NULL) {
   //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
   //   monitor->set_obj(NULL);
-  // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
   // } else {
@@ -1010,7 +1010,7 @@
   cmpdi(CCR0, displaced_header, 0);
   beq(CCR0, free_slot); // recursive unlock
 
-  // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
--- old/src/hotspot/cpu/ppc/vm_version_ppc.cpp	2017-10-11 09:30:02.102298726 -0400
+++ new/src/hotspot/cpu/ppc/vm_version_ppc.cpp	2017-10-11 09:30:01.626297605 -0400
@@ -149,8 +149,7 @@
     print_features();
   }
 
-  // PPC64 supports 8-byte compare-exchange operations (see
-  // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
+  // PPC64 supports 8-byte compare-exchange operations (see Atomic::cmpxchg)
   // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
   _supports_cx8 = true;
--- old/src/hotspot/cpu/s390/interp_masm_s390.cpp	2017-10-11 09:30:09.588156971 -0400
+++ new/src/hotspot/cpu/s390/interp_masm_s390.cpp	2017-10-11 09:30:09.057581649 -0400
@@ -914,7 +914,7 @@
   //
   // markOop displaced_header = obj->mark().set_unlocked();
   // monitor->lock()->set_displaced_header(displaced_header);
-  // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
   //   // We stored the monitor address into the object's mark word.
   // } else if (THREAD->is_lock_owned((address)displaced_header))
   //   // Simple recursive case.
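The hunks above and below only touch the commented "template code" that documents the interpreter's stack-locking fast path. As a minimal, self-contained sketch (not part of this patch), the following stand-in shows why the typed Atomic::cmpxchg the comments now reference needs no pointer casts; the helper below uses GCC/Clang builtins and HotSpot's (exchange_value, dest, compare_value) argument order purely for illustration.

#include <cassert>

template <typename T>
T cmpxchg(T exchange_value, T volatile* dest, T compare_value) {
  // On success, compare_value already equals the previous *dest; on failure,
  // the builtin writes the observed value into compare_value. Either way we
  // return the old value, matching HotSpot's cmpxchg contract.
  __atomic_compare_exchange_n(dest, &compare_value, exchange_value,
                              /*weak=*/false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return compare_value;
}

struct Node { Node* next; };

int main() {
  Node a, b;
  Node* volatile head = &a;
  // Typed pointers: no (Node*) or void* casts at the call site.
  Node* observed = cmpxchg(&b, &head, &a);
  assert(observed == &a && head == &b);   // CAS succeeded
  return 0;
}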
@@ -949,7 +949,7 @@
   z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
                           BasicLock::displaced_header_offset_in_bytes(), monitor);
 
-  // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 
   // Store stack address of the BasicObjectLock (this is monitor) into object.
   add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);
@@ -1021,7 +1021,7 @@
   // if ((displaced_header = monitor->displaced_header()) == NULL) {
   //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
   //   monitor->set_obj(NULL);
-  // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
   // } else {
@@ -1062,7 +1062,7 @@
                                BasicLock::displaced_header_offset_in_bytes()));
   z_bre(done); // displaced_header == 0 -> goto done
 
-  // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
--- old/src/hotspot/cpu/s390/vm_version_s390.cpp	2017-10-11 09:30:17.071037436 -0400
+++ new/src/hotspot/cpu/s390/vm_version_s390.cpp	2017-10-11 09:30:16.588247332 -0400
@@ -224,7 +224,7 @@
   }
 
   // z/Architecture supports 8-byte compare-exchange operations
-  // (see Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
+  // (see Atomic::cmpxchg)
   // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
   _supports_cx8 = true;
--- old/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	2017-10-11 09:30:24.838283573 -0400
+++ new/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	2017-10-11 09:30:24.285614621 -0400
@@ -566,7 +566,7 @@
     return start;
   }
 
-  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
+  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
   //
   // Arguments :
   //    c_rarg0: exchange_value
@@ -574,8 +574,8 @@
   //
   // Result:
   //    *dest <- ex, return (orig *dest)
-  address generate_atomic_xchg_ptr() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
+  address generate_atomic_xchg_long() {
+    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
     address start = __ pc();
 
     __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
@@ -4998,7 +4998,7 @@
 
     // atomic calls
     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
-    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
+    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
     StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
--- old/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	2017-10-11 09:30:32.629056484 -0400
+++ new/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	2017-10-11 09:30:32.108899601 -0400
@@ -276,7 +276,7 @@
         markOop disp = lockee->mark()->set_unlocked();
 
         monitor->lock()->set_displaced_header(disp);
-        if (Atomic::cmpxchg_ptr(monitor, lockee->mark_addr(), disp) != disp) {
+        if (Atomic::cmpxchg(monitor, lockee->mark_addr(), disp) != disp) {
           if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
             monitor->lock()->set_displaced_header(NULL);
           }
@@ -420,7 +420,7 @@
       monitor->set_obj(NULL);
 
       if (header != NULL) {
-        if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
+        if (Atomic::cmpxchg(header, rcvr->mark_addr(), lock) != lock) {
           monitor->set_obj(rcvr); {
             HandleMark hm(thread);
             CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(thread, monitor));
--- old/src/hotspot/cpu/zero/stubGenerator_zero.cpp	2017-10-11 09:30:39.890577562 -0400
+++ new/src/hotspot/cpu/zero/stubGenerator_zero.cpp	2017-10-11 09:30:39.467284055 -0400
@@ -253,9 +253,8 @@
 
     // atomic calls
     StubRoutines::_atomic_xchg_entry         = ShouldNotCallThisStub();
-    StubRoutines::_atomic_xchg_ptr_entry     = ShouldNotCallThisStub();
+    StubRoutines::_atomic_xchg_long_entry    = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_entry      = ShouldNotCallThisStub();
-    StubRoutines::_atomic_cmpxchg_ptr_entry  = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_entry          = ShouldNotCallThisStub();
--- old/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	2017-10-11 09:30:47.963885123 -0400
+++ new/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	2017-10-11 09:30:47.437593211 -0400
@@ -137,7 +137,7 @@
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                              T volatile* dest) const {
   STATIC_ASSERT(4 == sizeof(T));
-  // Note that xchg_ptr doesn't necessarily do an acquire
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
   T old_value;
@@ -176,7 +176,7 @@
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                              T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  // Note that xchg_ptr doesn't necessarily do an acquire
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   T old_value;
--- old/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	2017-10-11 09:30:55.798873895 -0400
+++ new/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	2017-10-11 09:30:55.275336742 -0400
@@ -134,7 +134,7 @@
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                              T volatile* dest) const {
-  // Note that xchg_ptr doesn't necessarily do an acquire
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   T old_value;
@@ -173,7 +173,7 @@
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                              T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  // Note that xchg_ptr doesn't necessarily do an acquire
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   T old_value;
--- old/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	2017-10-11 09:31:03.239140068 -0400
+++ new/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	2017-10-11 09:31:02.708362036 -0400
@@ -73,7 +73,7 @@
 }
 
 DEFINE_STUB_XCHG(4, jint,  os::atomic_xchg_func)
-DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_ptr_func)
+DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
 
 #undef DEFINE_STUB_XCHG
--- old/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp	2017-10-11 09:31:10.231864910 -0400
+++ new/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp	2017-10-11 09:31:09.805015266 -0400
@@ -219,7 +219,7 @@
 // Atomics and Stub Functions
 
 typedef jint     xchg_func_t            (jint,     volatile jint*);
-typedef intptr_t xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
+typedef intptr_t xchg_long_func_t       (jlong,    volatile jlong*);
 typedef jint     cmpxchg_func_t         (jint,     volatile jint*,  jint);
 typedef jbyte    cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
 typedef jlong    cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
@@ -243,12 +243,12 @@
   return old_value;
 }
 
-intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
+intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) {
   // try to use the stub:
-  xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());
+  xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
 
   if (func != NULL) {
-    os::atomic_xchg_ptr_func = func;
+    os::atomic_xchg_long_func = func;
     return (*func)(exchange_value, dest);
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
@@ -338,7 +338,7 @@
 }
 
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
-xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
+xchg_long_func_t*    os::atomic_xchg_long_func    = os::atomic_xchg_long_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
--- old/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp	2017-10-11 09:31:17.518394096 -0400
+++ new/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp	2017-10-11 09:31:17.092570609 -0400
@@ -30,7 +30,7 @@
   //
 #ifdef AMD64
   static jint     (*atomic_xchg_func)          (jint, volatile jint*);
-  static intptr_t (*atomic_xchg_ptr_func)      (intptr_t, volatile intptr_t*);
+  static intptr_t (*atomic_xchg_long_func)     (jlong, volatile jlong*);
 
   static jint     (*atomic_cmpxchg_func)       (jint, volatile jint*, jint);
   static jbyte    (*atomic_cmpxchg_byte_func)  (jbyte, volatile jbyte*, jbyte);
@@ -40,7 +40,7 @@
   static intptr_t (*atomic_add_ptr_func)       (intptr_t, volatile intptr_t*);
 
   static jint     atomic_xchg_bootstrap        (jint, volatile jint*);
-  static intptr_t atomic_xchg_ptr_bootstrap    (intptr_t, volatile intptr_t*);
+  static intptr_t atomic_xchg_long_bootstrap   (jlong, volatile jlong*);
 
   static jint     atomic_cmpxchg_bootstrap     (jint, volatile jint*, jint);
   static jbyte    atomic_cmpxchg_byte_bootstrap(jbyte, volatile jbyte*, jbyte);
--- old/src/hotspot/share/asm/assembler.cpp	2017-10-11 09:31:25.640181949 -0400
+++ new/src/hotspot/share/asm/assembler.cpp	2017-10-11 09:31:25.112021736 -0400
@@ -236,11 +236,9 @@
     if (dcon->match(type, cfn))
       return dcon;
     if (dcon->value_fn == NULL) {
-      // (cmpxchg not because this is multi-threaded but because I'm paranoid)
-      if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
+      dcon->value_fn = cfn;
       dcon->type = type;
       return dcon;
-      }
     }
   }
   // If this assert is hit (in pre-integration testing!) then re-evaluate
--- old/src/hotspot/share/classfile/classLoader.hpp	2017-10-11 09:31:33.051708822 -0400
+++ new/src/hotspot/share/classfile/classLoader.hpp	2017-10-11 09:31:32.528533885 -0400
@@ -48,13 +48,11 @@
   ClassPathEntry* volatile _next;
 public:
   // Next entry in class path
-  ClassPathEntry* next() const {
-    return (ClassPathEntry*) OrderAccess::load_ptr_acquire(&_next);
-  }
+  ClassPathEntry* next() const { return OrderAccess::load_acquire(&_next); }
   virtual ~ClassPathEntry() {}
   void set_next(ClassPathEntry* next) {
     // may have unlocked readers, so ensure visibility.
-    OrderAccess::release_store_ptr(&_next, next);
+    OrderAccess::release_store(&_next, next);
   }
   virtual bool is_jrt() = 0;
   virtual bool is_jar_file() const = 0;
--- old/src/hotspot/share/classfile/classLoaderData.cpp	2017-10-11 09:31:40.233210396 -0400
+++ new/src/hotspot/share/classfile/classLoaderData.cpp	2017-10-11 09:31:39.806600447 -0400
@@ -82,11 +82,6 @@
 #include "trace/tracing.hpp"
 #endif
 
-// helper function to avoid in-line casts
-template <typename T> static T* load_ptr_acquire(T* volatile *p) {
-  return static_cast<T*>(OrderAccess::load_ptr_acquire(p));
-}
-
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@@ -152,7 +147,7 @@
 oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
   if (_head == NULL || _head->_size == Chunk::CAPACITY) {
     Chunk* next = new Chunk(_head);
-    OrderAccess::release_store_ptr(&_head, next);
+    OrderAccess::release_store(&_head, next);
   }
   oop* handle = &_head->_data[_head->_size];
   *handle = o;
@@ -169,7 +164,7 @@
 }
 
 void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
-  Chunk* head = (Chunk*) OrderAccess::load_ptr_acquire(&_head);
+  Chunk* head = (Chunk*) OrderAccess::load_acquire(&_head);
   if (head != NULL) {
     // Must be careful when reading size of head
    oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
   }
@@ -257,24 +252,24 @@
 }
 
 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::classes_do(void f(Klass * const)) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     f(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::methods_do(void f(Method*)) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
@@ -282,8 +277,8 @@
 }
 
 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     // Do not filter ArrayKlass oops here...
     if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
       klass_closure->do_klass(k);
@@ -292,8 +287,8 @@
 }
 
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
@@ -449,7 +444,7 @@
     k->set_next_link(old_value);
     // Link the new item into the list, making sure the linked class is stable
     // since the list can be walked without a lock
-    OrderAccess::release_store_ptr(&_klasses, k);
+    OrderAccess::release_store(&_klasses, k);
   }
 
   if (publicize && k->class_loader_data() != NULL) {
@@ -589,8 +584,8 @@
 
 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
-  // Lock-free access requires load_ptr_acquire.
-  ModuleEntryTable* modules = load_ptr_acquire(&_modules);
+  // Lock-free access requires load_acquire.
+  ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
   if (modules == NULL) {
     MutexLocker m1(Module_lock);
     // Check if _modules got allocated while we were waiting for this lock.
@@ -600,7 +595,7 @@
       {
         MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
        // Ensure _modules is stable, since it is examined without a lock
-        OrderAccess::release_store_ptr(&_modules, modules);
+        OrderAccess::release_store(&_modules, modules);
       }
     }
   }
@@ -737,8 +732,8 @@
   // to create smaller arena for Reflection class loaders also.
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
-  // Lock-free access requires load_ptr_acquire.
-  Metaspace* metaspace = load_ptr_acquire(&_metaspace);
+  // Lock-free access requires load_acquire.
+  Metaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
   if (metaspace == NULL) {
     MutexLockerEx ml(_metaspace_lock,  Mutex::_no_safepoint_check_flag);
     // Check if _metaspace got allocated while we were waiting for this lock.
@@ -760,7 +755,7 @@
         metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
       }
       // Ensure _metaspace is stable, since it is examined without a lock
-      OrderAccess::release_store_ptr(&_metaspace, metaspace);
+      OrderAccess::release_store(&_metaspace, metaspace);
     }
   }
   return metaspace;
@@ -914,8 +909,8 @@
 }
 
 bool ClassLoaderData::contains_klass(Klass* klass) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k == klass) return true;
   }
   return false;
@@ -948,7 +943,7 @@
   if (!is_anonymous) {
     ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
     // First, Atomically set it
-    ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
+    ClassLoaderData* old = Atomic::cmpxchg(cld, cld_addr, (ClassLoaderData*)NULL);
     if (old != NULL) {
       delete cld;
       // Returns the data.
@@ -963,7 +958,7 @@
   do {
     cld->set_next(next);
-    ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
+    ClassLoaderData* exchanged = Atomic::cmpxchg(cld, list_head, next);
     if (exchanged == next) {
       LogTarget(Debug, class, loader, data) lt;
       if (lt.is_enabled()) {
@@ -1387,7 +1382,7 @@
 
   while (head != NULL) {
     Klass* next = next_klass_in_cldg(head);
-    Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
+    Klass* old_head = Atomic::cmpxchg(next, &_next_klass, head);
     if (old_head == head) {
       return head; // Won the CAS.
--- old/src/hotspot/share/classfile/dictionary.hpp	2017-10-11 09:31:47.405282970 -0400
+++ new/src/hotspot/share/classfile/dictionary.hpp	2017-10-11 09:31:46.975686020 -0400
@@ -161,10 +161,10 @@
   void set_pd_set(ProtectionDomainEntry* new_head) { _pd_set = new_head; }
 
   ProtectionDomainEntry* pd_set_acquire() const {
-    return (ProtectionDomainEntry*)OrderAccess::load_ptr_acquire(&_pd_set);
+    return OrderAccess::load_acquire(&_pd_set);
   }
   void release_set_pd_set(ProtectionDomainEntry* new_head) {
-    OrderAccess::release_store_ptr(&_pd_set, new_head);
+    OrderAccess::release_store(&_pd_set, new_head);
   }
 
   // Tells whether the initiating class' protection domain can access the klass in this entry
--- old/src/hotspot/share/classfile/verifier.cpp	2017-10-11 09:31:54.495783594 -0400
+++ new/src/hotspot/share/classfile/verifier.cpp	2017-10-11 09:31:54.059014786 -0400
@@ -69,14 +69,14 @@
 static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
 
 static void* verify_byte_codes_fn() {
-  if (OrderAccess::load_ptr_acquire(&_verify_byte_codes_fn) == NULL) {
+  if (OrderAccess::load_acquire(&_verify_byte_codes_fn) == NULL) {
     void *lib_handle = os::native_java_library();
     void *func = os::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
-    OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
+    OrderAccess::release_store(&_verify_byte_codes_fn, func);
     if (func == NULL) {
       _is_new_verify_byte_codes_fn = false;
       func = os::dll_lookup(lib_handle, "VerifyClassCodes");
-      OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
+      OrderAccess::release_store(&_verify_byte_codes_fn, func);
     }
   }
   return (void*)_verify_byte_codes_fn;
--- old/src/hotspot/share/code/compiledMethod.hpp	2017-10-11 09:32:01.916785108 -0400
+++ new/src/hotspot/share/code/compiledMethod.hpp	2017-10-11 09:32:01.391428276 -0400
@@ -288,7 +288,7 @@
   // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
   ExceptionCache* exception_cache() const         { return _exception_cache; }
   void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
-  void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
+  void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store(&_exception_cache, ec); }
   address handler_for_exception_and_pc(Handle exception, address pc);
   void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
   void clean_exception_cache(BoolObjectClosure* is_alive);
--- old/src/hotspot/share/code/nmethod.cpp	2017-10-11 09:32:08.770206181 -0400
+++ new/src/hotspot/share/code/nmethod.cpp	2017-10-11 09:32:08.341680971 -0400
@@ -1664,17 +1664,14 @@
   nmethod* observed_mark_link = _oops_do_mark_link;
   if (observed_mark_link == NULL) {
     // Claim this nmethod for this thread to mark.
-    observed_mark_link = (nmethod*)
-      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
-    if (observed_mark_link == NULL) {
-
+    if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {
       // Atomically append this nmethod (now claimed) to the head of the list:
       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
       for (;;) {
         nmethod* required_mark_nmethods = observed_mark_nmethods;
         _oops_do_mark_link = required_mark_nmethods;
-        observed_mark_nmethods = (nmethod*)
-          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
+        observed_mark_nmethods =
+          Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods);
         if (observed_mark_nmethods == required_mark_nmethods)
           break;
       }
@@ -1690,9 +1687,9 @@
 void nmethod::oops_do_marking_prologue() {
   if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
-  // We use cmpxchg_ptr instead of regular assignment here because the user
+  // We use cmpxchg instead of regular assignment here because the user
   // may fork a bunch of threads, and we need them all to see the same state.
-  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
+  nmethod* observed = Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, (nmethod*)NULL);
   guarantee(observed == NULL, "no races in this sequential code");
 }
 
@@ -1707,8 +1704,8 @@
     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
     cur = next;
   }
-  void* required = _oops_do_mark_nmethods;
-  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
+  nmethod* required = _oops_do_mark_nmethods;
+  nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
   guarantee(observed == required, "no races in this sequential code");
   if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
 }
--- old/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	2017-10-11 09:32:17.098291133 -0400
+++ new/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	2017-10-11 09:32:16.421907550 -0400
@@ -1076,7 +1076,7 @@
 
   NOT_PRODUCT(
     Atomic::inc(&_numObjectsPromoted);
-    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
+    Atomic::add(alloc_sz, &_numWordsPromoted);
   )
 
   return obj;
@@ -3179,7 +3179,7 @@
     HeapWord* cur = read;
     while (f > read) {
       cur = read;
-      read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
+      read = Atomic::cmpxchg(f, &_global_finger, cur);
       if (cur == read) {
         // our cas succeeded
         assert(_global_finger >= f, "protocol consistency");
@@ -7852,7 +7852,7 @@
     return false;
   }
   // Grab the entire list; we'll put back a suffix
-  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
   Thread* tid = Thread::current();
   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
   // set to ParallelGCThreads.
@@ -7867,7 +7867,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
       // Try and grab the prefix
-      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
     }
   }
   // If the list was found to be empty, or we spun long
@@ -7880,7 +7880,7 @@
     if (prefix == NULL) {
       // Write back the NULL in case we overwrote it with BUSY above
       // and it is still the same value.
-      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+      Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
     }
     return false;
   }
@@ -7895,7 +7895,7 @@
     // Write back the NULL in lieu of the BUSY we wrote
     // above, if it is still the same value.
     if (_overflow_list == BUSY) {
-      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+      Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
     }
   } else {
     // Chop off the suffix and return it to the global list.
@@ -7911,7 +7911,7 @@
       bool attached = false;
       while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
         observed_overflow_list =
-          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+          Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
         if (cur_overflow_list == observed_overflow_list) {
           attached = true;
           break;
@@ -7936,7 +7936,7 @@
         }
         // ... and try to place spliced list back on overflow_list ...
         observed_overflow_list =
-          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+          Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
       // ... until we have succeeded in doing so.
     }
@@ -7957,7 +7957,7 @@
   }
 #ifndef PRODUCT
   assert(_num_par_pushes >= n, "Too many pops?");
-  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
+  Atomic::add(-n, &_num_par_pushes);
 #endif
   return true;
 }
@@ -7986,7 +7986,7 @@
       p->set_mark(NULL);
     }
     observed_overflow_list =
-      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
+      Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
   } while (cur_overflow_list != observed_overflow_list);
 }
 #undef BUSY
--- old/src/hotspot/share/gc/cms/parNewGeneration.cpp	2017-10-11 09:32:25.578778659 -0400
+++ new/src/hotspot/share/gc/cms/parNewGeneration.cpp	2017-10-11 09:32:25.152169228 -0400
@@ -1296,7 +1296,7 @@
       from_space_obj->set_klass_to_list_ptr(NULL);
     }
     observed_overflow_list =
-      (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
+      Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
   } while (cur_overflow_list != observed_overflow_list);
 }
 }
@@ -1339,7 +1339,7 @@
   if (_overflow_list == NULL) return false;
 
   // Otherwise, there was something there; try claiming the list.
-  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
   // Trim off a prefix of at most objsFromOverflow items
   Thread* tid = Thread::current();
   size_t spin_count = ParallelGCThreads;
@@ -1353,7 +1353,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
       // try and grab the prefix
-      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
     }
   }
   if (prefix == NULL || prefix == BUSY) {
@@ -1361,7 +1361,7 @@
     if (prefix == NULL) {
       // Write back the NULL in case we overwrote it with BUSY above
       // and it is still the same value.
-      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
     }
     return false;
   }
@@ -1380,7 +1380,7 @@
     // Write back the NULL in lieu of the BUSY we wrote
     // above and it is still the same value.
     if (_overflow_list == BUSY) {
-      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
     }
   } else {
     assert(suffix != BUSY, "Error");
@@ -1394,7 +1394,7 @@
       bool attached = false;
       while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
         observed_overflow_list =
-          (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+          Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
         if (cur_overflow_list == observed_overflow_list) {
           attached = true;
           break;
@@ -1420,7 +1420,7 @@
           last->set_klass_to_list_ptr(NULL);
         }
         observed_overflow_list =
-          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+          Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
     }
   }
@@ -1452,7 +1452,7 @@
   TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
 #ifndef PRODUCT
   assert(_num_par_pushes >= n, "Too many pops?");
-  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
+  Atomic::add(-n, &_num_par_pushes);
 #endif
   return true;
 }
--- old/src/hotspot/share/gc/g1/dirtyCardQueue.cpp	2017-10-11 09:32:33.044447216 -0400
+++ new/src/hotspot/share/gc/g1/dirtyCardQueue.cpp	2017-10-11 09:32:32.527209589 -0400
@@ -280,7 +280,7 @@
   BufferNode* nd = _cur_par_buffer_node;
   while (nd != NULL) {
     BufferNode* next = nd->next();
-    void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
+    void* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
     if (actual == nd) {
       bool b = apply_closure_to_buffer(cl, nd, false);
       guarantee(b, "Should not stop early.");
--- old/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	2017-10-11 09:32:40.265584552 -0400
+++ new/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	2017-10-11 09:32:39.842186194 -0400
@@ -155,19 +155,19 @@
 }
 
 G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
-  return (G1CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
+  return OrderAccess::load_acquire(&_table);
 }
 
 void G1CodeRootSet::allocate_small_table() {
   G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);
 
-  OrderAccess::release_store_ptr(&_table, temp);
+  OrderAccess::release_store(&_table, temp);
 }
 
 void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
   for (;;) {
     table->_purge_next = _purge_list;
-    G1CodeRootSetTable* old = (G1CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
+    G1CodeRootSetTable* old = Atomic::cmpxchg(table, &_purge_list, table->_purge_next);
     if (old == table->_purge_next) {
       break;
     }
@@ -191,7 +191,7 @@
 
   G1CodeRootSetTable::purge_list_append(_table);
 
-  OrderAccess::release_store_ptr(&_table, temp);
+  OrderAccess::release_store(&_table, temp);
 }
 
 void G1CodeRootSet::purge() {
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2017-10-11 09:32:47.565493745 -0400
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2017-10-11 09:32:47.132552705 -0400
@@ -3498,7 +3498,7 @@
     do {
       old = (CompiledMethod*)_postponed_list;
       nm->set_unloading_next(old);
-    } while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
+    } while (Atomic::cmpxchg(nm, &_postponed_list, old) != old);
   }
 
   void clean_nmethod(CompiledMethod* nm) {
@@ -3541,7 +3541,7 @@
         }
       }
 
-    } while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
+    } while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
   }
 
   CompiledMethod* claim_postponed_nmethod() {
@@ -3556,7 +3556,7 @@
       next = claim->unloading_next();
 
-    } while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
+    } while (Atomic::cmpxchg(next, &_postponed_list, claim) != claim);
 
     return claim;
   }
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	2017-10-11 09:32:55.277159271 -0400
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	2017-10-11 09:32:54.848488322 -0400
@@ -1870,7 +1870,7 @@
     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
 
     // Is the gap between reading the finger and doing the CAS too long?
-    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
+    HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
     if (res == finger && curr_region != NULL) {
       // we succeeded
       HeapWord*   bottom        = curr_region->bottom();
--- old/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp	2017-10-11 09:33:02.489675538 -0400
+++ new/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp	2017-10-11 09:33:02.065651198 -0400
@@ -29,17 +29,17 @@
 #include "runtime/atomic.hpp"
 
 inline void G1EvacStats::add_direct_allocated(size_t value) {
-  Atomic::add_ptr(value, &_direct_allocated);
+  Atomic::add(value, &_direct_allocated);
 }
 
 inline void G1EvacStats::add_region_end_waste(size_t value) {
-  Atomic::add_ptr(value, &_region_end_waste);
-  Atomic::add_ptr(1, &_regions_filled);
+  Atomic::add(value, &_region_end_waste);
+  Atomic::inc(&_regions_filled);
 }
 
 inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
-  Atomic::add_ptr(used, &_failure_used);
-  Atomic::add_ptr(waste, &_failure_waste);
+  Atomic::add(used, &_failure_used);
+  Atomic::add(waste, &_failure_waste);
 }
 
 #endif // SHARE_VM_GC_G1_G1EVACSTATS_INLINE_HPP
--- old/src/hotspot/share/gc/g1/g1HotCardCache.cpp	2017-10-11 09:33:09.621325899 -0400
+++ new/src/hotspot/share/gc/g1/g1HotCardCache.cpp	2017-10-11 09:33:09.136994146 -0400
@@ -74,9 +74,9 @@
   // card_ptr in favor of the other option, which would be starting over. This
   // should be OK since card_ptr will likely be the older card already when/if
   // this ever happens.
-  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
-                                                    &_hot_cache[masked_index],
-                                                    current_ptr);
+  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg(card_ptr,
+                                                &_hot_cache[masked_index],
+                                                current_ptr);
   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }
--- old/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	2017-10-11 09:33:16.463080894 -0400
+++ new/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	2017-10-11 09:33:16.033467272 -0400
@@ -251,7 +251,7 @@
   virtual void work(uint worker_id) {
     size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
     while (true) {
-      char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
+      char* touch_addr = (char*)Atomic::add(actual_chunk_size, &_cur_addr) - actual_chunk_size;
       if (touch_addr < _start_addr || touch_addr >= _end_addr) {
         break;
       }
--- old/src/hotspot/share/gc/g1/g1StringDedup.cpp	2017-10-11 09:33:23.528665657 -0400
+++ new/src/hotspot/share/gc/g1/g1StringDedup.cpp	2017-10-11 09:33:23.074388357 -0400
@@ -203,12 +203,12 @@
 // Atomically claims the next available queue for exclusive access by
 // the current thread. Returns the queue number of the claimed queue.
 size_t G1StringDedupUnlinkOrOopsDoClosure::claim_queue() {
-  return (size_t)Atomic::add_ptr(1, &_next_queue) - 1;
+  return Atomic::add((size_t)1, &_next_queue) - 1;
 }
 
 // Atomically claims the next available table partition for exclusive
 // access by the current thread. Returns the table bucket number where
 // the claimed partition starts.
 size_t G1StringDedupUnlinkOrOopsDoClosure::claim_table_partition(size_t partition_size) {
-  return (size_t)Atomic::add_ptr(partition_size, &_next_bucket) - partition_size;
+  return (size_t)Atomic::add(partition_size, &_next_bucket) - partition_size;
 }
--- old/src/hotspot/share/gc/g1/heapRegion.inline.hpp	2017-10-11 09:33:30.486031794 -0400
+++ new/src/hotspot/share/gc/g1/heapRegion.inline.hpp	2017-10-11 09:33:30.060334835 -0400
@@ -59,7 +59,7 @@
     size_t want_to_allocate = MIN2(available, desired_word_size);
     if (want_to_allocate >= min_word_size) {
       HeapWord* new_top = obj + want_to_allocate;
-      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
       // result can be one of two:
       //  the old top value: the exchange succeeded
       //  otherwise: the new value of the top is returned.
--- old/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	2017-10-11 09:33:38.004171916 -0400
+++ new/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	2017-10-11 09:33:37.581243088 -0400
@@ -113,9 +113,7 @@
 
 public:
 
-  HeapRegion* hr() const {
-    return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
-  }
+  HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }
 
   jint occupied() const {
     // Overkill, but if we ever need it...
@@ -133,7 +131,7 @@
     _bm.clear();
     // Make sure that the bitmap clearing above has been finished before publishing
     // this PRT to concurrent threads.
-    OrderAccess::release_store_ptr(&_hr, hr);
+    OrderAccess::release_store(&_hr, hr);
   }
 
   void add_reference(OopOrNarrowOopStar from) {
@@ -182,7 +180,7 @@
     while (true) {
       PerRegionTable* fl = _free_list;
       last->set_next(fl);
-      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
+      PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
       if (res == fl) {
         return;
       }
@@ -200,8 +198,7 @@
     while (fl != NULL) {
       PerRegionTable* nxt = fl->next();
       PerRegionTable* res =
-        (PerRegionTable*)
-        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
+        Atomic::cmpxchg(nxt, &_free_list, fl);
       if (res == fl) {
         fl->init(hr, true);
         return fl;
@@ -416,7 +413,7 @@
     // some mark bits may not yet seem cleared or a 'later' update
    // performed by a concurrent thread could be undone when the
    // zeroing becomes visible). This requires store ordering.
-    OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
+    OrderAccess::release_store(&_fine_grain_regions[ind], prt);
     _n_fine_entries++;
 
     if (G1HRRSUseSparseTable) {
--- old/src/hotspot/share/gc/g1/sparsePRT.cpp	2017-10-11 09:33:44.942243390 -0400
+++ new/src/hotspot/share/gc/g1/sparsePRT.cpp	2017-10-11 09:33:44.509302610 -0400
@@ -293,8 +293,7 @@
   while (true) {
     sprt->_next_expanded = hd;
     SparsePRT* res =
-      (SparsePRT*)
-      Atomic::cmpxchg_ptr(sprt, &_head_expanded_list, hd);
+      Atomic::cmpxchg(sprt, &_head_expanded_list, hd);
     if (res == hd) return;
     else hd = res;
   }
@@ -306,8 +305,7 @@
   while (hd != NULL) {
     SparsePRT* next = hd->next_expanded();
     SparsePRT* res =
-      (SparsePRT*)
-      Atomic::cmpxchg_ptr(next, &_head_expanded_list, hd);
+      Atomic::cmpxchg(next, &_head_expanded_list, hd);
     if (res == hd) {
       hd->set_next_expanded(NULL);
       return hd;
--- old/src/hotspot/share/gc/parallel/gcTaskThread.cpp	2017-10-11 09:33:51.977585312 -0400
+++ new/src/hotspot/share/gc/parallel/gcTaskThread.cpp	2017-10-11 09:33:51.456707151 -0400
@@ -77,8 +77,7 @@
   if (_time_stamps == NULL) {
     // We allocate the _time_stamps array lazily since logging can be enabled dynamically
     GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
-    void* old = Atomic::cmpxchg_ptr(time_stamps, &_time_stamps, NULL);
-    if (old != NULL) {
+    if (Atomic::cmpxchg(time_stamps, &_time_stamps, (GCTaskTimeStamp*)NULL) != NULL) {
       // Someone already setup the time stamps
       FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
     }
--- old/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp	2017-10-11 09:33:59.253934090 -0400
+++ new/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp	2017-10-11 09:33:58.822984914 -0400
@@ -862,7 +862,7 @@
   if (p != NULL) {
     HeapWord* cur_top, *cur_chunk_top = p + size;
     while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
-      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
+      if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) {
         break;
       }
     }
--- old/src/hotspot/share/gc/parallel/mutableSpace.cpp	2017-10-11 09:34:06.233375338 -0400
+++ new/src/hotspot/share/gc/parallel/mutableSpace.cpp	2017-10-11 09:34:05.806927699 -0400
@@ -192,7 +192,7 @@
   HeapWord* obj = top();
   if (pointer_delta(end(), obj) >= size) {
     HeapWord* new_top = obj + size;
-    HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+    HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
     // result can be one of two:
    //  the old top value: the exchange succeeded
    //  otherwise: the new value of the top is returned.
@@ -211,7 +211,7 @@
 // Try to deallocate previous allocation. Returns true upon success.
 bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
   HeapWord* expected_top = obj + size;
-  return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
+  return Atomic::cmpxchg(obj, top_addr(), expected_top) == expected_top;
 }
 
 void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
--- old/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	2017-10-11 09:34:13.242356082 -0400
+++ new/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	2017-10-11 09:34:12.819137465 -0400
@@ -90,7 +90,7 @@
     bool end_bit_ok = _end_bits.par_set_bit(end_bit);
     assert(end_bit_ok, "concurrency problem");
     DEBUG_ONLY(Atomic::inc(&mark_bitmap_count));
-    DEBUG_ONLY(Atomic::add_ptr(size, &mark_bitmap_size));
+    DEBUG_ONLY(Atomic::add(size, &mark_bitmap_size));
     return true;
   }
   return false;
--- old/src/hotspot/share/gc/parallel/psParallelCompact.cpp	2017-10-11 09:34:20.950666917 -0400
+++ new/src/hotspot/share/gc/parallel/psParallelCompact.cpp	2017-10-11 09:34:20.520189469 -0400
@@ -521,7 +521,7 @@
   const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
 
   DEBUG_ONLY(Atomic::inc(&add_obj_count);)
-  DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
+  DEBUG_ONLY(Atomic::add(len, &add_obj_size);)
 
   if (beg_region == end_region) {
     // All in one region.
--- old/src/hotspot/share/gc/parallel/psParallelCompact.hpp	2017-10-11 09:34:29.056068820 -0400
+++ new/src/hotspot/share/gc/parallel/psParallelCompact.hpp	2017-10-11 09:34:28.631399393 -0400
@@ -586,7 +586,7 @@
 #ifdef ASSERT
   HeapWord* tmp = _highest_ref;
   while (addr > tmp) {
-    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
+    tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
   }
 #endif  // #ifdef ASSERT
 }
--- old/src/hotspot/share/gc/shared/plab.inline.hpp	2017-10-11 09:34:36.580850933 -0400
+++ new/src/hotspot/share/gc/shared/plab.inline.hpp	2017-10-11 09:34:36.062581251 -0400
@@ -43,19 +43,19 @@
 }
 
 void PLABStats::add_allocated(size_t v) {
-  Atomic::add_ptr(v, &_allocated);
+  Atomic::add(v, &_allocated);
 }
 
 void PLABStats::add_unused(size_t v) {
-  Atomic::add_ptr(v, &_unused);
+  Atomic::add(v, &_unused);
 }
 
 void PLABStats::add_wasted(size_t v) {
-  Atomic::add_ptr(v, &_wasted);
+  Atomic::add(v, &_wasted);
 }
 
 void PLABStats::add_undo_wasted(size_t v) {
-  Atomic::add_ptr(v, &_undo_wasted);
+  Atomic::add(v, &_undo_wasted);
 }
 
 #endif // SHARE_VM_GC_SHARED_PLAB_INLINE_HPP
--- old/src/hotspot/share/gc/shared/space.cpp	2017-10-11 09:34:43.691607257 -0400
+++ new/src/hotspot/share/gc/shared/space.cpp	2017-10-11 09:34:43.263792884 -0400
@@ -631,7 +631,7 @@
   HeapWord* obj = top();
   if (pointer_delta(end(), obj) >= size) {
     HeapWord* new_top = obj + size;
-    HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+    HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
     // result can be one of two:
     //  the old top value: the exchange succeeded
     //  otherwise: the new value of the top is returned.
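Several of the hunks above (MutableSpace, Space, HeapRegion) are the same lock-free bump-pointer allocation loop, now written with a typed CAS. A minimal, self-contained sketch of that pattern follows (illustrative only, not part of the patch); it uses std::atomic and a stand-in HeapWord type, and leaves out TLABs and the higher-level retry logic HotSpot layers on top.

#include <atomic>
#include <cstddef>

typedef char HeapWord;  // stand-in; HotSpot's HeapWord is an opaque word type

static HeapWord space[1024];
static std::atomic<HeapWord*> top{space};
static HeapWord* const space_end = space + sizeof(space);

HeapWord* cas_allocate(size_t size_in_words) {
  HeapWord* obj = top.load(std::memory_order_relaxed);
  do {
    if ((size_t)(space_end - obj) < size_in_words) {
      return NULL;                              // not enough room left
    }
    // On failure, compare_exchange_weak refreshes 'obj' with the current top,
    // so the bounds check and the CAS are retried, just like the loops above.
  } while (!top.compare_exchange_weak(obj, obj + size_in_words));
  return obj;                                   // old top = start of the new object
}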
--- old/src/hotspot/share/gc/shared/taskqueue.inline.hpp	2017-10-11 09:34:51.035981915 -0400
+++ new/src/hotspot/share/gc/shared/taskqueue.inline.hpp	2017-10-11 09:34:50.613429337 -0400
@@ -259,9 +259,9 @@
 template<unsigned int N, MEMFLAGS F>
 inline typename TaskQueueSuper<N, F>::Age TaskQueueSuper<N, F>::Age::cmpxchg(const Age new_age, const Age old_age) volatile {
-  return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
-                                      (volatile intptr_t *)&_data,
-                                      (intptr_t)old_age._data);
+  return (size_t) Atomic::cmpxchg((intptr_t)new_age._data,
+                                  (volatile intptr_t *)&_data,
+                                  (intptr_t)old_age._data);
 }
 
 template<unsigned int N, MEMFLAGS F>
--- old/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	2017-10-11 09:34:58.646278485 -0400
+++ new/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	2017-10-11 09:34:58.075119017 -0400
@@ -705,7 +705,7 @@
           if (hash != markOopDesc::no_hash) {
             header = header->copy_set_hash(hash);
           }
-          if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
+          if (Atomic::cmpxchg(header, rcvr->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics)
               (*BiasedLocking::revoked_lock_entry_count_addr())++;
           }
@@ -715,7 +715,7 @@
           if (hash != markOopDesc::no_hash) {
             new_header = new_header->copy_set_hash(hash);
           }
-          if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
+          if (Atomic::cmpxchg((void*)new_header, rcvr->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
               (* BiasedLocking::rebiased_lock_entry_count_addr())++;
             }
@@ -734,7 +734,7 @@
           markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
           // Debugging hint.
           DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
-          if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
+          if (Atomic::cmpxchg((void*)new_header, rcvr->mark_addr(), header) == header) {
            if (PrintBiasedLockingStatistics) {
               (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
             }
@@ -750,7 +750,7 @@
         markOop displaced = rcvr->mark()->set_unlocked();
         mon->lock()->set_displaced_header(displaced);
         bool call_vm = UseHeavyMonitors;
-        if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
+        if (call_vm || Atomic::cmpxchg(mon, rcvr->mark_addr(), displaced) != displaced) {
           // Is it simple recursive case?
           if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
             mon->lock()->set_displaced_header(NULL);
@@ -903,7 +903,7 @@
             if (hash != markOopDesc::no_hash) {
               header = header->copy_set_hash(hash);
             }
-            if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
+            if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics) {
                 (*BiasedLocking::revoked_lock_entry_count_addr())++;
               }
@@ -914,7 +914,7 @@
             if (hash != markOopDesc::no_hash) {
               new_header = new_header->copy_set_hash(hash);
             }
-            if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
+            if (Atomic::cmpxchg((void*)new_header, lockee->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics) {
                 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
               }
@@ -932,7 +932,7 @@
             markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
             // debugging hint
             DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
-            if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
+            if (Atomic::cmpxchg((void*)new_header, lockee->mark_addr(), header) == header) {
              if (PrintBiasedLockingStatistics) {
                 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
               }
@@ -948,7 +948,7 @@
           markOop displaced = lockee->mark()->set_unlocked();
           entry->lock()->set_displaced_header(displaced);
           bool call_vm = UseHeavyMonitors;
-          if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+          if (call_vm || Atomic::cmpxchg(entry, lockee->mark_addr(), displaced) != displaced) {
             // Is it simple recursive case?
             if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
               entry->lock()->set_displaced_header(NULL);
@@ -1844,7 +1844,7 @@
               if (hash != markOopDesc::no_hash) {
                 header = header->copy_set_hash(hash);
               }
-              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
+              if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics)
                   (*BiasedLocking::revoked_lock_entry_count_addr())++;
               }
@@ -1855,7 +1855,7 @@
               if (hash != markOopDesc::no_hash) {
                 new_header = new_header->copy_set_hash(hash);
               }
-              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
+              if (Atomic::cmpxchg((void*)new_header, lockee->mark_addr(), mark) == mark) {
                if (PrintBiasedLockingStatistics)
                   (* BiasedLocking::rebiased_lock_entry_count_addr())++;
               }
@@ -1875,7 +1875,7 @@
               markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
               // debugging hint
               DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
-              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
+              if (Atomic::cmpxchg((void*)new_header, lockee->mark_addr(), header) == header) {
                if (PrintBiasedLockingStatistics)
                   (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
               }
@@ -1891,7 +1891,7 @@
             markOop displaced = lockee->mark()->set_unlocked();
             entry->lock()->set_displaced_header(displaced);
             bool call_vm = UseHeavyMonitors;
-            if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+            if (call_vm || Atomic::cmpxchg(entry, lockee->mark_addr(), displaced) != displaced) {
               // Is it simple recursive case?
               if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                 entry->lock()->set_displaced_header(NULL);
@@ -1923,7 +1923,7 @@
             bool call_vm = UseHeavyMonitors;
             // If it isn't recursive we either must swap old header or call the runtime
             if (header != NULL || call_vm) {
-              if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+              if (call_vm || Atomic::cmpxchg(header, lockee->mark_addr(), lock) != lock) {
                 // restore object for the slow case
                 most_recent->set_obj(lockee);
                 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
@@ -2189,7 +2189,7 @@
             HeapWord* compare_to = *Universe::heap()->top_addr();
             HeapWord* new_top = compare_to + obj_size;
             if (new_top <= *Universe::heap()->end_addr()) {
-              if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
+              if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
                 goto retry;
               }
               result = (oop) compare_to;
@@ -2975,7 +2975,7 @@
           if (!lockee->mark()->has_bias_pattern()) {
             // If it isn't recursive we either must swap old header or call the runtime
             if (header != NULL) {
-              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+              if (Atomic::cmpxchg(header, lockee->mark_addr(), lock) != lock) {
                 // restore object for the slow case
                 end->set_obj(lockee);
                 {
@@ -3050,7 +3050,7 @@
               base->set_obj(NULL);
               // If it isn't recursive we either must swap old header or call the runtime
               if (header != NULL) {
-                if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
+                if (Atomic::cmpxchg(header, rcvr->mark_addr(), lock) != lock) {
                   // restore object for the slow case
                   base->set_obj(rcvr);
                   {
--- old/src/hotspot/share/interpreter/oopMapCache.cpp	2017-10-11 09:35:06.101184336 -0400
+++ new/src/hotspot/share/interpreter/oopMapCache.cpp	2017-10-11 09:35:05.675337717 -0400
@@ -448,11 +448,11 @@
 }
 
 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
-  return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size]));
+  return OrderAccess::load_acquire(&(_array[i % _size]));
 }
 
 bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
-  return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old;
+  return Atomic::cmpxchg(entry, &_array[i % _size], old) == old;
 }
 
 void OopMapCache::flush() {
@@ -564,7 +564,7 @@
   do {
     head = _old_entries;
     entry->_next = head;
-    success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head;
+    success = Atomic::cmpxchg(entry, &_old_entries, head) == head;
   } while (!success);
 
   if (log_is_enabled(Debug, interpreter, oopmap)) {
--- old/src/hotspot/share/memory/metaspace.cpp	2017-10-11 09:35:13.448298149 -0400
+++ new/src/hotspot/share/memory/metaspace.cpp	2017-10-11 09:35:12.924250230 -0400
@@ -1499,7 +1499,7 @@
 }
 
 size_t MetaspaceGC::capacity_until_GC() {
-  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
+  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
   assert(value >= MetaspaceSize, "Not initialized properly?");
   return value;
 }
@@ -1507,16 +1507,16 @@
 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
   assert_is_aligned(v, Metaspace::commit_alignment());
 
-  size_t capacity_until_GC = (size_t) _capacity_until_GC;
-  size_t new_value = capacity_until_GC + v;
+  intptr_t capacity_until_GC = _capacity_until_GC;
+  intptr_t new_value = capacity_until_GC + v;
 
   if (new_value < capacity_until_GC) {
     // The addition wrapped around, set new_value to aligned max value.
     new_value = align_down(max_uintx, Metaspace::commit_alignment());
   }
 
-  intptr_t expected = (intptr_t) capacity_until_GC;
-  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
+  intptr_t expected = _capacity_until_GC;
+  intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
 
   if (expected != actual) {
     return false;
@@ -1534,7 +1534,7 @@
 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
   assert_is_aligned(v, Metaspace::commit_alignment());
 
-  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
+  return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
 }
 
 void MetaspaceGC::initialize() {
@@ -2398,7 +2398,7 @@
 
 void SpaceManager::inc_used_metrics(size_t words) {
   // Add to the per SpaceManager total
-  Atomic::add_ptr(words, &_allocated_blocks_words);
+  Atomic::add(words, &_allocated_blocks_words);
   // Add to the global total
   MetaspaceAux::inc_used(mdtype(), words);
 }
@@ -2753,8 +2753,7 @@
   // sweep which is a concurrent phase.  Protection by the expand_lock()
   // is not enough since allocation is on a per Metaspace basis
   // and protected by the Metaspace lock.
-  jlong minus_words = (jlong) - (jlong) words;
-  Atomic::add_ptr(minus_words, &_used_words[mdtype]);
+  Atomic::sub(words, &_used_words[mdtype]);
 }
 
 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
@@ -2762,7 +2761,7 @@
   // each piece of metadata.  Those allocations are
   // generally done concurrently by different application
   // threads so must be done atomically.
-  Atomic::add_ptr(words, &_used_words[mdtype]);
+  Atomic::add(words, &_used_words[mdtype]);
 }
 
 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
--- old/src/hotspot/share/memory/universe.cpp	2017-10-11 09:35:22.070132216 -0400
+++ new/src/hotspot/share/memory/universe.cpp	2017-10-11 09:35:21.594931057 -0400
@@ -536,7 +536,7 @@
 
 oop Universe::swap_reference_pending_list(oop list) {
   assert_pll_locked(is_locked);
-  return (oop)Atomic::xchg_ptr(list, &_reference_pending_list);
+  return Atomic::xchg(list, &_reference_pending_list);
 }
 
 #undef assert_pll_locked
--- old/src/hotspot/share/oops/arrayKlass.inline.hpp	2017-10-11 09:35:29.257503281 -0400
+++ new/src/hotspot/share/oops/arrayKlass.inline.hpp	2017-10-11 09:35:28.742364553 -0400
@@ -29,11 +29,11 @@
 #include "oops/arrayKlass.hpp"
 
 inline Klass* ArrayKlass::higher_dimension_acquire() const {
-  return (Klass*) OrderAccess::load_ptr_acquire(&_higher_dimension);
+  return OrderAccess::load_acquire(&_higher_dimension);
 }
 
 inline void ArrayKlass::release_set_higher_dimension(Klass* k) {
-  OrderAccess::release_store_ptr(&_higher_dimension, k);
+  OrderAccess::release_store(&_higher_dimension, k);
 }
 
 #endif // SHARE_VM_OOPS_ARRAYKLASS_INLINE_HPP
--- old/src/hotspot/share/oops/constantPool.cpp	2017-10-11 09:35:36.135177027 -0400
+++ new/src/hotspot/share/oops/constantPool.cpp	2017-10-11 09:35:35.705623382 -0400
@@ -226,7 +226,7 @@
   symbol_at_put(name_index, name);
   name->increment_refcount();
   Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
-  OrderAccess::release_store_ptr((Klass* volatile *)adr, k);
+  OrderAccess::release_store((Klass* volatile *)adr, k);
 
   // The interpreter assumes when the tag is stored, the klass is resolved
   // and the Klass* non-NULL, so we need hardware store ordering here.
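Many of the hunks above (ClassLoaderData::modules(), the metaspace and _oop_map_cache lazy initializers, the resolved-klass stores here) are variations of one publication idiom: lock-free readers use an acquire load, and the writer publishes a fully constructed object with a release store after re-checking under a lock. A minimal, self-contained sketch of that idiom follows (illustrative only, not part of the patch); std::atomic and std::mutex stand in for HotSpot's OrderAccess and Mutex.

#include <atomic>
#include <mutex>

struct Table { int data[16]; };

static std::atomic<Table*> _table{nullptr};
static std::mutex          _table_lock;

Table* get_table() {
  // Lock-free readers need an acquire load so they observe a fully built Table.
  Table* t = _table.load(std::memory_order_acquire);
  if (t == nullptr) {
    std::lock_guard<std::mutex> guard(_table_lock);
    // Re-check under the lock: another thread may have created it meanwhile.
    t = _table.load(std::memory_order_relaxed);
    if (t == nullptr) {
      t = new Table();
      // Release store publishes the initialized object to lock-free readers.
      _table.store(t, std::memory_order_release);
    }
  }
  return t;
}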
@@ -243,7 +243,7 @@ CPKlassSlot kslot = klass_slot_at(class_index); int resolved_klass_index = kslot.resolved_klass_index(); Klass** adr = resolved_klasses()->adr_at(resolved_klass_index); - OrderAccess::release_store_ptr((Klass* volatile *)adr, k); + OrderAccess::release_store((Klass* volatile *)adr, k); // The interpreter assumes when the tag is stored, the klass is resolved // and the Klass* non-NULL, so we need hardware store ordering here. @@ -511,7 +511,7 @@ trace_class_resolution(this_cp, k); } Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index); - OrderAccess::release_store_ptr((Klass* volatile *)adr, k); + OrderAccess::release_store((Klass* volatile *)adr, k); // The interpreter assumes when the tag is stored, the klass is resolved // and the Klass* stored in _resolved_klasses is non-NULL, so we need // hardware store ordering here. --- old/src/hotspot/share/oops/constantPool.hpp 2017-10-11 09:35:43.288224731 -0400 +++ new/src/hotspot/share/oops/constantPool.hpp 2017-10-11 09:35:42.863228420 -0400 @@ -145,7 +145,7 @@ assert(is_within_bounds(which), "index out of bounds"); assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool"); // Uses volatile because the klass slot changes without a lock. - volatile intptr_t adr = (intptr_t)OrderAccess::load_ptr_acquire(obj_at_addr_raw(which)); + volatile intptr_t adr = OrderAccess::load_acquire(obj_at_addr_raw(which)); assert(adr != 0 || which == 0, "cp entry for klass should not be zero"); return CPSlot(adr); } @@ -407,7 +407,7 @@ assert(tag_at(kslot.name_index()).is_symbol(), "sanity"); Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index()); - return (Klass*)OrderAccess::load_ptr_acquire(adr); + return OrderAccess::load_acquire(adr); } // RedefineClasses() API support: --- old/src/hotspot/share/oops/cpCache.cpp 2017-10-11 09:35:50.380189704 -0400 +++ new/src/hotspot/share/oops/cpCache.cpp 2017-10-11 09:35:49.944057241 -0400 @@ -91,7 +91,7 @@ assert(c == 0 || c == code || code == 0, "update must be consistent"); #endif // Need to flush pending stores here before bytecode is written. - OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift)); + OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift)); } void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) { @@ -101,18 +101,18 @@ assert(c == 0 || c == code || code == 0, "update must be consistent"); #endif // Need to flush pending stores here before bytecode is written. - OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift)); + OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift)); } // Sets f1, ordering with previous writes. void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) { assert(f1 != NULL, ""); - OrderAccess::release_store_ptr((HeapWord*) &_f1, f1); + OrderAccess::release_store(&_f1, f1); } // Sets flags, but only if the value was previously zero. bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) { - intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0); + intptr_t result = Atomic::cmpxchg(flags, &_flags, (intptr_t)0); return (result == 0); } @@ -154,7 +154,8 @@ // bother trying to update it once it's nonzero but always make // sure that the final parameter size agrees with what was passed. 
if (_flags == 0) { - Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0); + intx newflags = (value & parameter_size_mask); + Atomic::cmpxchg(newflags, &_flags, (intx)0); } guarantee(parameter_size() == value, "size must not change: parameter_size=%d, value=%d", parameter_size(), value); --- old/src/hotspot/share/oops/cpCache.hpp 2017-10-11 09:35:57.739311956 -0400 +++ new/src/hotspot/share/oops/cpCache.hpp 2017-10-11 09:35:57.316728650 -0400 @@ -332,11 +332,11 @@ // Accessors int indices() const { return _indices; } - int indices_ord() const { return (intx)OrderAccess::load_ptr_acquire(&_indices); } + int indices_ord() const { return OrderAccess::load_acquire(&_indices); } int constant_pool_index() const { return (indices() & cp_index_mask); } Bytecodes::Code bytecode_1() const { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); } Bytecodes::Code bytecode_2() const { return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask); } - Metadata* f1_ord() const { return (Metadata *)OrderAccess::load_ptr_acquire(&_f1); } + Metadata* f1_ord() const { return (Metadata *)OrderAccess::load_acquire(&_f1); } Method* f1_as_method() const { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; } Klass* f1_as_klass() const { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; } // Use the accessor f1() to acquire _f1's value. This is needed for --- old/src/hotspot/share/oops/instanceKlass.cpp 2017-10-11 09:36:05.089915484 -0400 +++ new/src/hotspot/share/oops/instanceKlass.cpp 2017-10-11 09:36:04.654506969 -0400 @@ -1109,16 +1109,15 @@ void InstanceKlass::mask_for(const methodHandle& method, int bci, InterpreterOopMap* entry_for) { // Lazily create the _oop_map_cache at first request - // Lock-free access requires load_ptr_acquire. - OopMapCache* oop_map_cache = - static_cast(OrderAccess::load_ptr_acquire(&_oop_map_cache)); + // Lock-free access requires load_acquire. + OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache); if (oop_map_cache == NULL) { MutexLocker x(OopMapCacheAlloc_lock); // Check if _oop_map_cache was allocated while we were waiting for this lock if ((oop_map_cache = _oop_map_cache) == NULL) { oop_map_cache = new OopMapCache(); // Ensure _oop_map_cache is stable, since it is examined without a lock - OrderAccess::release_store_ptr(&_oop_map_cache, oop_map_cache); + OrderAccess::release_store(&_oop_map_cache, oop_map_cache); } } // _oop_map_cache is constant after init; lookup below does its own locking. @@ -1672,7 +1671,7 @@ // transitions from NULL to non-NULL which is safe because we use // release_set_methods_jmethod_ids() to advertise the new cache. // A partially constructed cache should never be seen by a racing - // thread. We also use release_store_ptr() to save a new jmethodID + // thread. We also use release_store() to save a new jmethodID // in the cache so a partially constructed jmethodID should never be // seen either. Cache reads of existing jmethodIDs proceed without a // lock, but cache writes of a new jmethodID requires uniqueness and @@ -1831,7 +1830,7 @@ // The jmethodID cache can be read while unlocked so we have to // make sure the new jmethodID is complete before installing it // in the cache. 
- OrderAccess::release_store_ptr(&jmeths[idnum+1], id); + OrderAccess::release_store(&jmeths[idnum+1], id); } else { *to_dealloc_id_p = new_id; // save new id for later delete } --- old/src/hotspot/share/oops/instanceKlass.inline.hpp 2017-10-11 09:36:12.247513745 -0400 +++ new/src/hotspot/share/oops/instanceKlass.inline.hpp 2017-10-11 09:36:11.775306168 -0400 @@ -35,19 +35,19 @@ #include "utilities/macros.hpp" inline Klass* InstanceKlass::array_klasses_acquire() const { - return (Klass*) OrderAccess::load_ptr_acquire(&_array_klasses); + return OrderAccess::load_acquire(&_array_klasses); } inline void InstanceKlass::release_set_array_klasses(Klass* k) { - OrderAccess::release_store_ptr(&_array_klasses, k); + OrderAccess::release_store(&_array_klasses, k); } inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const { - return (jmethodID*)OrderAccess::load_ptr_acquire(&_methods_jmethod_ids); + return OrderAccess::load_acquire(&_methods_jmethod_ids); } inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) { - OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths); + OrderAccess::release_store(&_methods_jmethod_ids, jmeths); } // The iteration over the oops in objects is a hot path in the GC code. --- old/src/hotspot/share/oops/method.cpp 2017-10-11 09:36:19.127221634 -0400 +++ new/src/hotspot/share/oops/method.cpp 2017-10-11 09:36:18.698970996 -0400 @@ -444,6 +444,11 @@ return mh->method_counters(); } +bool Method::init_method_counters(MethodCounters* counters) { + // Try to install a pointer to MethodCounters, return true on success. + return Atomic::cmpxchg(counters, &_method_counters, (MethodCounters*)NULL) == NULL; +} + void Method::cleanup_inline_caches() { // The current system doesn't use inline caches in the interpreter // => nothing to do (keep this method around for future use) @@ -1109,7 +1114,7 @@ } volatile address Method::from_compiled_entry_no_trampoline() const { - nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code); + CompiledMethod *code = OrderAccess::load_acquire(&_code); if (code) { return code->verified_entry_point(); } else { @@ -1135,7 +1140,7 @@ // Not inline to avoid circular ref. bool Method::check_code() const { // cached in a register or local. There's a race on the value of the field. - CompiledMethod *code = (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code); + CompiledMethod *code = OrderAccess::load_acquire(&_code); return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method()); } --- old/src/hotspot/share/oops/method.hpp 2017-10-11 09:36:26.235268952 -0400 +++ new/src/hotspot/share/oops/method.hpp 2017-10-11 09:36:25.797736299 -0400 @@ -136,9 +136,9 @@ static address make_adapters(const methodHandle& mh, TRAPS); - volatile address from_compiled_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); } + volatile address from_compiled_entry() const { return OrderAccess::load_acquire(&_from_compiled_entry); } volatile address from_compiled_entry_no_trampoline() const; - volatile address from_interpreted_entry() const{ return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); } + volatile address from_interpreted_entry() const{ return OrderAccess::load_acquire(&_from_interpreted_entry); } // access flag AccessFlags access_flags() const { return _access_flags; } @@ -337,7 +337,7 @@ // The store into method must be released. 
On platforms without // total store order (TSO) the reference may become visible before // the initialization of data otherwise. - OrderAccess::release_store_ptr((volatile void *)&_method_data, data); + OrderAccess::release_store(&_method_data, data); } MethodCounters* method_counters() const { @@ -348,10 +348,7 @@ _method_counters = NULL; } - bool init_method_counters(MethodCounters* counters) { - // Try to install a pointer to MethodCounters, return true on success. - return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL; - } + bool init_method_counters(MethodCounters* counters); #ifdef TIERED // We are reusing interpreter_invocation_count as a holder for the previous event count! @@ -452,7 +449,7 @@ // nmethod/verified compiler entry address verified_code_entry(); bool check_code() const; // Not inline to avoid circular ref - CompiledMethod* volatile code() const { assert( check_code(), "" ); return (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code); } + CompiledMethod* volatile code() const { assert( check_code(), "" ); return OrderAccess::load_acquire(&_code); } void clear_code(bool acquire_lock = true); // Clear out any compiled code static void set_code(const methodHandle& mh, CompiledMethod* code); void set_adapter_entry(AdapterHandlerEntry* adapter) { --- old/src/hotspot/share/oops/methodData.hpp 2017-10-11 09:36:33.586286216 -0400 +++ new/src/hotspot/share/oops/methodData.hpp 2017-10-11 09:36:33.146543364 -0400 @@ -202,7 +202,7 @@ _cells[index] = value; } void release_set_cell_at(int index, intptr_t value) { - OrderAccess::release_store_ptr(&_cells[index], value); + OrderAccess::release_store(&_cells[index], value); } intptr_t cell_at(int index) const { return _cells[index]; --- old/src/hotspot/share/oops/oop.inline.hpp 2017-10-11 09:36:40.818962975 -0400 +++ new/src/hotspot/share/oops/oop.inline.hpp 2017-10-11 09:36:40.339799861 -0400 @@ -66,7 +66,7 @@ template void oop_store(volatile T* p, oop v) { update_barrier_set_pre((T*)p, v); // cast away volatile - // Used by release_obj_field_put, so use release_store_ptr. + // Used by release_obj_field_put, so use release_store. oopDesc::release_encode_store_heap_oop(p, v); // When using CMS we must mark the card corresponding to p as dirty // with release sematics to prevent that CMS sees the dirty card but @@ -90,7 +90,7 @@ // We need a separate file to avoid circular references void oopDesc::release_set_mark(markOop m) { - OrderAccess::release_store_ptr(&_mark, m); + OrderAccess::release_store(&_mark, m); } markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) { @@ -124,7 +124,7 @@ volatile narrowKlass* xaddr = const_cast(addr); return Klass::decode_klass(OrderAccess::load_acquire(xaddr)); } else { - return (Klass*)OrderAccess::load_ptr_acquire(&_metadata._klass); + return OrderAccess::load_acquire(&_metadata._klass); } } @@ -161,7 +161,7 @@ OrderAccess::release_store(compressed_klass_addr(), Klass::encode_klass_not_null(k)); } else { - OrderAccess::release_store_ptr(klass_addr(), k); + OrderAccess::release_store(klass_addr(), k); } } @@ -361,7 +361,7 @@ // Store heap oop as is for volatile fields. 
void oopDesc::release_store_heap_oop(volatile oop* p, oop v) { - OrderAccess::release_store_ptr(p, v); + OrderAccess::release_store(p, v); } void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) { OrderAccess::release_store(p, v); @@ -372,11 +372,11 @@ OrderAccess::release_store(p, encode_heap_oop_not_null(v)); } void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) { - OrderAccess::release_store_ptr(p, v); + OrderAccess::release_store(p, v); } void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) { - OrderAccess::release_store_ptr(p, v); + OrderAccess::release_store(p, v); } void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) { OrderAccess::release_store(p, encode_heap_oop(v)); @@ -392,7 +392,7 @@ // decode old from T to oop return decode_heap_oop(old); } else { - return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest); + return Atomic::xchg(exchange_value, (oop*)dest); } } @@ -447,11 +447,11 @@ void oopDesc::metadata_field_put(int offset, Metadata* value) { *metadata_field_addr(offset) = value; } Metadata* oopDesc::metadata_field_acquire(int offset) const { - return (Metadata*)OrderAccess::load_ptr_acquire(metadata_field_addr(offset)); + return OrderAccess::load_acquire(metadata_field_addr(offset)); } void oopDesc::release_metadata_field_put(int offset, Metadata* value) { - OrderAccess::release_store_ptr(metadata_field_addr(offset), value); + OrderAccess::release_store(metadata_field_addr(offset), value); } jbyte oopDesc::byte_field(int offset) const { return (jbyte) *byte_field_addr(offset); } @@ -485,8 +485,8 @@ return UseCompressedOops ? decode_heap_oop((narrowOop) OrderAccess::load_acquire(obj_field_addr(offset))) - : decode_heap_oop((oop) - OrderAccess::load_ptr_acquire(obj_field_addr(offset))); + : decode_heap_oop( + OrderAccess::load_acquire(obj_field_addr(offset))); } void oopDesc::release_obj_field_put(int offset, oop value) { UseCompressedOops ? 
@@ -518,8 +518,8 @@ jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); } void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); } -address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); } -void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); } +address oopDesc::address_field_acquire(int offset) const { return OrderAccess::load_acquire(address_field_addr(offset)); } +void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store(address_field_addr(offset), contents); } bool oopDesc::is_locked() const { return mark()->is_locked(); --- old/src/hotspot/share/opto/runtime.cpp 2017-10-11 09:36:48.541859247 -0400 +++ new/src/hotspot/share/opto/runtime.cpp 2017-10-11 09:36:47.922657764 -0400 @@ -1658,7 +1658,7 @@ c->set_next(NULL); head = _named_counters; c->set_next(head); - } while (Atomic::cmpxchg_ptr(c, &_named_counters, head) != head); + } while (Atomic::cmpxchg(c, &_named_counters, head) != head); return c; } --- old/src/hotspot/share/prims/jni.cpp 2017-10-11 09:36:55.752297090 -0400 +++ new/src/hotspot/share/prims/jni.cpp 2017-10-11 09:36:55.227954979 -0400 @@ -3775,7 +3775,7 @@ intptr_t *a = (intptr_t *) jni_functions(); intptr_t *b = (intptr_t *) new_jni_NativeInterface; for (uint i=0; i < sizeof(struct JNINativeInterface_)/sizeof(void *); i++) { - Atomic::store_ptr(*b++, a++); + Atomic::store(*b++, a++); } } @@ -3898,9 +3898,9 @@ jint a = 0xcafebabe; jint b = Atomic::xchg(0xdeadbeef, &a); void *c = &a; - void *d = Atomic::xchg_ptr(&b, &c); + void *d = Atomic::xchg(&b, &c); assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works"); - assert(c == &b && d == &a, "Atomic::xchg_ptr() works"); + assert(c == &b && d == &a, "Atomic::xchg() works"); } #endif // ZERO && ASSERT --- old/src/hotspot/share/prims/jvmtiRawMonitor.cpp 2017-10-11 09:37:03.374869364 -0400 +++ new/src/hotspot/share/prims/jvmtiRawMonitor.cpp 2017-10-11 09:37:02.944268203 -0400 @@ -127,7 +127,7 @@ int JvmtiRawMonitor::SimpleEnter (Thread * Self) { for (;;) { - if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { + if (Atomic::cmpxchg((void*)Self, &_owner, (void*)NULL) == NULL) { return OS_OK ; } @@ -139,7 +139,7 @@ Node._next = _EntryList ; _EntryList = &Node ; OrderAccess::fence() ; - if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { + if (_owner == NULL && Atomic::cmpxchg((void*)Self, &_owner, (void*)NULL) == NULL) { _EntryList = Node._next ; RawMonitor_lock->unlock() ; return OS_OK ; @@ -153,7 +153,7 @@ int JvmtiRawMonitor::SimpleExit (Thread * Self) { guarantee (_owner == Self, "invariant") ; - OrderAccess::release_store_ptr (&_owner, NULL) ; + OrderAccess::release_store(&_owner, (void*)NULL) ; OrderAccess::fence() ; if (_EntryList == NULL) return OS_OK ; ObjectWaiter * w ; @@ -277,10 +277,10 @@ jt->SR_lock()->lock_without_safepoint_check(); } // guarded by SR_lock to avoid racing with new external suspend requests. 
-     Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+     Contended = Atomic::cmpxchg((void*)THREAD, &_owner, (void*)NULL);
      jt->SR_lock()->unlock();
    } else {
-     Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+     Contended = Atomic::cmpxchg((void*)THREAD, &_owner, (void*)NULL);
    }

    if (Contended == THREAD) {
--- old/src/hotspot/share/runtime/atomic.hpp	2017-10-11 09:37:16.646318224 -0400
+++ new/src/hotspot/share/runtime/atomic.hpp	2017-10-11 09:37:16.121860909 -0400
@@ -70,14 +70,6 @@
   template<typename T, typename D>
   inline static void store(T store_value, volatile D* dest);

-  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest) {
-    Atomic::store(store_value, dest);
-  }
-
-  inline static void store_ptr(void* store_value, volatile void* dest) {
-    Atomic::store(store_value, reinterpret_cast<void* volatile*>(dest));
-  }
-
   // Atomically load from a location
   // The type T must be either a pointer type, an integral/enum type,
   // or a type that is primitive convertible using PrimitiveConversions.
@@ -90,13 +82,8 @@
   template<typename I, typename D>
   inline static D add(I add_value, D volatile* dest);

-  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-    return add(add_value, dest);
-  }
-
-  inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
-    return add(add_value, reinterpret_cast<char* volatile*>(dest));
-  }
+  template<typename I, typename D>
+  inline static D sub(I sub_value, D volatile* dest);

   // Atomically increment location. inc() provide:
   // <fence> increment-dest <membar StoreLoad|StoreStore>
@@ -123,14 +110,6 @@
   template<typename T, typename D>
   inline static D xchg(T exchange_value, volatile D* dest);

-  inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-    return xchg(exchange_value, dest);
-  }
-
-  inline static void* xchg_ptr(void* exchange_value, volatile void* dest) {
-    return xchg(exchange_value, reinterpret_cast<void* volatile*>(dest));
-  }
-
   // Performs atomic compare of *dest and compare_value, and exchanges
   // *dest with exchange_value if the comparison succeeded. Returns prior
   // value of *dest. cmpxchg*() provide:
@@ -151,23 +130,6 @@
   inline static bool replace_if_null(T* value, D* volatile* dest,
                                      cmpxchg_memory_order order = memory_order_conservative);

-  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
-                                     volatile intptr_t* dest,
-                                     intptr_t compare_value,
-                                     cmpxchg_memory_order order = memory_order_conservative) {
-    return cmpxchg(exchange_value, dest, compare_value, order);
-  }
-
-  inline static void* cmpxchg_ptr(void* exchange_value,
-                                  volatile void* dest,
-                                  void* compare_value,
-                                  cmpxchg_memory_order order = memory_order_conservative) {
-    return cmpxchg(exchange_value,
-                   reinterpret_cast<void* volatile*>(dest),
-                   compare_value,
-                   order);
-  }
-
 private:
   // Test whether From is implicitly convertible to To.
   // From and To must be pointer types.
@@ -555,6 +517,14 @@
   Atomic::add(I(-1), dest);
 }

+template<typename I, typename D>
+inline D Atomic::sub(I sub_value, D volatile* dest) {
+  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
+  // Assumes two's complement integer representation.
+  #pragma warning(suppress: 4146)
+  return Atomic::add(-sub_value, dest);
+}
+
 // Define the class before including platform file, which may specialize
No generic definition of specializations // of the operator template are provided, nor are there any generic --- old/src/hotspot/share/runtime/mutex.cpp 2017-10-11 09:37:23.771339836 -0400 +++ new/src/hotspot/share/runtime/mutex.cpp 2017-10-11 09:37:23.253002464 -0400 @@ -251,12 +251,6 @@ // // o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o - -// CASPTR() uses the canonical argument order that dominates in the literature. -// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates. - -#define CASPTR(a, c, s) \ - intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c))) #define UNS(x) (uintptr_t(x)) #define TRACE(m) \ { \ @@ -297,7 +291,7 @@ intptr_t v = _LockWord.FullWord; for (;;) { if ((v & _LBIT) != 0) return 0; - const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT); + const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v); if (v == u) return 1; v = u; } @@ -307,12 +301,12 @@ // Optimistic fast-path form ... // Fast-path attempt for the common uncontended case. // Avoid RTS->RTO $ coherence upgrade on typical SMP systems. - intptr_t v = CASPTR(&_LockWord, 0, _LBIT); // agro ... + intptr_t v = Atomic::cmpxchg((intptr_t)_LBIT, &_LockWord.FullWord, (intptr_t)0); // agro ... if (v == 0) return 1; for (;;) { if ((v & _LBIT) != 0) return 0; - const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT); + const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v); if (v == u) return 1; v = u; } @@ -350,7 +344,7 @@ for (;;) { intptr_t v = _LockWord.FullWord; if ((v & _LBIT) == 0) { - if (CASPTR (&_LockWord, v, v|_LBIT) == v) { + if (Atomic::cmpxchg (v|_LBIT, &_LockWord.FullWord, v) == v) { return 1; } continue; @@ -419,13 +413,13 @@ intptr_t v = _LockWord.FullWord; for (;;) { if ((v & _LBIT) == 0) { - const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT); + const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v); if (u == v) return 1; // indicate acquired v = u; } else { // Anticipate success ... ESelf->ListNext = (ParkEvent *)(v & ~_LBIT); - const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT); + const intptr_t u = Atomic::cmpxchg(intptr_t(ESelf)|_LBIT, &_LockWord.FullWord, v); if (u == v) return 0; // indicate pushed onto cxq v = u; } @@ -463,7 +457,7 @@ OrderAccess::fence(); // Optional optimization ... try barging on the inner lock - if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) { + if ((NativeMonitorFlags & 32) && Atomic::cmpxchg(ESelf, &_OnDeck, (ParkEvent*)NULL) == NULL) { goto OnDeck_LOOP; } @@ -474,7 +468,7 @@ // Only the OnDeck thread can try to acquire -- contend for -- the lock. // CONSIDER: use Self->OnDeck instead of m->OnDeck. // Deschedule Self so that others may run. - while (OrderAccess::load_ptr_acquire(&_OnDeck) != ESelf) { + while (OrderAccess::load_acquire(&_OnDeck) != ESelf) { ParkCommon(ESelf, 0); } @@ -570,7 +564,7 @@ // Unlike a normal lock, however, the exiting thread "locks" OnDeck, // picks a successor and marks that thread as OnDeck. That successor // thread will then clear OnDeck once it eventually acquires the outer lock. - if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) { + if (Atomic::cmpxchg((ParkEvent*)_LBIT, &_OnDeck, (ParkEvent*)NULL) != NULL) { return; } @@ -592,7 +586,7 @@ // Note that once we set _OnDeck that thread can acquire the mutex, proceed // with its critical section and then enter this code to unlock the mutex. So // you can have multiple threads active in IUnlock at the same time. 
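Note on the CASPTR removal: mutex.cpp now spells the low-bit spin lock directly with Atomic::cmpxchg on the intptr_t lock word, in the argument order exchange value, destination, compare value. A minimal sketch of that idiom with made-up names; the succession and park/unpark machinery of the real Monitor code is omitted:

  #include "runtime/atomic.hpp"

  static const intptr_t    LBIT       = 1;     // lock bit, as _LBIT above
  static volatile intptr_t _lock_word = 0;     // hypothetical lock word

  static bool try_lock() {
    intptr_t v = _lock_word;
    for (;;) {
      if ((v & LBIT) != 0) return false;                       // already held
      const intptr_t u = Atomic::cmpxchg(v | LBIT, &_lock_word, v);
      if (u == v) return true;                                 // we set the bit
      v = u;                                                   // lost the race, retry
    }
  }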
- OrderAccess::release_store_ptr(&_OnDeck, w); + OrderAccess::release_store(&_OnDeck, w); // Another optional optimization ... // For heavily contended locks it's not uncommon that some other @@ -616,7 +610,7 @@ for (;;) { // optional optimization - if locked, the owner is responsible for succession if (cxq & _LBIT) goto Punt; - const intptr_t vfy = CASPTR(&_LockWord, cxq, cxq & _LBIT); + const intptr_t vfy = Atomic::cmpxchg(cxq & _LBIT, &_LockWord.FullWord, cxq); if (vfy == cxq) break; cxq = vfy; // Interference - LockWord changed - Just retry @@ -693,7 +687,7 @@ const intptr_t v = _LockWord.FullWord; assert((v & 0xFF) == _LBIT, "invariant"); nfy->ListNext = (ParkEvent *)(v & ~_LBIT); - if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break; + if (Atomic::cmpxchg(intptr_t(nfy)|_LBIT, &_LockWord.FullWord, v) == v) break; // interference - _LockWord changed -- just retry } // Note that setting Notified before pushing nfy onto the cxq is @@ -840,7 +834,7 @@ // ESelf is now on the cxq, EntryList or at the OnDeck position. // The following fragment is extracted from Monitor::ILock() for (;;) { - if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break; + if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break; ParkCommon(ESelf, 0); } assert(_OnDeck == ESelf, "invariant"); @@ -1058,7 +1052,7 @@ // Only the OnDeck thread can try to acquire -- contend for -- the lock. // CONSIDER: use Self->OnDeck instead of m->OnDeck. for (;;) { - if (OrderAccess::load_ptr_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break; + if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break; ParkCommon(ESelf, 0); } --- old/src/hotspot/share/runtime/objectMonitor.cpp 2017-10-11 09:37:30.374638000 -0400 +++ new/src/hotspot/share/runtime/objectMonitor.cpp 2017-10-11 09:37:29.849671239 -0400 @@ -249,7 +249,7 @@ // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. Thread * const Self = THREAD; - void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL); + void * cur = Atomic::cmpxchg((void*)Self, &_owner, (void*)NULL); if (cur == NULL) { // Either ASSERT _recursions == 0 or explicitly set _recursions = 0. assert(_recursions == 0, "invariant"); @@ -406,7 +406,7 @@ int ObjectMonitor::TryLock(Thread * Self) { void * own = _owner; if (own != NULL) return 0; - if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { + if (Atomic::cmpxchg((void*)Self, &_owner, (void*)NULL) == NULL) { // Either guarantee _recursions == 0 or set _recursions = 0. assert(_recursions == 0, "invariant"); assert(_owner == Self, "invariant"); @@ -476,7 +476,7 @@ ObjectWaiter * nxt; for (;;) { node._next = nxt = _cxq; - if (Atomic::cmpxchg_ptr(&node, &_cxq, nxt) == nxt) break; + if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break; // Interference - the CAS failed because _cxq changed. Just retry. // As an optional optimization we retry the lock. @@ -514,7 +514,7 @@ if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) { // Try to assume the role of responsible thread for the monitor. 
// CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self } - Atomic::cmpxchg_ptr(Self, &_Responsible, NULL); + Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL); } // The lock might have been released while this thread was occupied queueing @@ -538,7 +538,7 @@ assert(_owner != Self, "invariant"); if ((SyncFlags & 2) && _Responsible == NULL) { - Atomic::cmpxchg_ptr(Self, &_Responsible, NULL); + Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL); } // park self @@ -795,7 +795,7 @@ ObjectWaiter * v = _cxq; assert(v != NULL, "invariant"); - if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) { + if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) { // The CAS above can fail from interference IFF a "RAT" arrived. // In that case Self must be in the interior and can no longer be // at the head of cxq. @@ -947,7 +947,7 @@ // in massive wasteful coherency traffic on classic SMP systems. // Instead, I use release_store(), which is implemented as just a simple // ST on x64, x86 and SPARC. - OrderAccess::release_store_ptr(&_owner, NULL); // drop the lock + OrderAccess::release_store(&_owner, (void*)NULL); // drop the lock OrderAccess::storeload(); // See if we need to wake a successor if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { TEVENT(Inflated exit - simple egress); @@ -992,13 +992,13 @@ // to reacquire the lock the responsibility for ensuring succession // falls to the new owner. // - if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { + if (Atomic::cmpxchg((void*)THREAD, &_owner, (void*)NULL) != NULL) { return; } TEVENT(Exit - Reacquired); } else { if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { - OrderAccess::release_store_ptr(&_owner, NULL); // drop the lock + OrderAccess::release_store(&_owner, (void*)NULL); // drop the lock OrderAccess::storeload(); // Ratify the previously observed values. if (_cxq == NULL || _succ != NULL) { @@ -1017,7 +1017,7 @@ // B. If the elements forming the EntryList|cxq are TSM // we could simply unpark() the lead thread and return // without having set _succ. 
- if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { + if (Atomic::cmpxchg((void*)THREAD, &_owner, (void*)NULL) != NULL) { TEVENT(Inflated exit - reacquired succeeded); return; } @@ -1052,7 +1052,7 @@ w = _cxq; for (;;) { assert(w != NULL, "Invariant"); - ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w); + ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w); if (u == w) break; w = u; } @@ -1093,7 +1093,7 @@ w = _cxq; for (;;) { assert(w != NULL, "Invariant"); - ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w); + ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w); if (u == w) break; w = u; } @@ -1146,7 +1146,7 @@ // The following loop is tantamount to: w = swap(&cxq, NULL) for (;;) { assert(w != NULL, "Invariant"); - ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr(NULL, &_cxq, w); + ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w); if (u == w) break; w = u; } @@ -1279,7 +1279,7 @@ Wakee = NULL; // Drop the lock - OrderAccess::release_store_ptr(&_owner, NULL); + OrderAccess::release_store(&_owner, (void*)NULL); OrderAccess::fence(); // ST _owner vs LD in unpark() if (SafepointSynchronize::do_call_back()) { @@ -1688,7 +1688,7 @@ for (;;) { ObjectWaiter * front = _cxq; iterator->_next = front; - if (Atomic::cmpxchg_ptr(iterator, &_cxq, front) == front) { + if (Atomic::cmpxchg(iterator, &_cxq, front) == front) { break; } } @@ -1699,7 +1699,7 @@ ObjectWaiter * tail = _cxq; if (tail == NULL) { iterator->_next = NULL; - if (Atomic::cmpxchg_ptr(iterator, &_cxq, NULL) == NULL) { + if (Atomic::cmpxchg(iterator, &_cxq, (ObjectWaiter*)NULL) == NULL) { break; } } else { @@ -1980,7 +1980,7 @@ Thread * ox = (Thread *) _owner; if (ox == NULL) { - ox = (Thread *) Atomic::cmpxchg_ptr(Self, &_owner, NULL); + ox = (Thread*)Atomic::cmpxchg((void*)Self, &_owner, (void*)NULL); if (ox == NULL) { // The CAS succeeded -- this thread acquired ownership // Take care of some bookkeeping to exit spin state. 
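Note on the ObjectMonitor hunks: enter and exit reduce to one idiom, claim _owner with a CAS against NULL and drop it with a release store of NULL. A minimal sketch against the templated API; the class is hypothetical and only the two calls mirror the patched code:

  #include "runtime/atomic.hpp"
  #include "runtime/orderAccess.inline.hpp"

  class Thread;

  class ToyMonitor {
    void* volatile _owner;
   public:
    ToyMonitor() : _owner(NULL) {}

    bool try_enter(Thread* self) {
      // Casting both operands to void* lets the template deduce one pointer
      // type, exactly as the patched call sites do.
      return Atomic::cmpxchg((void*)self, &_owner, (void*)NULL) == NULL;
    }

    void exit() {
      // The release store publishes writes made while owning the monitor;
      // the real exit path additionally fences and wakes a successor.
      OrderAccess::release_store(&_owner, (void*)NULL);
    }
  };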
--- old/src/hotspot/share/runtime/objectMonitor.hpp 2017-10-11 09:37:37.559454486 -0400 +++ new/src/hotspot/share/runtime/objectMonitor.hpp 2017-10-11 09:37:37.088115670 -0400 @@ -251,6 +251,7 @@ ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value) markOop header() const; + markOopDesc* volatile* header_addr(); void set_header(markOop hdr); intptr_t is_busy() const { --- old/src/hotspot/share/runtime/objectMonitor.inline.hpp 2017-10-11 09:37:44.363784492 -0400 +++ new/src/hotspot/share/runtime/objectMonitor.inline.hpp 2017-10-11 09:37:43.937461445 -0400 @@ -36,6 +36,10 @@ return _header; } +inline markOopDesc* volatile* ObjectMonitor::header_addr() { + return &_header; +} + inline void ObjectMonitor::set_header(markOop hdr) { _header = hdr; } --- old/src/hotspot/share/runtime/orderAccess.hpp 2017-10-11 09:37:51.530785209 -0400 +++ new/src/hotspot/share/runtime/orderAccess.hpp 2017-10-11 09:37:51.105198647 -0400 @@ -268,21 +268,12 @@ template static T load_acquire(const volatile T* p); - static intptr_t load_ptr_acquire(const volatile intptr_t* p); - static void* load_ptr_acquire(const volatile void* p); - template static void release_store(volatile D* p, T v); - static void release_store_ptr(volatile intptr_t* p, intptr_t v); - static void release_store_ptr(volatile void* p, void* v); - template static void release_store_fence(volatile D* p, T v); - static void release_store_ptr_fence(volatile intptr_t* p, intptr_t v); - static void release_store_ptr_fence(volatile void* p, void* v); - private: // This is a helper that invokes the StubRoutines::fence_entry() // routine if it exists, It should only be used by platforms that --- old/src/hotspot/share/runtime/orderAccess.inline.hpp 2017-10-11 09:37:58.359915573 -0400 +++ new/src/hotspot/share/runtime/orderAccess.inline.hpp 2017-10-11 09:37:57.925072635 -0400 @@ -54,28 +54,13 @@ return LoadImpl >()(p); } -inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t* p) { - return load_acquire(p); -} - -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { - return load_acquire(static_cast(p)); -} - template inline void OrderAccess::release_store(volatile D* p, T v) { StoreImpl >()(v, p); } -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { release_store(p, v); } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { release_store(static_cast(p), v); } - template inline void OrderAccess::release_store_fence(volatile D* p, T v) { StoreImpl >()(v, p); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_fence(p, v); } -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { release_store_fence(static_cast(p), v); } - #endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP --- old/src/hotspot/share/runtime/stubRoutines.cpp 2017-10-11 09:38:05.238641930 -0400 +++ new/src/hotspot/share/runtime/stubRoutines.cpp 2017-10-11 09:38:04.766537017 -0400 @@ -59,11 +59,10 @@ jint StubRoutines::_verify_oop_count = 0; address StubRoutines::_verify_oop_subroutine_entry = NULL; address StubRoutines::_atomic_xchg_entry = NULL; -address StubRoutines::_atomic_xchg_ptr_entry = NULL; +address StubRoutines::_atomic_xchg_long_entry = NULL; address StubRoutines::_atomic_store_entry = NULL; address StubRoutines::_atomic_store_ptr_entry = NULL; address StubRoutines::_atomic_cmpxchg_entry = NULL; -address StubRoutines::_atomic_cmpxchg_ptr_entry = NULL; address StubRoutines::_atomic_cmpxchg_byte_entry = NULL; address 
StubRoutines::_atomic_cmpxchg_long_entry = NULL; address StubRoutines::_atomic_add_entry = NULL; --- old/src/hotspot/share/runtime/stubRoutines.hpp 2017-10-11 09:38:12.620939560 -0400 +++ new/src/hotspot/share/runtime/stubRoutines.hpp 2017-10-11 09:38:12.150834464 -0400 @@ -101,11 +101,10 @@ static address _throw_delayed_StackOverflowError_entry; static address _atomic_xchg_entry; - static address _atomic_xchg_ptr_entry; + static address _atomic_xchg_long_entry; static address _atomic_store_entry; static address _atomic_store_ptr_entry; static address _atomic_cmpxchg_entry; - static address _atomic_cmpxchg_ptr_entry; static address _atomic_cmpxchg_byte_entry; static address _atomic_cmpxchg_long_entry; static address _atomic_add_entry; @@ -276,11 +275,10 @@ static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_entry; } static address atomic_xchg_entry() { return _atomic_xchg_entry; } - static address atomic_xchg_ptr_entry() { return _atomic_xchg_ptr_entry; } + static address atomic_xchg_long_entry() { return _atomic_xchg_long_entry; } static address atomic_store_entry() { return _atomic_store_entry; } static address atomic_store_ptr_entry() { return _atomic_store_ptr_entry; } static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; } - static address atomic_cmpxchg_ptr_entry() { return _atomic_cmpxchg_ptr_entry; } static address atomic_cmpxchg_byte_entry() { return _atomic_cmpxchg_byte_entry; } static address atomic_cmpxchg_long_entry() { return _atomic_cmpxchg_long_entry; } static address atomic_add_entry() { return _atomic_add_entry; } --- old/src/hotspot/share/runtime/synchronizer.cpp 2017-10-11 09:38:19.565213234 -0400 +++ new/src/hotspot/share/runtime/synchronizer.cpp 2017-10-11 09:38:19.043784309 -0400 @@ -111,9 +111,7 @@ static volatile intptr_t gInflationLocks[NINFLATIONLOCKS]; // global list of blocks of monitors -// gBlockList is really PaddedEnd *, but we don't -// want to expose the PaddedEnd template more than necessary. -ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL; +PaddedEnd * volatile ObjectSynchronizer::gBlockList = NULL; // global monitor free list ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL; // global monitor in-use list, for moribund threads, @@ -241,7 +239,7 @@ lock->set_displaced_header(markOopDesc::unused_mark()); if (owner == NULL && - Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) { + Atomic::cmpxchg((void*)Self, &(m->_owner), (void*)NULL) == NULL) { assert(m->_recursions == 0, "invariant"); assert(m->_owner == Self, "invariant"); return true; @@ -802,7 +800,7 @@ hash = get_next_hash(Self, obj); temp = mark->copy_set_hash(hash); // merge hash code into header assert(temp->is_neutral(), "invariant"); - test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark); + test = Atomic::cmpxchg(temp, monitor->header_addr(), mark); if (test != mark) { // The only update to the header in the monitor (outside GC) // is install the hash code. If someone add new usage of @@ -939,8 +937,7 @@ // Visitors ... 
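Note on gBlockList: the hunks that follow keep the same publish/observe pairing as before, only without the _ptr spellings; a block is fully initialized, then published with a release store, and lock-free readers traverse it behind a load_acquire. A minimal sketch with the padding and the writer-side locking left out (both assumed to be handled elsewhere, as in ObjectSynchronizer):

  #include "runtime/orderAccess.inline.hpp"

  struct Block { Block* _next; };               // stand-in for a monitor block
  static Block* volatile g_block_list = NULL;   // hypothetical global list head

  // Writer (serialized externally): initialize fully, then publish the head.
  static void publish(Block* b) {
    b->_next = g_block_list;                        // prior stores happen first ...
    OrderAccess::release_store(&g_block_list, b);   // ... then the head is released
  }

  // Lock-free reader: the acquire pairs with the release above.
  static void walk() {
    for (Block* b = OrderAccess::load_acquire(&g_block_list); b != NULL; b = b->_next) {
      // visit b
    }
  }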
void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { - PaddedEnd * block = - (PaddedEnd *)OrderAccess::load_ptr_acquire(&gBlockList); + PaddedEnd * block = OrderAccess::load_acquire(&gBlockList); while (block != NULL) { assert(block->object() == CHAINMARKER, "must be a block header"); for (int i = _BLOCKSIZE - 1; i > 0; i--) { @@ -991,8 +988,7 @@ void ObjectSynchronizer::global_oops_do(OopClosure* f) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - PaddedEnd * block = - (PaddedEnd *)OrderAccess::load_ptr_acquire(&gBlockList); + PaddedEnd * block = OrderAccess::load_acquire(&gBlockList); for (; block != NULL; block = (PaddedEnd *)next(block)) { assert(block->object() == CHAINMARKER, "must be a block header"); for (int i = 1; i < _BLOCKSIZE; i++) { @@ -1232,7 +1228,7 @@ temp[0].FreeNext = gBlockList; // There are lock-free uses of gBlockList so make sure that // the previous stores happen before we update gBlockList. - OrderAccess::release_store_ptr(&gBlockList, temp); + OrderAccess::release_store(&gBlockList, temp); // Add the new string of objectMonitors to the global free list temp[_BLOCKSIZE - 1].FreeNext = gFreeList; @@ -1734,8 +1730,7 @@ } } else { - PaddedEnd * block = - (PaddedEnd *)OrderAccess::load_ptr_acquire(&gBlockList); + PaddedEnd * block = OrderAccess::load_acquire(&gBlockList); for (; block != NULL; block = (PaddedEnd *)next(block)) { // Iterate over all extant monitors - Scavenge all idle monitors. assert(block->object() == CHAINMARKER, "must be a block header"); @@ -1969,8 +1964,7 @@ // the list of extant blocks without taking a lock. int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { - PaddedEnd * block = - (PaddedEnd *)OrderAccess::load_ptr_acquire(&gBlockList); + PaddedEnd * block = OrderAccess::load_acquire(&gBlockList); while (block != NULL) { assert(block->object() == CHAINMARKER, "must be a block header"); if (monitor > (ObjectMonitor *)&block[0] && --- old/src/hotspot/share/runtime/synchronizer.hpp 2017-10-11 09:38:26.732196931 -0400 +++ new/src/hotspot/share/runtime/synchronizer.hpp 2017-10-11 09:38:26.259688771 -0400 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_RUNTIME_SYNCHRONIZER_HPP #define SHARE_VM_RUNTIME_SYNCHRONIZER_HPP +#include "memory/padded.hpp" #include "oops/markOop.hpp" #include "runtime/basicLock.hpp" #include "runtime/handles.hpp" @@ -159,9 +160,7 @@ private: enum { _BLOCKSIZE = 128 }; // global list of blocks of monitors - // gBlockList is really PaddedEnd *, but we don't - // want to expose the PaddedEnd template more than necessary. 
- static ObjectMonitor * volatile gBlockList; + static PaddedEnd * volatile gBlockList; // global monitor free list static ObjectMonitor * volatile gFreeList; // global monitor in-use list, for moribund threads, --- old/src/hotspot/share/runtime/thread.cpp 2017-10-11 09:38:33.692817026 -0400 +++ new/src/hotspot/share/runtime/thread.cpp 2017-10-11 09:38:33.263785400 -0400 @@ -4704,9 +4704,9 @@ enum MuxBits { LOCKBIT = 1 }; void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) { - intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0); + intptr_t w = Atomic::cmpxchg((intptr_t)LOCKBIT, Lock, (intptr_t)0); if (w == 0) return; - if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { + if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { return; } @@ -4719,7 +4719,7 @@ // Optional spin phase: spin-then-park strategy while (--its >= 0) { w = *Lock; - if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { + if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { return; } } @@ -4732,7 +4732,7 @@ for (;;) { w = *Lock; if ((w & LOCKBIT) == 0) { - if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { + if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { Self->OnList = 0; // hygiene - allows stronger asserts return; } @@ -4740,7 +4740,7 @@ } assert(w & LOCKBIT, "invariant"); Self->ListNext = (ParkEvent *) (w & ~LOCKBIT); - if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break; + if (Atomic::cmpxchg(intptr_t(Self)|LOCKBIT, Lock, w) == w) break; } while (Self->OnList != 0) { @@ -4750,9 +4750,9 @@ } void Thread::muxAcquireW(volatile intptr_t * Lock, ParkEvent * ev) { - intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0); + intptr_t w = Atomic::cmpxchg((intptr_t)LOCKBIT, Lock, (intptr_t)0); if (w == 0) return; - if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { + if ((w & LOCKBIT) == 0 && Atomic::cmpxchg((intptr_t)w|LOCKBIT, Lock, w) == w) { return; } @@ -4769,7 +4769,7 @@ // Optional spin phase: spin-then-park strategy while (--its >= 0) { w = *Lock; - if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { + if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { if (ReleaseAfter != NULL) { ParkEvent::Release(ReleaseAfter); } @@ -4785,7 +4785,7 @@ for (;;) { w = *Lock; if ((w & LOCKBIT) == 0) { - if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { + if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) { ev->OnList = 0; // We call ::Release while holding the outer lock, thus // artificially lengthening the critical section. @@ -4800,7 +4800,7 @@ } assert(w & LOCKBIT, "invariant"); ev->ListNext = (ParkEvent *) (w & ~LOCKBIT); - if (Atomic::cmpxchg_ptr(intptr_t(ev)|LOCKBIT, Lock, w) == w) break; + if (Atomic::cmpxchg(intptr_t(ev)|LOCKBIT, Lock, w) == w) break; } while (ev->OnList != 0) { @@ -4836,7 +4836,7 @@ // store (CAS) to the lock-word that releases the lock becomes globally visible. void Thread::muxRelease(volatile intptr_t * Lock) { for (;;) { - const intptr_t w = Atomic::cmpxchg_ptr(0, Lock, LOCKBIT); + const intptr_t w = Atomic::cmpxchg((intptr_t)0, Lock, (intptr_t)LOCKBIT); assert(w & LOCKBIT, "invariant"); if (w == LOCKBIT) return; ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT); @@ -4847,7 +4847,7 @@ // The following CAS() releases the lock and pops the head element. // The CAS() also ratifies the previously fetched lock-word value. 
-    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
+    if (Atomic::cmpxchg(intptr_t(nxt), Lock, w) != w) {
       continue;
     }
     List->OnList = 0;
--- old/src/hotspot/share/runtime/vmStructs.cpp	2017-10-11 09:38:41.763764290 -0400
+++ new/src/hotspot/share/runtime/vmStructs.cpp	2017-10-11 09:38:41.158461055 -0400
@@ -61,6 +61,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/heap.hpp"
 #include "memory/metachunk.hpp"
+#include "memory/padded.hpp"
 #include "memory/referenceType.hpp"
 #include "memory/universe.hpp"
 #include "memory/virtualspace.hpp"
@@ -198,6 +199,8 @@
 typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
 typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;

+typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
+
 //--------------------------------------------------------------------------------
 // VM_STRUCTS
 //
@@ -1052,7 +1055,7 @@
   volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \
   nonstatic_field(BasicObjectLock, _lock, BasicLock) \
   nonstatic_field(BasicObjectLock, _obj, oop) \
-  static_ptr_volatile_field(ObjectSynchronizer, gBlockList, ObjectMonitor*) \
+  static_ptr_volatile_field(ObjectSynchronizer, gBlockList, PaddedObjectMonitor*) \
   \
   /*********************/ \
   /* Matcher (C2 only) */ \
@@ -1680,6 +1683,7 @@
   /************/ \
   \
   declare_toplevel_type(ObjectMonitor) \
+  declare_toplevel_type(PaddedObjectMonitor) \
   declare_toplevel_type(ObjectSynchronizer) \
   declare_toplevel_type(BasicLock) \
   declare_toplevel_type(BasicObjectLock) \
@@ -2154,6 +2158,7 @@
   declare_toplevel_type(nmethod*) \
   COMPILER2_PRESENT(declare_unsigned_integer_type(node_idx_t)) \
   declare_toplevel_type(ObjectMonitor*) \
+  declare_toplevel_type(PaddedObjectMonitor*) \
   declare_toplevel_type(oop*) \
   declare_toplevel_type(OopMap**) \
   declare_toplevel_type(OopMapCache*) \
--- old/src/hotspot/share/services/mallocSiteTable.cpp	2017-10-11 09:38:49.494615857 -0400
+++ new/src/hotspot/share/services/mallocSiteTable.cpp	2017-10-11 09:38:48.924118130 -0400
@@ -147,7 +147,7 @@
   if (entry == NULL) return NULL;

   // swap in the head
-  if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) {
+  if (Atomic::cmpxchg(entry, &_table[index], (MallocSiteHashtableEntry*)NULL) == NULL) {
     return entry->data();
   }

@@ -257,3 +257,7 @@
   }
   _lock_state = ExclusiveLock;
 }
+
+bool MallocSiteHashtableEntry::atomic_insert(const MallocSiteHashtableEntry* entry) {
+  return Atomic::cmpxchg(entry, (const MallocSiteHashtableEntry**)&_next, (const MallocSiteHashtableEntry*)NULL) == NULL;
+}
--- old/src/hotspot/share/services/mallocSiteTable.hpp	2017-10-11 09:38:56.122128499 -0400
+++ new/src/hotspot/share/services/mallocSiteTable.hpp	2017-10-11 09:38:55.693240620 -0400
@@ -79,10 +79,7 @@
   // Insert an entry atomically.
   // Return true if the entry is inserted successfully.
   // The operation can be failed due to contention from other thread.
- bool atomic_insert(const MallocSiteHashtableEntry* entry) { - return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next, - NULL) == NULL); - } + bool atomic_insert(const MallocSiteHashtableEntry* entry); void set_callsite(const MallocSite& site) { _malloc_site = site; --- old/src/hotspot/share/services/mallocTracker.hpp 2017-10-11 09:39:03.001051704 -0400 +++ new/src/hotspot/share/services/mallocTracker.hpp 2017-10-11 09:39:02.571711746 -0400 @@ -68,7 +68,7 @@ if (sz > 0) { // unary minus operator applied to unsigned type, result still unsigned #pragma warning(suppress: 4146) - Atomic::add(-sz, &_size); + Atomic::sub(sz, &_size); } } --- old/src/hotspot/share/services/memoryManager.cpp 2017-10-11 09:39:09.797556776 -0400 +++ new/src/hotspot/share/services/memoryManager.cpp 2017-10-11 09:39:09.371337380 -0400 @@ -94,7 +94,7 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) { // Must do an acquire so as to force ordering of subsequent // loads from anything _memory_mgr_obj points to or implies. - instanceOop mgr_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj); + instanceOop mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj); if (mgr_obj == NULL) { // It's ok for more than one thread to execute the code up to the locked region. // Extra manager instances will just be gc'ed. @@ -147,7 +147,7 @@ // // The lock has done an acquire, so the load can't float above it, but // we need to do a load_acquire as above. - mgr_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj); + mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj); if (mgr_obj != NULL) { return mgr_obj; } @@ -159,7 +159,7 @@ // with creating the management object are visible before publishing // its address. The unlock will publish the store to _memory_mgr_obj // because it does a release first. - OrderAccess::release_store_ptr(&_memory_mgr_obj, mgr_obj); + OrderAccess::release_store(&_memory_mgr_obj, mgr_obj); } } --- old/src/hotspot/share/services/memoryPool.cpp 2017-10-11 09:39:16.542967828 -0400 +++ new/src/hotspot/share/services/memoryPool.cpp 2017-10-11 09:39:16.066074712 -0400 @@ -82,7 +82,7 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) { // Must do an acquire so as to force ordering of subsequent // loads from anything _memory_pool_obj points to or implies. - instanceOop pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj); + instanceOop pool_obj = OrderAccess::load_acquire(&_memory_pool_obj); if (pool_obj == NULL) { // It's ok for more than one thread to execute the code up to the locked region. // Extra pool instances will just be gc'ed. @@ -123,7 +123,7 @@ // // The lock has done an acquire, so the load can't float above it, // but we need to do a load_acquire as above. - pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj); + pool_obj = OrderAccess::load_acquire(&_memory_pool_obj); if (pool_obj != NULL) { return pool_obj; } @@ -135,7 +135,7 @@ // with creating the pool are visible before publishing its address. // The unlock will publish the store to _memory_pool_obj because // it does a release first. 
- OrderAccess::release_store_ptr(&_memory_pool_obj, pool_obj); + OrderAccess::release_store(&_memory_pool_obj, pool_obj); } } --- old/src/hotspot/share/utilities/bitMap.cpp 2017-10-11 09:39:23.705372461 -0400 +++ new/src/hotspot/share/utilities/bitMap.cpp 2017-10-11 09:39:23.178640776 -0400 @@ -626,7 +626,7 @@ table[i] = num_set_bits(i); } - if (!Atomic::replace_if_null(table, &_pop_count_table)) { + if (Atomic::cmpxchg(table, &_pop_count_table, (BitMap::idx_t*)NULL) != NULL) { guarantee(_pop_count_table != NULL, "invariant"); FREE_C_HEAP_ARRAY(idx_t, table); } --- old/src/hotspot/share/utilities/hashtable.cpp 2017-10-11 09:39:30.278857222 -0400 +++ new/src/hotspot/share/utilities/hashtable.cpp 2017-10-11 09:39:29.806412320 -0400 @@ -190,7 +190,7 @@ BasicHashtableEntry* current = _free_list; while (true) { context->_removed_tail->set_next(current); - BasicHashtableEntry* old = (BasicHashtableEntry*)Atomic::cmpxchg_ptr(context->_removed_head, &_free_list, current); + BasicHashtableEntry* old = Atomic::cmpxchg(context->_removed_head, &_free_list, current); if (old == current) { break; } --- old/src/hotspot/share/utilities/hashtable.inline.hpp 2017-10-11 09:39:37.583241859 -0400 +++ new/src/hotspot/share/utilities/hashtable.inline.hpp 2017-10-11 09:39:37.156797955 -0400 @@ -78,7 +78,7 @@ // SystemDictionary are read without locks. The new entry must be // complete before other threads can be allowed to see it // via a store to _buckets[index]. - OrderAccess::release_store_ptr(&_entry, l); + OrderAccess::release_store(&_entry, l); } @@ -87,7 +87,7 @@ // SystemDictionary are read without locks. The new entry must be // complete before other threads can be allowed to see it // via a store to _buckets[index]. - return (BasicHashtableEntry*) OrderAccess::load_ptr_acquire(&_entry); + return OrderAccess::load_acquire(&_entry); } --- old/src/hotspot/share/utilities/vmError.cpp 2017-10-11 09:39:44.281462885 -0400 +++ new/src/hotspot/share/utilities/vmError.cpp 2017-10-11 09:39:43.856514776 -0400 @@ -1269,7 +1269,7 @@ } intptr_t mytid = os::current_thread_id(); if (first_error_tid == -1 && - Atomic::cmpxchg_ptr(mytid, &first_error_tid, -1) == -1) { + Atomic::cmpxchg(mytid, &first_error_tid, (intptr_t)-1) == -1) { // Initialize time stamps to use the same base. out.time_stamp().update_to(1);
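Note on the final hunk: vmError.cpp is the simplest shape of the whole conversion, a one-shot claim expressed as a typed compare-and-exchange against a sentinel. A minimal sketch; the field and sentinel mirror the hunk above, the wrapper function is made up:

  #include "runtime/atomic.hpp"

  static volatile intptr_t first_error_tid = -1;   // -1 means unclaimed

  // The first thread to swap its id in wins and gets to report the error.
  static bool claim_error_reporting(intptr_t mytid) {
    // The (intptr_t)-1 cast keeps the compare value the same type as the
    // destination, which the templated cmpxchg requires now that the
    // loosely typed cmpxchg_ptr overloads are gone.
    return Atomic::cmpxchg(mytid, &first_error_tid, (intptr_t)-1) == -1;
  }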