--- old/src/hotspot/share/code/nmethod.cpp 2018-01-04 02:53:27.689510104 -0500
+++ new/src/hotspot/share/code/nmethod.cpp 2018-01-04 02:53:27.353492902 -0500
@@ -1625,7 +1625,7 @@
   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
   if (_oops_do_mark_link == NULL) {
     // Claim this nmethod for this thread to mark.
-    if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {
+    if (Atomic::replace_if_null(NMETHOD_SENTINEL, &_oops_do_mark_link)) {
       // Atomically append this nmethod (now claimed) to the head of the list:
       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
       for (;;) {
--- old/src/hotspot/share/gc/parallel/gcTaskThread.cpp 2018-01-04 02:53:28.549554137 -0500
+++ new/src/hotspot/share/gc/parallel/gcTaskThread.cpp 2018-01-04 02:53:28.221537345 -0500
@@ -77,7 +77,7 @@
   if (_time_stamps == NULL) {
     // We allocate the _time_stamps array lazily since logging can be enabled dynamically
     GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
-    if (Atomic::cmpxchg(time_stamps, &_time_stamps, (GCTaskTimeStamp*)NULL) != NULL) {
+    if (!Atomic::replace_if_null(time_stamps, &_time_stamps)) {
       // Someone already setup the time stamps
       FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
     }
--- old/src/hotspot/share/oops/method.cpp 2018-01-04 02:53:29.445600005 -0500
+++ new/src/hotspot/share/oops/method.cpp 2018-01-04 02:53:29.057580142 -0500
@@ -446,7 +446,7 @@
 
 bool Method::init_method_counters(MethodCounters* counters) {
   // Try to install a pointer to MethodCounters, return true on success.
-  return Atomic::cmpxchg(counters, &_method_counters, (MethodCounters*)NULL) == NULL;
+  return Atomic::replace_if_null(counters, &_method_counters);
 }
 
 void Method::cleanup_inline_caches() {
--- old/src/hotspot/share/prims/jvmtiRawMonitor.cpp 2018-01-04 02:53:30.385648131 -0500
+++ new/src/hotspot/share/prims/jvmtiRawMonitor.cpp 2018-01-04 02:53:29.997628267 -0500
@@ -127,7 +127,7 @@
 
 int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
   for (;;) {
-    if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+    if (Atomic::replace_if_null(Self, &_owner)) {
       return OS_OK ;
     }
 
@@ -139,7 +139,7 @@
     Node._next = _EntryList ;
     _EntryList = &Node ;
     OrderAccess::fence() ;
-    if (_owner == NULL && Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+    if (_owner == NULL && Atomic::replace_if_null(Self, &_owner)) {
       _EntryList = Node._next ;
       RawMonitor_lock->unlock() ;
       return OS_OK ;
--- old/src/hotspot/share/runtime/mutex.cpp 2018-01-04 02:53:31.297694822 -0500
+++ new/src/hotspot/share/runtime/mutex.cpp 2018-01-04 02:53:30.909674962 -0500
@@ -467,7 +467,7 @@
   OrderAccess::fence();
 
   // Optional optimization ... try barging on the inner lock
-  if ((NativeMonitorFlags & 32) && Atomic::cmpxchg(ESelf, &_OnDeck, (ParkEvent*)NULL) == NULL) {
+  if ((NativeMonitorFlags & 32) && Atomic::replace_if_null(ESelf, &_OnDeck)) {
     goto OnDeck_LOOP;
   }
 
@@ -574,7 +574,7 @@
   // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
   // picks a successor and marks that thread as OnDeck. That successor
   // thread will then clear OnDeck once it eventually acquires the outer lock.
-  if (Atomic::cmpxchg((ParkEvent*)_LBIT, &_OnDeck, (ParkEvent*)NULL) != NULL) {
+  if (!Atomic::replace_if_null((ParkEvent*)_LBIT, &_OnDeck)) {
     return;
   }
 
--- old/src/hotspot/share/runtime/objectMonitor.cpp 2018-01-04 02:53:32.225742332 -0500
+++ new/src/hotspot/share/runtime/objectMonitor.cpp 2018-01-04 02:53:31.837722472 -0500
@@ -421,7 +421,7 @@
 int ObjectMonitor::TryLock(Thread * Self) {
   void * own = _owner;
   if (own != NULL) return 0;
-  if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+  if (Atomic::replace_if_null(Self, &_owner)) {
     // Either guarantee _recursions == 0 or set _recursions = 0.
     assert(_recursions == 0, "invariant");
     assert(_owner == Self, "invariant");
@@ -529,7 +529,7 @@
   if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
     // Try to assume the role of responsible thread for the monitor.
     // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
-    Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
+    Atomic::replace_if_null(Self, &_Responsible);
   }
 
   // The lock might have been released while this thread was occupied queueing
@@ -553,7 +553,7 @@
     assert(_owner != Self, "invariant");
 
     if ((SyncFlags & 2) && _Responsible == NULL) {
-      Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
+      Atomic::replace_if_null(Self, &_Responsible);
     }
 
     // park self
@@ -1007,7 +1007,7 @@
       // to reacquire the lock the responsibility for ensuring succession
       // falls to the new owner.
       //
-      if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
+      if (!Atomic::replace_if_null(THREAD, &_owner)) {
         return;
       }
       TEVENT(Exit - Reacquired);
@@ -1032,7 +1032,7 @@
      //    B. If the elements forming the EntryList|cxq are TSM
      //       we could simply unpark() the lead thread and return
      //       without having set _succ.
-     if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
+     if (!Atomic::replace_if_null(THREAD, &_owner)) {
        TEVENT(Inflated exit - reacquired succeeded);
        return;
      }
@@ -1714,7 +1714,7 @@
     ObjectWaiter * tail = _cxq;
     if (tail == NULL) {
       iterator->_next = NULL;
-      if (Atomic::cmpxchg(iterator, &_cxq, (ObjectWaiter*)NULL) == NULL) {
+      if (Atomic::replace_if_null(iterator, &_cxq)) {
         break;
       }
     } else {
--- old/src/hotspot/share/runtime/synchronizer.cpp 2018-01-04 02:53:33.165790448 -0500
+++ new/src/hotspot/share/runtime/synchronizer.cpp 2018-01-04 02:53:32.781770816 -0500
@@ -238,8 +238,7 @@
     // and last are the inflated Java Monitor (ObjectMonitor) checks.
     lock->set_displaced_header(markOopDesc::unused_mark());
 
-    if (owner == NULL &&
-        Atomic::cmpxchg(Self, &(m->_owner), (void*)NULL) == NULL) {
+    if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
       assert(m->_recursions == 0, "invariant");
       assert(m->_owner == Self, "invariant");
       return true;
--- old/src/hotspot/share/services/mallocSiteTable.cpp 2018-01-04 02:53:34.101838380 -0500
+++ new/src/hotspot/share/services/mallocSiteTable.cpp 2018-01-04 02:53:33.709818305 -0500
@@ -147,7 +147,7 @@
     if (entry == NULL) return NULL;
 
     // swap in the head
-    if (Atomic::cmpxchg(entry, &_table[index], (MallocSiteHashtableEntry*)NULL) == NULL) {
+    if (Atomic::replace_if_null(entry, &_table[index])) {
       return entry->data();
     }
 
@@ -259,5 +259,5 @@
 }
 
 bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
-  return Atomic::cmpxchg(entry, &_next, (MallocSiteHashtableEntry*)NULL) == NULL;
+  return Atomic::replace_if_null(entry, &_next);
 }
--- old/src/hotspot/share/utilities/bitMap.cpp 2018-01-04 02:53:35.009884873 -0500
+++ new/src/hotspot/share/utilities/bitMap.cpp 2018-01-04 02:53:34.621864991 -0500
@@ -628,7 +628,7 @@
       table[i] = num_set_bits(i);
     }
 
-    if (Atomic::cmpxchg(table, &_pop_count_table, (BitMap::idx_t*)NULL) != NULL) {
+    if (!Atomic::replace_if_null(table, &_pop_count_table)) {
       guarantee(_pop_count_table != NULL, "invariant");
       FREE_C_HEAP_ARRAY(idx_t, table);
     }
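
Note on the pattern being rewritten: every hunk above replaces the idiom "CAS the value into a NULL slot, then compare the CAS result against NULL" with a boolean-returning Atomic::replace_if_null call. The standalone sketch below is not HotSpot code; it uses std::atomic and an illustrative helper name (replace_if_null_model) purely as an assumption about the intended semantics, to show the claim-once behaviour the call sites rely on: the operation succeeds exactly when the slot was still NULL, and a losing racer leaves the already-installed value untouched.

// Standalone model, not the HotSpot implementation: replace_if_null_model and
// the example slot below are illustrative names only.
#include <atomic>
#include <cassert>

template <typename T>
bool replace_if_null_model(T* new_value, std::atomic<T*>* dest) {
  // Returns true only if *dest was NULL and has now been set to new_value;
  // on failure the previously installed pointer is left in place.
  T* expected = nullptr;
  return dest->compare_exchange_strong(expected, new_value);
}

int main() {
  std::atomic<int*> slot{nullptr};
  int first = 1;
  int second = 2;

  assert(replace_if_null_model(&first, &slot));   // wins: slot was NULL
  assert(!replace_if_null_model(&second, &slot)); // loses: slot already claimed
  assert(slot.load() == &first);                  // the winner's value survives
  return 0;
}

Expressed this way, each call site reads as a direct question ("did this thread claim the slot?") rather than a comparison against the CAS return value, which is the readability point of the mechanical change above.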