--- old/src/hotspot/share/code/dependencyContext.cpp	2019-11-21 11:57:02.918858004 +0100
+++ new/src/hotspot/share/code/dependencyContext.cpp	2019-11-21 11:57:02.438849894 +0100
@@ -101,7 +101,7 @@
   for (;;) {
     nmethodBucket* head = Atomic::load(_dependency_context_addr);
     new_head->set_next(head);
-    if (Atomic::cmpxchg(new_head, _dependency_context_addr, head) == head) {
+    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
       break;
     }
   }
@@ -124,7 +124,7 @@
     for (;;) {
       nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
       b->set_purge_list_next(purge_list_head);
-      if (Atomic::cmpxchg(b, &_purge_list, purge_list_head) == purge_list_head) {
+      if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
@@ -260,7 +260,7 @@
 #endif //PRODUCT
 
 int nmethodBucket::decrement() {
-  return Atomic::sub(1, &_count);
+  return Atomic::sub(&_count, 1);
 }
 
 // We use a monotonically increasing epoch counter to track the last epoch a given
@@ -272,7 +272,7 @@
   if (last_cleanup >= cleaning_epoch) {
     return false;
   }
-  return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
+  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
 }
 
 // Retrieve the first nmethodBucket that has a dependent that does not correspond to
@@ -291,7 +291,7 @@
       // Unstable load of head w.r.t. head->next
       continue;
     }
-    if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
+    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
       // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
@@ -300,7 +300,7 @@
 
 // Relaxed accessors
 void DependencyContext::set_dependencies(nmethodBucket* b) {
-  Atomic::store(b, _dependency_context_addr);
+  Atomic::store(_dependency_context_addr, b);
 }
 
 nmethodBucket* DependencyContext::dependencies() {
@@ -313,7 +313,7 @@
 void DependencyContext::cleaning_start() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be");
   uint64_t epoch = ++_cleaning_epoch_monotonic;
-  Atomic::store(epoch, &_cleaning_epoch);
+  Atomic::store(&_cleaning_epoch, epoch);
 }
 
 // The epilogue marks the end of dependency context cleanup by the GC,
@@ -323,7 +323,7 @@
 // was called. That allows dependency contexts to be cleaned concurrently.
 void DependencyContext::cleaning_end() {
   uint64_t epoch = 0;
-  Atomic::store(epoch, &_cleaning_epoch);
+  Atomic::store(&_cleaning_epoch, epoch);
 }
 
 // This function skips over nmethodBuckets in the list corresponding to
@@ -345,7 +345,7 @@
       // Unstable load of next w.r.t. next->next
       continue;
     }
-    if (Atomic::cmpxchg(next_next, &_next, next) == next) {
+    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
       // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
@@ -358,7 +358,7 @@
 }
 
 void nmethodBucket::set_next(nmethodBucket* b) {
-  Atomic::store(b, &_next);
+  Atomic::store(&_next, b);
 }
 
 nmethodBucket* nmethodBucket::purge_list_next() {
@@ -366,5 +366,5 @@
 }
 
 void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
-  Atomic::store(b, &_purge_list_next);
+  Atomic::store(&_purge_list_next, b);
 }