
src/hotspot/share/code/dependencyContext.cpp

*** 99,109 ****
    }
    nmethodBucket* new_head = new nmethodBucket(nm, NULL);
    for (;;) {
      nmethodBucket* head = Atomic::load(_dependency_context_addr);
      new_head->set_next(head);
!     if (Atomic::cmpxchg(new_head, _dependency_context_addr, head) == head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_allocated_count->inc();
--- 99,109 ----
    }
    nmethodBucket* new_head = new nmethodBucket(nm, NULL);
    for (;;) {
      nmethodBucket* head = Atomic::load(_dependency_context_addr);
      new_head->set_next(head);
!     if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_allocated_count->inc();
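The retry loop in this hunk is a standard lock-free prepend onto a singly linked list: link the new node to the observed head, then CAS the head pointer, retrying if another thread got there first. Below is a minimal standalone sketch of the same pattern, using std::atomic and an illustrative Node type rather than HotSpot's Atomic wrapper and nmethodBucket; names are hypothetical.

#include <atomic>

struct Node {
  int   payload;
  Node* next;
};

static std::atomic<Node*> list_head{nullptr};

void push_front(Node* new_node) {
  for (;;) {
    Node* head = list_head.load(std::memory_order_relaxed);
    new_node->next = head;                      // link to the observed head first
    // Publish only if no other thread changed the head in the meantime.
    if (list_head.compare_exchange_weak(head, new_node,
                                        std::memory_order_release,
                                        std::memory_order_relaxed)) {
      break;
    }
    // Lost the race: loop and retry against the freshly observed head.
  }
}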
*** 122,132 ****
      // Mark the context as having stale entries, since it is not safe to
      // expunge the list right now.
      for (;;) {
        nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
        b->set_purge_list_next(purge_list_head);
!       if (Atomic::cmpxchg(b, &_purge_list, purge_list_head) == purge_list_head) {
          break;
        }
      }
      if (UsePerfData) {
        _perf_total_buckets_stale_count->inc();
--- 122,132 ----
      // Mark the context as having stale entries, since it is not safe to
      // expunge the list right now.
      for (;;) {
        nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
        b->set_purge_list_next(purge_list_head);
!       if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
          break;
        }
      }
      if (UsePerfData) {
        _perf_total_buckets_stale_count->inc();
*** 258,268 ****
  }

  #endif //PRODUCT

  int nmethodBucket::decrement() {
!   return Atomic::sub(1, &_count);
  }

  // We use a monotonically increasing epoch counter to track the last epoch a given
  // dependency context was cleaned. GC threads claim cleanup tasks by performing
  // a CAS on this value.
--- 258,268 ----
  }

  #endif //PRODUCT

  int nmethodBucket::decrement() {
!   return Atomic::sub(&_count, 1);
  }

  // We use a monotonically increasing epoch counter to track the last epoch a given
  // dependency context was cleaned. GC threads claim cleanup tasks by performing
  // a CAS on this value.
*** 270,280 ****
    uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
    uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
    if (last_cleanup >= cleaning_epoch) {
      return false;
    }
!   return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
  }

  // Retrieve the first nmethodBucket that has a dependent that does not correspond to
  // an is_unloading nmethod. Any nmethodBucket entries observed from the original head
  // that is_unloading() will be unlinked and placed on the purge list.
--- 270,280 ----
    uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
    uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
    if (last_cleanup >= cleaning_epoch) {
      return false;
    }
!   return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
  }

  // Retrieve the first nmethodBucket that has a dependent that does not correspond to
  // an is_unloading nmethod. Any nmethodBucket entries observed from the original head
  // that is_unloading() will be unlinked and placed on the purge list.
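As the comment preceding this hunk describes, a context is claimed for cleanup by CAS-ing its last-cleanup epoch forward to the current cleaning epoch, so exactly one GC thread wins the claim per context and epoch. A minimal standalone sketch of that claim pattern, with illustrative names and std::atomic in place of HotSpot's Atomic:

#include <atomic>
#include <cstdint>

// Nonzero while a GC cleaning phase is active.
static std::atomic<uint64_t> cleaning_epoch{0};

struct Context {
  std::atomic<uint64_t> last_cleanup{0};

  // Returns true for exactly one claimant per context and cleaning epoch.
  bool claim_cleanup() {
    uint64_t epoch = cleaning_epoch.load(std::memory_order_relaxed);
    uint64_t last  = last_cleanup.load(std::memory_order_relaxed);
    if (last >= epoch) {
      return false;                 // already cleaned (or claimed) in this epoch
    }
    // CAS the last-cleanup epoch forward; only one thread observes success.
    return last_cleanup.compare_exchange_strong(last, epoch,
                                                std::memory_order_relaxed);
  }
};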
*** 289,308 ****
      OrderAccess::loadload();
      if (Atomic::load(_dependency_context_addr) != head) {
        // Unstable load of head w.r.t. head->next
        continue;
      }
!     if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
        // Release is_unloading entries if unlinking was claimed
        DependencyContext::release(head);
      }
    }
  }

  // Relaxed accessors
  void DependencyContext::set_dependencies(nmethodBucket* b) {
!   Atomic::store(b, _dependency_context_addr);
  }

  nmethodBucket* DependencyContext::dependencies() {
    return Atomic::load(_dependency_context_addr);
  }
--- 289,308 ----
      OrderAccess::loadload();
      if (Atomic::load(_dependency_context_addr) != head) {
        // Unstable load of head w.r.t. head->next
        continue;
      }
!     if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
        // Release is_unloading entries if unlinking was claimed
        DependencyContext::release(head);
      }
    }
  }

  // Relaxed accessors
  void DependencyContext::set_dependencies(nmethodBucket* b) {
!   Atomic::store(_dependency_context_addr, b);
  }

  nmethodBucket* DependencyContext::dependencies() {
    return Atomic::load(_dependency_context_addr);
  }
*** 311,331 ****
  // and releasing of nmethodBucket entries will be deferred and placed on
  // a purge list to be deleted later.
  void DependencyContext::cleaning_start() {
    assert(SafepointSynchronize::is_at_safepoint(), "must be");
    uint64_t epoch = ++_cleaning_epoch_monotonic;
!   Atomic::store(epoch, &_cleaning_epoch);
  }

  // The epilogue marks the end of dependency context cleanup by the GC,
  // and also makes subsequent releases of nmethodBuckets cause immediate
  // deletion. It is okay to delay calling of cleaning_end() to a concurrent
  // phase, subsequent to the safepoint operation in which cleaning_start()
  // was called. That allows dependency contexts to be cleaned concurrently.
  void DependencyContext::cleaning_end() {
    uint64_t epoch = 0;
!   Atomic::store(epoch, &_cleaning_epoch);
  }

  // This function skips over nmethodBuckets in the list corresponding to
  // nmethods that are is_unloading. This allows exposing a view of the
  // dependents as-if they were already cleaned, despite being cleaned
--- 311,331 ----
  // and releasing of nmethodBucket entries will be deferred and placed on
  // a purge list to be deleted later.
  void DependencyContext::cleaning_start() {
    assert(SafepointSynchronize::is_at_safepoint(), "must be");
    uint64_t epoch = ++_cleaning_epoch_monotonic;
!   Atomic::store(&_cleaning_epoch, epoch);
  }

  // The epilogue marks the end of dependency context cleanup by the GC,
  // and also makes subsequent releases of nmethodBuckets cause immediate
  // deletion. It is okay to delay calling of cleaning_end() to a concurrent
  // phase, subsequent to the safepoint operation in which cleaning_start()
  // was called. That allows dependency contexts to be cleaned concurrently.
  void DependencyContext::cleaning_end() {
    uint64_t epoch = 0;
!   Atomic::store(&_cleaning_epoch, epoch);
  }

  // This function skips over nmethodBuckets in the list corresponding to
  // nmethods that are is_unloading. This allows exposing a view of the
  // dependents as-if they were already cleaned, despite being cleaned
--- 311,331 ----
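As the comments in this hunk explain, cleaning_start() and cleaning_end() bracket a cleaning window: while the epoch is nonzero, released buckets are deferred to the purge list; once the epoch is reset to zero, releases delete immediately again. A rough standalone sketch of that window protocol, with illustrative names, std::atomic in place of HotSpot's Atomic, and no safepoint machinery:

#include <atomic>
#include <cstdint>

static std::atomic<uint64_t> cleaning_epoch{0};
static uint64_t cleaning_epoch_monotonic = 0;   // only bumped at a safepoint

void cleaning_start() {                         // assumed to run at a safepoint
  cleaning_epoch.store(++cleaning_epoch_monotonic, std::memory_order_relaxed);
}

void cleaning_end() {                           // may run later, concurrently
  cleaning_epoch.store(0, std::memory_order_relaxed);
}

bool expunge_immediately() {
  // Outside the cleaning window a released bucket may be deleted at once;
  // inside the window it must be deferred to a purge list instead.
  return cleaning_epoch.load(std::memory_order_relaxed) == 0;
}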
*** 343,353 ****
      OrderAccess::loadload();
      if (Atomic::load(&_next) != next) {
        // Unstable load of next w.r.t. next->next
        continue;
      }
!     if (Atomic::cmpxchg(next_next, &_next, next) == next) {
        // Release is_unloading entries if unlinking was claimed
        DependencyContext::release(next);
      }
    }
  }
--- 343,353 ----
      OrderAccess::loadload();
      if (Atomic::load(&_next) != next) {
        // Unstable load of next w.r.t. next->next
        continue;
      }
!     if (Atomic::cmpxchg(&_next, next, next_next) == next) {
        // Release is_unloading entries if unlinking was claimed
        DependencyContext::release(next);
      }
    }
  }
*** 356,370 ****
  nmethodBucket* nmethodBucket::next() {
    return Atomic::load(&_next);
  }

  void nmethodBucket::set_next(nmethodBucket* b) {
!   Atomic::store(b, &_next);
  }

  nmethodBucket* nmethodBucket::purge_list_next() {
    return Atomic::load(&_purge_list_next);
  }

  void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
!   Atomic::store(b, &_purge_list_next);
  }
--- 356,370 ----
  nmethodBucket* nmethodBucket::next() {
    return Atomic::load(&_next);
  }

  void nmethodBucket::set_next(nmethodBucket* b) {
!   Atomic::store(&_next, b);
  }

  nmethodBucket* nmethodBucket::purge_list_next() {
    return Atomic::load(&_purge_list_next);
  }

  void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
!   Atomic::store(&_purge_list_next, b);
  }