< prev index next >

src/hotspot/share/code/dependencyContext.cpp

Print this page

        

*** 298,308 **** } } // Relaxed accessors void DependencyContext::set_dependencies(nmethodBucket* b) { ! Atomic::store(b, _dependency_context_addr); } nmethodBucket* DependencyContext::dependencies() { return Atomic::load(_dependency_context_addr); } --- 298,308 ---- } } // Relaxed accessors void DependencyContext::set_dependencies(nmethodBucket* b) { ! Atomic::store(_dependency_context_addr, b); } nmethodBucket* DependencyContext::dependencies() { return Atomic::load(_dependency_context_addr); }
*** 311,331 **** // and releasing of nmethodBucket entries will be deferred and placed on // a purge list to be deleted later. void DependencyContext::cleaning_start() { assert(SafepointSynchronize::is_at_safepoint(), "must be"); uint64_t epoch = ++_cleaning_epoch_monotonic; ! Atomic::store(epoch, &_cleaning_epoch); } // The epilogue marks the end of dependency context cleanup by the GC, // and also makes subsequent releases of nmethodBuckets cause immediate // deletion. It is okay to delay calling of cleaning_end() to a concurrent // phase, subsequent to the safepoint operation in which cleaning_start() // was called. That allows dependency contexts to be cleaned concurrently. void DependencyContext::cleaning_end() { uint64_t epoch = 0; ! Atomic::store(epoch, &_cleaning_epoch); } // This function skips over nmethodBuckets in the list corresponding to // nmethods that are is_unloading. This allows exposing a view of the // dependents as-if they were already cleaned, despite being cleaned --- 311,331 ---- // and releasing of nmethodBucket entries will be deferred and placed on // a purge list to be deleted later. void DependencyContext::cleaning_start() { assert(SafepointSynchronize::is_at_safepoint(), "must be"); uint64_t epoch = ++_cleaning_epoch_monotonic; ! Atomic::store(&_cleaning_epoch, epoch); } // The epilogue marks the end of dependency context cleanup by the GC, // and also makes subsequent releases of nmethodBuckets cause immediate // deletion. It is okay to delay calling of cleaning_end() to a concurrent // phase, subsequent to the safepoint operation in which cleaning_start() // was called. That allows dependency contexts to be cleaned concurrently. void DependencyContext::cleaning_end() { uint64_t epoch = 0; ! Atomic::store(&_cleaning_epoch, epoch); } // This function skips over nmethodBuckets in the list corresponding to // nmethods that are is_unloading. This allows exposing a view of the // dependents as-if they were already cleaned, despite being cleaned
*** 356,370 **** nmethodBucket* nmethodBucket::next() { return Atomic::load(&_next); } void nmethodBucket::set_next(nmethodBucket* b) { ! Atomic::store(b, &_next); } nmethodBucket* nmethodBucket::purge_list_next() { return Atomic::load(&_purge_list_next); } void nmethodBucket::set_purge_list_next(nmethodBucket* b) { ! Atomic::store(b, &_purge_list_next); } --- 356,370 ---- nmethodBucket* nmethodBucket::next() { return Atomic::load(&_next); } void nmethodBucket::set_next(nmethodBucket* b) { ! Atomic::store(&_next, b); } nmethodBucket* nmethodBucket::purge_list_next() { return Atomic::load(&_purge_list_next); } void nmethodBucket::set_purge_list_next(nmethodBucket* b) { ! Atomic::store(&_purge_list_next, b); }
< prev index next >