}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
  return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
}
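
// Illustrative sketch (not part of HotSpot): the same claim-by-CAS scheme
// written against std::atomic instead of HotSpot's Atomic wrappers.
// EpochClaim and try_claim() are names assumed for this example only. The
// CAS from the observed stale value to the current epoch can succeed for at
// most one thread per epoch, so exactly one GC thread wins the cleanup task
// and every other thread backs off.
#include <atomic>
#include <cstdint>

struct EpochClaim {
  std::atomic<uint64_t> _last_cleanup{0};

  bool try_claim(uint64_t cleaning_epoch) {
    uint64_t last = _last_cleanup.load();
    if (last >= cleaning_epoch) {
      return false;  // this context was already cleaned during this epoch
    }
    // Fails (and returns false) if another thread claimed the epoch first.
    return _last_cleanup.compare_exchange_strong(last, cleaning_epoch);
  }
};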

// Retrieve the first nmethodBucket whose dependent nmethod is not
// is_unloading(). Any nmethodBucket entries observed from the original head
// whose nmethod is_unloading() are unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = OrderAccess::load_acquire(_dependency_context_addr);
    if (head == NULL || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}
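
// Illustrative sketch (not part of HotSpot): the same pop-and-recheck pattern
// with std::atomic. Node, dead, and retire are stand-ins assumed for this
// example in place of nmethodBucket, is_unloading(), and the purge-list
// hand-off. The reload of the list head before the CAS plays the role of the
// loadload fence above: it rejects a head->next value read from a node that
// was concurrently unlinked out from under us.
#include <atomic>

struct Node {
  Node* next;
  bool  dead;
};

static Node* first_live(std::atomic<Node*>& list, void (*retire)(Node*)) {
  for (;;) {
    // Acquire pairs with the releasing store of a concurrently inserted head.
    Node* head = list.load(std::memory_order_acquire);
    if (head == nullptr || !head->dead) {
      return head;
    }
    Node* head_next = head->next;
    if (list.load(std::memory_order_acquire) != head) {
      continue;  // unstable read of head w.r.t. head->next; retry
    }
    Node* expected = head;
    if (list.compare_exchange_strong(expected, head_next)) {
      retire(head);  // only the thread that claimed the unlink retires it
    }
  }
}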

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(b, _dependency_context_addr);
}