src/hotspot/share/code/dependencyContext.cpp

 283     // Need acquire because the read value could come from a concurrent insert.
 284     nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
 285     if (head == NULL || !head->get_nmethod()->is_unloading()) {
 286       return head;
 287     }
 288     nmethodBucket* head_next = head->next();
 289     OrderAccess::loadload();
 290     if (Atomic::load(_dependency_context_addr) != head) {
 291       // Unstable load of head w.r.t. head->next
 292       continue;
 293     }
 294     if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
 295       // Release is_unloading entries if unlinking was claimed
 296       DependencyContext::release(head);
 297     }
 298   }
 299 }
 300 
 301 // Relaxed accessors
 302 void DependencyContext::set_dependencies(nmethodBucket* b) {
 303   Atomic::store(b, _dependency_context_addr);
 304 }
 305 
 306 nmethodBucket* DependencyContext::dependencies() {
 307   return Atomic::load(_dependency_context_addr);
 308 }
 309 
 310 // After the gc_prologue, the dependency contexts may be claimed by the GC
 311 // and releasing of nmethodBucket entries will be deferred and placed on
 312 // a purge list to be deleted later.
 313 void DependencyContext::cleaning_start() {
 314   assert(SafepointSynchronize::is_at_safepoint(), "must be");
 315   uint64_t epoch = ++_cleaning_epoch_monotonic;
 316   Atomic::store(epoch, &_cleaning_epoch);
 317 }
 318 
 319 // The epilogue marks the end of dependency context cleanup by the GC,
 320 // and also makes subsequent releases of nmethodBuckets cause immediate
 321 // deletion. It is okay to delay calling cleaning_end() to a concurrent
 322 // phase, subsequent to the safepoint operation in which cleaning_start()
 323 // was called. That allows dependency contexts to be cleaned concurrently.
 324 void DependencyContext::cleaning_end() {
 325   uint64_t epoch = 0;
 326   Atomic::store(epoch, &_cleaning_epoch);
 327 }
 328 
 329 // This function skips over nmethodBuckets in the list corresponding to
 330 // nmethods that are is_unloading. This allows exposing a view of the
 331 // dependents as-if they were already cleaned, despite being cleaned
 332 // concurrently. Any entry observed that is_unloading() will be unlinked
 333 // and placed on the purge list.
 334 nmethodBucket* nmethodBucket::next_not_unloading() {
 335   for (;;) {
 336     // Do not need acquire because the loaded entry can never be
 337     // concurrently inserted.
 338     nmethodBucket* next = Atomic::load(&_next);
 339     if (next == NULL || !next->get_nmethod()->is_unloading()) {
 340       return next;
 341     }
 342     nmethodBucket* next_next = Atomic::load(&next->_next);
 343     OrderAccess::loadload();
 344     if (Atomic::load(&_next) != next) {
 345       // Unstable load of next w.r.t. next->next
 346       continue;
 347     }
 348     if (Atomic::cmpxchg(next_next, &_next, next) == next) {
 349       // Release is_unloading entries if unlinking was claimed
 350       DependencyContext::release(next);
 351     }
 352   }
 353 }
 354 
 355 // Relaxed accessors
 356 nmethodBucket* nmethodBucket::next() {
 357   return Atomic::load(&_next);
 358 }
 359 
 360 void nmethodBucket::set_next(nmethodBucket* b) {
 361   Atomic::store(b, &_next);
 362 }
 363 
 364 nmethodBucket* nmethodBucket::purge_list_next() {
 365   return Atomic::load(&_purge_list_next);
 366 }
 367 
 368 void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
 369   Atomic::store(b, &_purge_list_next);
 370 }


 283     // Need acquire because the read value could come from a concurrent insert.
 284     nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
 285     if (head == NULL || !head->get_nmethod()->is_unloading()) {
 286       return head;
 287     }
 288     nmethodBucket* head_next = head->next();
 289     OrderAccess::loadload();
 290     if (Atomic::load(_dependency_context_addr) != head) {
 291       // Unstable load of head w.r.t. head->next
 292       continue;
 293     }
 294     if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
 295       // Release is_unloading entries if unlinking was claimed
 296       DependencyContext::release(head);
 297     }
 298   }
 299 }
 300 
 301 // Relaxed accessors
 302 void DependencyContext::set_dependencies(nmethodBucket* b) {
 303   Atomic::store(_dependency_context_addr, b);
 304 }
 305 
 306 nmethodBucket* DependencyContext::dependencies() {
 307   return Atomic::load(_dependency_context_addr);
 308 }
 309 
 310 // After the gc_prologue, the dependency contexts may be claimed by the GC
 311 // and releasing of nmethodBucket entries will be deferred and placed on
 312 // a purge list to be deleted later.
 313 void DependencyContext::cleaning_start() {
 314   assert(SafepointSynchronize::is_at_safepoint(), "must be");
 315   uint64_t epoch = ++_cleaning_epoch_monotonic;
 316   Atomic::store(&_cleaning_epoch, epoch);
 317 }
 318 
 319 // The epilogue marks the end of dependency context cleanup by the GC,
 320 // and also makes subsequent releases of nmethodBuckets cause immediate
 321 // deletion. It is okay to delay calling cleaning_end() to a concurrent
 322 // phase, subsequent to the safepoint operation in which cleaning_start()
 323 // was called. That allows dependency contexts to be cleaned concurrently.
 324 void DependencyContext::cleaning_end() {
 325   uint64_t epoch = 0;
 326   Atomic::store(&_cleaning_epoch, epoch);
 327 }
 328 
 329 // This function skips over nmethodBuckets in the list corresponding to
 330 // nmethods that are is_unloading. This allows exposing a view of the
 331 // dependents as-if they were already cleaned, despite being cleaned
 332 // concurrently. Any entry observed that is_unloading() will be unlinked
 333 // and placed on the purge list.
 334 nmethodBucket* nmethodBucket::next_not_unloading() {
 335   for (;;) {
 336     // Do not need acquire because the loaded entry can never be
 337     // concurrently inserted.
 338     nmethodBucket* next = Atomic::load(&_next);
 339     if (next == NULL || !next->get_nmethod()->is_unloading()) {
 340       return next;
 341     }
 342     nmethodBucket* next_next = Atomic::load(&next->_next);
 343     OrderAccess::loadload();
 344     if (Atomic::load(&_next) != next) {
 345       // Unstable load of next w.r.t. next->next
 346       continue;
 347     }
 348     if (Atomic::cmpxchg(next_next, &_next, next) == next) {
 349       // Release is_unloading entries if unlinking was claimed
 350       DependencyContext::release(next);
 351     }
 352   }
 353 }
 354 
 355 // Relaxed accessors
 356 nmethodBucket* nmethodBucket::next() {
 357   return Atomic::load(&_next);
 358 }
 359 
 360 void nmethodBucket::set_next(nmethodBucket* b) {
 361   Atomic::store(&_next, b);
 362 }
 363 
 364 nmethodBucket* nmethodBucket::purge_list_next() {
 365   return Atomic::load(&_purge_list_next);
 366 }
 367 
 368 void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
 369   Atomic::store(&_purge_list_next, b);
 370 }