
src/hotspot/share/code/dependencyContext.cpp

 243     }
 244   }
 245 }
 246 
 247 bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
 248   for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
 249     if (nm == b->get_nmethod()) {
 250 #ifdef ASSERT
 251       int count = b->count();
 252       assert(count >= 0, "count shouldn't be negative: %d", count);
 253 #endif
 254       return true;
 255     }
 256   }
 257   return false;
 258 }
 259 
 260 #endif //PRODUCT
 261 
 262 int nmethodBucket::decrement() {
 263   return Atomic::sub(1, &_count);
 264 }
 265 
 266 // We use a monotonically increasing epoch counter to track the last epoch a given
 267 // dependency context was cleaned. GC threads claim cleanup tasks by performing
 268 // a CAS on this value.
 269 bool DependencyContext::claim_cleanup() {
 270   uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
 271   uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
 272   if (last_cleanup >= cleaning_epoch) {
 273     return false;
 274   }
 275   return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
 276 }
 277 
 278 // Retrieve the first nmethodBucket that has a dependent that does not correspond to
 279 // an is_unloading nmethod. Any nmethodBucket entries observed from the original head
 280 // that is_unloading() will be unlinked and placed on the purge list.
 281 nmethodBucket* DependencyContext::dependencies_not_unloading() {
 282   for (;;) {
 283     // Need acquire because the read value could come from a concurrent insert.




 243     }
 244   }
 245 }
 246 
 247 bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
 248   for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
 249     if (nm == b->get_nmethod()) {
 250 #ifdef ASSERT
 251       int count = b->count();
 252       assert(count >= 0, "count shouldn't be negative: %d", count);
 253 #endif
 254       return true;
 255     }
 256   }
 257   return false;
 258 }
 259 
 260 #endif //PRODUCT
 261 
 262 int nmethodBucket::decrement() {
 263   return Atomic::sub(&_count, 1);
 264 }
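
The only functional change in this hunk is the argument order of Atomic::sub: the earlier copy of these lines shows the previous form, Atomic::sub(1, &_count), while the version above passes the destination first, Atomic::sub(&_count, 1). A minimal stand-alone sketch of the same counter pattern, written against std::atomic rather than HotSpot's Atomic class (the Bucket type and field here are hypothetical):

#include <atomic>

struct Bucket {
  std::atomic<int> _count{0};

  // Mirrors nmethodBucket::decrement(): Atomic::sub returns the updated
  // value, so subtract 1 from the old value returned by fetch_sub.
  int decrement() {
    return _count.fetch_sub(1) - 1;
  }
};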
 265 
 266 // We use a monotonically increasing epoch counter to track the last epoch a given
 267 // dependency context was cleaned. GC threads claim cleanup tasks by performing
 268 // a CAS on this value.
 269 bool DependencyContext::claim_cleanup() {
 270   uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
 271   uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
 272   if (last_cleanup >= cleaning_epoch) {
 273     return false;
 274   }
 275   return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
 276 }
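
claim_cleanup() above is a claim-by-CAS protocol: for a given cleaning epoch, exactly one GC thread succeeds in advancing the context's last-cleanup stamp to that epoch and thereby owns the cleanup task; every other thread either sees the stamp already up to date or loses the CAS. A rough stand-alone sketch of the same idea, using std::atomic and hypothetical names instead of HotSpot's Atomic wrappers:

#include <atomic>
#include <cstdint>

// Hypothetical global epoch, bumped once per cleaning cycle.
static std::atomic<uint64_t> g_cleaning_epoch{0};

struct Context {
  std::atomic<uint64_t> _last_cleanup{0};

  // Returns true for at most one thread per context per epoch.
  bool claim_cleanup() {
    uint64_t epoch = g_cleaning_epoch.load();
    uint64_t last  = _last_cleanup.load();
    if (last >= epoch) {
      return false;               // already cleaned (or claimed) this epoch
    }
    // Only the thread whose CAS installs the new epoch owns the task.
    return _last_cleanup.compare_exchange_strong(last, epoch);
  }
};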
 277 
 278 // Retrieve the first nmethodBucket that has a dependent that does not correspond to
 279 // an is_unloading nmethod. Any nmethodBucket entries observed from the original head
 280 // that is_unloading() will be unlinked and placed on the purge list.
 281 nmethodBucket* DependencyContext::dependencies_not_unloading() {
 282   for (;;) {
 283     // Need acquire because the read value could come from a concurrent insert.


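
The last hunk is truncated just after the opening of the retry loop, but the comment above it states the intent: starting from the list head, unlink any bucket whose nmethod is_unloading(), hand it to the purge list, and return the first bucket that is still alive, while tolerating concurrent inserts at the head (hence the acquire load). A simplified, hypothetical sketch of that shape, not the HotSpot implementation (which also manages reference counts and the real purge-list handoff):

#include <atomic>

struct Bucket {
  Bucket* _next = nullptr;
  bool    _unloading = false;        // stand-in for nm->is_unloading()
};

Bucket* first_not_unloading(std::atomic<Bucket*>& head, Bucket*& purge_list) {
  for (;;) {
    // Acquire pairs with the release performed by a concurrent insert.
    Bucket* b = head.load(std::memory_order_acquire);
    if (b == nullptr || !b->_unloading) {
      return b;                      // head is already a live dependent
    }
    // Try to pop the dead head; if the CAS fails, head changed under us
    // (an insert or another unlink), so re-read and retry.
    if (head.compare_exchange_weak(b, b->_next)) {
      b->_next = purge_list;         // move the dead bucket to the purge list
      purge_list = b;
    }
  }
}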