< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 47400 : [mq]: cmpxchg_ptr


3481       _first_nmethod = iter.method();
3482     }
3483     _claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
3484   }
3485 
  // Teardown after nmethod unloading: verify that inline caches were
  // cleaned, clear the pending-clean flag, and check that the scavenge
  // root list and ICHolder relocations are in a consistent state.
  ~G1CodeCacheUnloadingTask() {
    CodeCache::verify_clean_inline_caches();

    CodeCache::set_needs_cache_clean(false);
    // NOTE(review): presumably unloading has fully pruned the scavenge
    // root list by the time the task is destroyed — the guarantee enforces it.
    guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");

    CodeCache::verify_icholder_relocations();
  }
3494 
3495  private:
3496   void add_to_postponed_list(CompiledMethod* nm) {
3497       CompiledMethod* old;
3498       do {
3499         old = (CompiledMethod*)_postponed_list;
3500         nm->set_unloading_next(old);
3501       } while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
3502   }
3503 
  // First (parallel) cleaning pass for nm. If nm references another nmethod
  // that has not been cleaned yet, the rest of its cleaning is deferred by
  // pushing it on the postponed list for a second pass.
  void clean_nmethod(CompiledMethod* nm) {
    bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);

    if (postponed) {
      // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
      add_to_postponed_list(nm);
    }

    // Mark that this nmethod has been cleaned/unloaded.
    // After this call, it will be safe to ask if this nmethod was unloaded or not.
    nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
  }
3516 
  // Second-pass cleaning for an nmethod that was postponed during
  // clean_nmethod() because it referenced a not-yet-cleaned nmethod.
  void clean_nmethod_postponed(CompiledMethod* nm) {
    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
  }
3520 
  // Upper bound on the number of alive nmethods a worker claims per CAS
  // attempt; batching claims reduces contention on _claimed_nmethod.
  static const int MaxClaimNmethods = 16;


3524     CompiledMethod* first;
3525     CompiledMethodIterator last;
3526 
3527     do {
3528       *num_claimed_nmethods = 0;
3529 
3530       first = (CompiledMethod*)_claimed_nmethod;
3531       last = CompiledMethodIterator(first);
3532 
3533       if (first != NULL) {
3534 
3535         for (int i = 0; i < MaxClaimNmethods; i++) {
3536           if (!last.next_alive()) {
3537             break;
3538           }
3539           claimed_nmethods[i] = last.method();
3540           (*num_claimed_nmethods)++;
3541         }
3542       }
3543 
3544     } while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
3545   }
3546 
3547   CompiledMethod* claim_postponed_nmethod() {
3548     CompiledMethod* claim;
3549     CompiledMethod* next;
3550 
3551     do {
3552       claim = (CompiledMethod*)_postponed_list;
3553       if (claim == NULL) {
3554         return NULL;
3555       }
3556 
3557       next = claim->unloading_next();
3558 
3559     } while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
3560 
3561     return claim;
3562   }
3563 
3564  public:
  // Mark that we're done with the first pass of nmethod cleaning.
  // Increments the entered-barrier count under _lock and wakes all
  // waiters (see barrier_wait) once every worker has checked in.
  // worker_id is currently unused here.
  void barrier_mark(uint worker_id) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    _num_entered_barrier++;
    if (_num_entered_barrier == _num_workers) {
      ml.notify_all();
    }
  }
3573 
3574   // See if we have to wait for the other workers to
3575   // finish their first-pass nmethod cleaning work.
3576   void barrier_wait(uint worker_id) {
3577     if (_num_entered_barrier < _num_workers) {
3578       MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
3579       while (_num_entered_barrier < _num_workers) {




3481       _first_nmethod = iter.method();
3482     }
3483     _claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
3484   }
3485 
  // Teardown after nmethod unloading: verify that inline caches were
  // cleaned, clear the pending-clean flag, and check that the scavenge
  // root list and ICHolder relocations are in a consistent state.
  ~G1CodeCacheUnloadingTask() {
    CodeCache::verify_clean_inline_caches();

    CodeCache::set_needs_cache_clean(false);
    // NOTE(review): presumably unloading has fully pruned the scavenge
    // root list by the time the task is destroyed — the guarantee enforces it.
    guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");

    CodeCache::verify_icholder_relocations();
  }
3494 
3495  private:
  // Push nm onto the lock-free _postponed_list stack (Treiber-style push):
  // link nm to a snapshot of the head, then CAS the head from the snapshot
  // to nm; retry if another worker changed the head in the meantime.
  void add_to_postponed_list(CompiledMethod* nm) {
      CompiledMethod* old;
      do {
        old = (CompiledMethod*)_postponed_list; // cast strips volatile from the field
        nm->set_unloading_next(old);            // link before publishing via CAS
      } while (Atomic::cmpxchg(nm, &_postponed_list, old) != old);
  }
3503 
  // First (parallel) cleaning pass for nm. If nm references another nmethod
  // that has not been cleaned yet, the rest of its cleaning is deferred by
  // pushing it on the postponed list for a second pass.
  void clean_nmethod(CompiledMethod* nm) {
    bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);

    if (postponed) {
      // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
      add_to_postponed_list(nm);
    }

    // Mark that this nmethod has been cleaned/unloaded.
    // After this call, it will be safe to ask if this nmethod was unloaded or not.
    nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
  }
3516 
  // Second-pass cleaning for an nmethod that was postponed during
  // clean_nmethod() because it referenced a not-yet-cleaned nmethod.
  void clean_nmethod_postponed(CompiledMethod* nm) {
    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
  }
3520 
  // Upper bound on the number of alive nmethods a worker claims per CAS
  // attempt; batching claims reduces contention on _claimed_nmethod.
  static const int MaxClaimNmethods = 16;


3524     CompiledMethod* first;
3525     CompiledMethodIterator last;
3526 
3527     do {
3528       *num_claimed_nmethods = 0;
3529 
3530       first = (CompiledMethod*)_claimed_nmethod;
3531       last = CompiledMethodIterator(first);
3532 
3533       if (first != NULL) {
3534 
3535         for (int i = 0; i < MaxClaimNmethods; i++) {
3536           if (!last.next_alive()) {
3537             break;
3538           }
3539           claimed_nmethods[i] = last.method();
3540           (*num_claimed_nmethods)++;
3541         }
3542       }
3543 
3544     } while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
3545   }
3546 
  // Pop one nmethod off the lock-free _postponed_list stack.
  // Returns NULL when the list is empty.
  // NOTE(review): a classic CAS-pop is ABA-sensitive; presumably safe here
  // because nodes are not re-pushed within one unloading phase — confirm.
  CompiledMethod* claim_postponed_nmethod() {
    CompiledMethod* claim;
    CompiledMethod* next;

    do {
      claim = (CompiledMethod*)_postponed_list; // cast strips volatile from the field
      if (claim == NULL) {
        return NULL;
      }

      next = claim->unloading_next();

    } while (Atomic::cmpxchg(next, &_postponed_list, claim) != claim);

    return claim;
  }
3563 
3564  public:
  // Mark that we're done with the first pass of nmethod cleaning.
  // Increments the entered-barrier count under _lock and wakes all
  // waiters (see barrier_wait) once every worker has checked in.
  // worker_id is currently unused here.
  void barrier_mark(uint worker_id) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    _num_entered_barrier++;
    if (_num_entered_barrier == _num_workers) {
      ml.notify_all();
    }
  }
3573 
3574   // See if we have to wait for the other workers to
3575   // finish their first-pass nmethod cleaning work.
3576   void barrier_wait(uint worker_id) {
3577     if (_num_entered_barrier < _num_workers) {
3578       MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
3579       while (_num_entered_barrier < _num_workers) {


< prev index next >