5032
5033 // Variables used to claim nmethods.
// First alive nmethod in the code cache; starting point for parallel claiming.
5034 nmethod* _first_nmethod;
// Shared claim cursor, advanced with Atomic::cmpxchg_ptr in claim_nmethods().
// NOTE(review): `volatile nmethod*` is a pointer-to-volatile; a volatile
// pointer would be `nmethod* volatile`. The casts below treat it as the
// latter -- confirm against the Atomic API's expectations.
5035 volatile nmethod* _claimed_nmethod;
5036
5037 // The list of nmethods that need to be processed by the second pass.
// Head of an intrusive LIFO list linked via nmethod::set_unloading_next();
// pushed/popped with CAS loops (add_to_postponed_list / claim_postponed_nmethod).
5038 volatile nmethod* _postponed_list;
// Number of worker threads that have entered the synchronization barrier.
5039 volatile uint _num_entered_barrier;
5040
5041 public:
// Sets up shared state for parallel code-cache unloading: bumps the global
// unloading clock so workers can stamp nmethods they have processed, then
// seeds the claim cursor with the first alive nmethod in the code cache.
5042 G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
5043 _is_alive(is_alive),
5044 _unloading_occurred(unloading_occurred),
5045 _num_workers(num_workers),
5046 _first_nmethod(NULL),
5047 _claimed_nmethod(NULL),
5048 _postponed_list(NULL),
5049 _num_entered_barrier(0)
5050 {
// Nmethods stamped with the new clock value are known to have been
// processed in this cycle (see the set_unloading_clock call in the
// first-pass cleaning code).
5051 nmethod::increase_unloading_clock();
// Start claiming from the first alive nmethod.
5052 _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
5053 _claimed_nmethod = (volatile nmethod*)_first_nmethod;
5054 }
5055
// Runs after the parallel unloading work is finished: verifies that inline
// caches were left clean, clears the needs-cache-clean flag, and checks
// ICHolder relocations.
5056 ~G1CodeCacheUnloadingTask() {
5057 CodeCache::verify_clean_inline_caches();
5058
5059 CodeCache::set_needs_cache_clean(false);
// Unloading should have pruned every scavenge-root nmethod by now.
5060 guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
5061
5062 CodeCache::verify_icholder_relocations();
5063 }
5064
5065 private:
// Lock-free LIFO push of nm onto _postponed_list (Treiber-stack style):
// link nm to the current head, then CAS the head from `old` to nm;
// retry from the new head if another worker raced us.
5066 void add_to_postponed_list(nmethod* nm) {
5067 nmethod* old;
5068 do {
5069 old = (nmethod*)_postponed_list;
5070 nm->set_unloading_next(old);
5071 } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
5072 }
// NOTE(review): lines 5073-5074 (the enclosing method's signature, likely
// `void clean_nmethod(nmethod* nm)`) are missing from this excerpt.
// First-pass cleaning of one nmethod; work that depends on other nmethods
// having been cleaned first is deferred to the postponed list.
5075 bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
5076
5077 if (postponed) {
5078 // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
5079 add_to_postponed_list(nm);
5080 }
5081
5082 // Mark that this thread has been cleaned/unloaded.
5083 // After this call, it will be safe to ask if this nmethod was unloaded or not.
// Stamping with the current global clock publishes "processed this cycle".
5084 nm->set_unloading_clock(nmethod::global_unloading_clock());
5085 }
5086
// Second pass: finishes cleaning work that had to wait until every nmethod
// completed the first pass.
5087 void clean_nmethod_postponed(nmethod* nm) {
5088 nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
5089 }
5090
// Upper bound on nmethods claimed per CAS attempt; batching amortizes
// contention on the shared _claimed_nmethod cursor.
5091 static const int MaxClaimNmethods = 16;
5092
// Claims up to MaxClaimNmethods alive nmethods following the shared cursor.
// Fills claimed_nmethods (caller-provided array of >= MaxClaimNmethods
// slots) and sets *num_claimed_nmethods to the count filled (0 => no work
// left). On CAS failure the whole batch is recomputed from the new cursor.
5093 void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
5094 nmethod* first;
5095 nmethod* last;
5096
5097 do {
5098 *num_claimed_nmethods = 0;
5099
// Snapshot the cursor; `first` doubles as the CAS expected value.
5100 first = last = (nmethod*)_claimed_nmethod;
5101
5102 if (first != NULL) {
5103 for (int i = 0; i < MaxClaimNmethods; i++) {
5104 last = CodeCache::alive_nmethod(CodeCache::next(last));
5105
// End of the code cache reached: `last` stays NULL so the CAS below
// publishes the terminal cursor value.
5106 if (last == NULL) {
5107 break;
5108 }
5109
5110 claimed_nmethods[i] = last;
5111 (*num_claimed_nmethods)++;
5112 }
5113 }
5114
// Publish the advanced cursor; retry from scratch if another worker
// moved it underneath us.
5115 } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
5116 }
5117
// Lock-free pop from the postponed LIFO list. Returns the claimed nmethod,
// or NULL when the list is empty (all postponed work has been claimed).
// NOTE(review): a bare CAS pop is ABA-prone in general; presumably safe
// here because each nmethod is pushed at most once per cycle -- confirm.
5118 nmethod* claim_postponed_nmethod() {
5119 nmethod* claim;
5120 nmethod* next;
5121
5122 do {
5123 claim = (nmethod*)_postponed_list;
5124 if (claim == NULL) {
5125 return NULL;
5126 }
5127
5128 next = claim->unloading_next();
5129
// Swing the head from `claim` to its successor; retry on contention.
5130 } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
5131
5132 return claim;
5133 }
5134
5135 public:
|
5032
5033 // Variables used to claim nmethods.
// First alive nmethod found via NMethodIterator; start of parallel claiming.
5034 nmethod* _first_nmethod;
// Shared claim cursor, advanced with Atomic::cmpxchg_ptr in claim_nmethods().
// NOTE(review): `volatile nmethod*` is a pointer-to-volatile; a volatile
// pointer would be `nmethod* volatile`. The casts below treat it as the
// latter -- confirm against the Atomic API's expectations.
5035 volatile nmethod* _claimed_nmethod;
5036
5037 // The list of nmethods that need to be processed by the second pass.
// Head of an intrusive LIFO list linked via nmethod::set_unloading_next();
// pushed/popped with CAS loops (add_to_postponed_list / claim_postponed_nmethod).
5038 volatile nmethod* _postponed_list;
// Number of worker threads that have entered the synchronization barrier.
5039 volatile uint _num_entered_barrier;
5040
5041 public:
// Sets up shared state for parallel code-cache unloading: bumps the global
// unloading clock so workers can stamp nmethods they have processed, then
// seeds the claim cursor with the first alive nmethod (found through the
// NMethodIterator API rather than CodeCache::first/next).
5042 G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
5043 _is_alive(is_alive),
5044 _unloading_occurred(unloading_occurred),
5045 _num_workers(num_workers),
5046 _first_nmethod(NULL),
5047 _claimed_nmethod(NULL),
5048 _postponed_list(NULL),
5049 _num_entered_barrier(0)
5050 {
// Nmethods stamped with the new clock value are known to have been
// processed in this cycle.
5051 nmethod::increase_unloading_clock();
5052 // Get first alive nmethod
5053 NMethodIterator iter = NMethodIterator();
// _first_nmethod stays NULL when the code cache holds no alive nmethod.
5054 if(iter.next_alive()) {
5055 _first_nmethod = iter.method();
5056 }
5057 _claimed_nmethod = (volatile nmethod*)_first_nmethod;
5058 }
5059
// Runs after the parallel unloading work is finished: verifies that inline
// caches were left clean, clears the needs-cache-clean flag, and checks
// ICHolder relocations.
5060 ~G1CodeCacheUnloadingTask() {
5061 CodeCache::verify_clean_inline_caches();
5062
5063 CodeCache::set_needs_cache_clean(false);
// Unloading should have pruned every scavenge-root nmethod by now.
5064 guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
5065
5066 CodeCache::verify_icholder_relocations();
5067 }
5068
5069 private:
// Lock-free LIFO push of nm onto _postponed_list (Treiber-stack style):
// link nm to the current head, then CAS the head from `old` to nm;
// retry from the new head if another worker raced us.
5070 void add_to_postponed_list(nmethod* nm) {
5071 nmethod* old;
5072 do {
5073 old = (nmethod*)_postponed_list;
5074 nm->set_unloading_next(old);
5075 } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
5076 }
// NOTE(review): lines 5077-5078 (the enclosing method's signature, likely
// `void clean_nmethod(nmethod* nm)`) are missing from this excerpt.
// First-pass cleaning of one nmethod; work that depends on other nmethods
// having been cleaned first is deferred to the postponed list.
5079 bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
5080
5081 if (postponed) {
5082 // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
5083 add_to_postponed_list(nm);
5084 }
5085
5086 // Mark that this thread has been cleaned/unloaded.
5087 // After this call, it will be safe to ask if this nmethod was unloaded or not.
// Stamping with the current global clock publishes "processed this cycle".
5088 nm->set_unloading_clock(nmethod::global_unloading_clock());
5089 }
5090
// Second pass: finishes cleaning work that had to wait until every nmethod
// completed the first pass.
5091 void clean_nmethod_postponed(nmethod* nm) {
5092 nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
5093 }
5094
// Upper bound on nmethods claimed per CAS attempt; batching amortizes
// contention on the shared _claimed_nmethod cursor.
5095 static const int MaxClaimNmethods = 16;
5096
// Claims up to MaxClaimNmethods alive nmethods following the shared cursor,
// walking with NMethodIterator instead of CodeCache::next. Fills
// claimed_nmethods (>= MaxClaimNmethods slots) and sets
// *num_claimed_nmethods to the count filled (0 => no work left).
5097 void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
5098 nmethod* first;
5099 NMethodIterator last;
5100
5101 do {
5102 *num_claimed_nmethods = 0;
5103
// Snapshot the cursor; `first` doubles as the CAS expected value.
5104 first = (nmethod*)_claimed_nmethod;
// NOTE(review): when first == NULL this constructs an iterator positioned
// at NULL; verify NMethodIterator tolerates that before next_alive()/method().
5105 last = NMethodIterator(first);
5106
5107 if (first != NULL) {
5108
5109 for (int i = 0; i < MaxClaimNmethods; i++) {
5110 if (!last.next_alive()) {
5111 break;
5112 }
5113 claimed_nmethods[i] = last.method();
5114 (*num_claimed_nmethods)++;
5115 }
5116 }
5117
// Publish the advanced cursor; retry from scratch on contention.
// NOTE(review): relies on last.method() returning NULL once iteration is
// exhausted so the terminal cursor value gets published -- confirm against
// the NMethodIterator implementation.
5118 } while ((nmethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
5119 }
5120
// Lock-free pop from the postponed LIFO list. Returns the claimed nmethod,
// or NULL when the list is empty (all postponed work has been claimed).
// NOTE(review): a bare CAS pop is ABA-prone in general; presumably safe
// here because each nmethod is pushed at most once per cycle -- confirm.
5121 nmethod* claim_postponed_nmethod() {
5122 nmethod* claim;
5123 nmethod* next;
5124
5125 do {
5126 claim = (nmethod*)_postponed_list;
5127 if (claim == NULL) {
5128 return NULL;
5129 }
5130
5131 next = claim->unloading_next();
5132
// Swing the head from `claim` to its successor; retry on contention.
5133 } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
5134
5135 return claim;
5136 }
5137
5138 public:
|