      b->increment();
      return;
    }
  }
  nmethodBucket* new_head = new nmethodBucket(nm, NULL);
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(new_head, _dependency_context_addr, head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}

void DependencyContext::release(nmethodBucket* b) {
  bool expunge = Atomic::load(&_cleaning_epoch) == 0;
  if (expunge) {
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(b, &_purge_list, purge_list_head) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
|
      b->increment();
      return;
    }
  }
  nmethodBucket* new_head = new nmethodBucket(nm, NULL);
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(new_head, _dependency_context_addr, head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}

void DependencyContext::release(nmethodBucket* b) {
  bool expunge = Atomic::load(&_cleaning_epoch) == 0;
  if (expunge) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(b, &_purge_list, purge_list_head) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
|
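The release() path above is the core of the deferral scheme: outside a cleaning cycle (_cleaning_epoch == 0) a bucket can be deleted on the spot, while during a cycle it is pushed onto a global lock-free purge list with a CAS loop. Below is a minimal standalone sketch of the same epoch-gated pattern, using std::atomic in place of HotSpot's Atomic wrapper; Bucket, g_cleaning_epoch, and g_purge_list are illustrative names, not the real declarations.

#include <atomic>
#include <cstdint>

// Illustrative stand-ins for the HotSpot types (not the real declarations).
struct Bucket {
  Bucket* purge_next = nullptr;
};

static std::atomic<uint64_t> g_cleaning_epoch{0};
static std::atomic<Bucket*>  g_purge_list{nullptr};

// Epoch-gated release: delete immediately outside a cleaning cycle,
// otherwise defer by pushing onto a lock-free purge list.
void release(Bucket* b) {
  if (g_cleaning_epoch.load(std::memory_order_relaxed) == 0) {
    delete b;                // safe: no concurrent cleaning in progress
    return;
  }
  Bucket* head = g_purge_list.load(std::memory_order_relaxed);
  do {
    b->purge_next = head;    // link to the current head before publishing
  } while (!g_purge_list.compare_exchange_weak(head, b,
                                               std::memory_order_release,
                                               std::memory_order_relaxed));
}

The CAS loop is the classic lock-free stack push: on failure, compare_exchange_weak reloads the observed head, so the node is simply re-linked and the publish is retried.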
//
// Clean up a dependency context by unlinking all dependents that correspond
// to is_unloading nmethods and placing them on a purge list, to be deleted
// later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries onto the purge list, where
  // they will be deleted during ClassLoaderDataGraph::purge().
  nmethodBucket* b = dependencies_not_unloading();
  while (b != NULL) {
    // next_not_unloading() unlinks is_unloading entries and places them on
    // the purge list as a side effect of the traversal.
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

//
// Invalidate all dependencies in the context
int DependencyContext::remove_all_dependents() {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(NULL);
  int marked = 0;
  int removed = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
      nm->mark_for_deoptimization();
      marked++;
    }
    nmethodBucket* next = b->next_not_unloading();
    removed++;
    release(b);
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  return marked;
|
//
// Clean up a dependency context by unlinking all dependents that correspond
// to is_unloading nmethods and placing them on a purge list, to be deleted
// later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries onto the purge list, where
  // they will be deleted during ClassLoaderDataGraph::purge().
  nmethodBucket* b = dependencies_not_unloading();
  while (b != NULL) {
    // next_not_unloading() unlinks is_unloading entries and places them on
    // the purge list as a side effect of the traversal.
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

//
// Invalidate all dependencies in the context
int DependencyContext::remove_all_dependents() {
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(NULL);
  int marked = 0;
  int removed = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
      nm->mark_for_deoptimization();
      marked++;
    }
    nmethodBucket* next = b->next_not_unloading();
    removed++;
    release(b);
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  return marked;
|
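remove_all_dependents() detaches the entire list up front with set_dependencies(NULL) and only then walks it, reading each bucket's successor before releasing the bucket, since release() may delete it immediately. A condensed sketch of that detach-then-walk shape follows, again using std::atomic and made-up names (Node, g_head, mark) rather than the HotSpot types; the real code detaches under CodeCache_lock rather than with an atomic exchange.

#include <atomic>

struct Node {
  Node* next = nullptr;
  bool  live = true;        // stands in for the is_alive()/count() checks
};

static std::atomic<Node*> g_head{nullptr};

// Detach-then-walk: swap the whole list out in one step, then process
// each node. The next pointer is read *before* the node is freed,
// mirroring the next/release ordering in remove_all_dependents().
int remove_all(void (*mark)(Node*)) {
  Node* n = g_head.exchange(nullptr, std::memory_order_acquire);
  int marked = 0;
  while (n != nullptr) {
    Node* next = n->next;   // must be read before the node is deleted
    if (n->live) {
      mark(n);              // analogous to nm->mark_for_deoptimization()
      marked++;
    }
    delete n;               // analogous to release(b)
    n = next;
  }
  return marked;
}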
    }
    if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(b, _dependency_context_addr);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC,
// and releases of nmethodBucket entries will be deferred by placing them on
// a purge list to be deleted later.
void DependencyContext::gc_prologue() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  uint64_t epoch = SafepointSynchronize::_safepoint_counter;
  Atomic::store(epoch, &_cleaning_epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is permissible to end the cleanup in a concurrent phase.
void DependencyContext::gc_epilogue() {
  uint64_t epoch = 0;
  Atomic::store(epoch, &_cleaning_epoch);
}

// This function skips over nmethodBuckets in the list that correspond to
// nmethods that are is_unloading. This exposes a view of the dependents
// as if they had already been cleaned, despite being cleaned concurrently.
// Any entry observed to be is_unloading() is unlinked and placed on the
// purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // No acquire needed because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == NULL || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
|
    }
    if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(b, _dependency_context_addr);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After cleaning_start() has been called, the dependency contexts may be
// claimed by the GC, and releases of nmethodBucket entries will be deferred
// by placing them on a purge list to be deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  uint64_t epoch = SafepointSynchronize::safepoint_counter();
  Atomic::store(epoch, &_cleaning_epoch);
}

// cleaning_end() marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is permissible to end the cleanup in a concurrent phase.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  Atomic::store(epoch, &_cleaning_epoch);
}

// This function skips over nmethodBuckets in the list that correspond to
// nmethods that are is_unloading. This exposes a view of the dependents
// as if they had already been cleaned, despite being cleaned concurrently.
// Any entry observed to be is_unloading() is unlinked and placed on the
// purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // No acquire needed because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == NULL || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
|
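next_not_unloading() combines traversal with lazy unlinking: a successor whose nmethod is_unloading gets spliced out with a CAS, and whichever thread wins the CAS takes responsibility for releasing the node. The sketch below shows that splice-and-retry shape under simplified assumptions; it omits the unstable-load re-check that the HotSpot version performs after the loadload fence, and Node/next_not_dead are illustrative names, not the real API.

#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};
  bool dead = false;        // stands in for nm->is_unloading()
};

// Lazy unlink during traversal, in the spirit of next_not_unloading():
// a dead successor is spliced out with a CAS; on contention the loop
// simply retries and re-reads the (possibly updated) next pointer.
Node* next_not_dead(Node* self) {
  for (;;) {
    Node* next = self->next.load(std::memory_order_relaxed);
    if (next == nullptr || !next->dead) {
      return next;          // a live (or absent) successor is safe to expose
    }
    Node* next_next = next->next.load(std::memory_order_acquire);
    // Try to unlink the dead node; whoever wins the CAS owns its release.
    if (self->next.compare_exchange_strong(next, next_next,
                                           std::memory_order_release,
                                           std::memory_order_relaxed)) {
      // release(next) would go here in the HotSpot code.
    }
  }
}

Either way the loop restarts: on CAS success the re-read next pointer is now next_next, and on failure another thread already changed the link, so the fresh value is examined instead.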