}

//
// Add an nmethod to the dependency context.
// It's possible that an nmethod has multiple dependencies on a klass,
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
  }
  nmethodBucket* new_head = new nmethodBucket(nm, NULL);
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}
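
// The insertion loop above is a classic lock-free list push: link the new
// node to the observed head, then publish it with a CAS, retrying if the
// head moved in the meantime. Below is a minimal standalone sketch of the
// same pattern (illustration only, not part of this file), using
// std::atomic and a hypothetical Node type in place of HotSpot's Atomic
// class and nmethodBucket.
#include <atomic>

namespace push_sketch {

struct Node {
  Node* next = nullptr;
};

std::atomic<Node*> list_head{nullptr};

void push(Node* new_node) {
  Node* head = list_head.load(std::memory_order_relaxed);
  do {
    new_node->next = head;
    // On failure, compare_exchange_weak reloads 'head' with the value it
    // actually observed, so the loop retries against the fresh head.
  } while (!list_head.compare_exchange_weak(head, new_node,
                                            std::memory_order_release,
                                            std::memory_order_relaxed));
}

} // namespace push_sketch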

void DependencyContext::release(nmethodBucket* b) {
  bool expunge = Atomic::load(&_cleaning_epoch) == 0;
  if (expunge) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}
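
// While a cleaning epoch is active, release() parks unlinked buckets on the
// purge list instead of freeing them, because a concurrent reader may still
// be traversing them. Once no reader can hold a reference (e.g. at the
// safepoint ending the epoch), the entire list can be detached with one
// atomic swap and deleted. A sketch of such a drain follows (illustration
// only; this excerpt does not show HotSpot's actual purge routine).
#include <atomic>

namespace purge_sketch {

struct Bucket {
  Bucket* purge_list_next = nullptr;
};

std::atomic<Bucket*> purge_list{nullptr};

void drain_purge_list() {
  // Detach the whole list atomically; later releases start a new one.
  Bucket* b = purge_list.exchange(nullptr);
  while (b != nullptr) {
    Bucket* next = b->purge_list_next;
    delete b;
    b = next;
  }
}

} // namespace purge_sketch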

//
// Remove an nmethod dependency from the context.
// Decrement the count of the nmethod in the dependency list and, optionally,
// remove the bucket completely when the count goes to 0. This method must find
// a corresponding bucket, otherwise there's a bug in the recording of dependencies.
// Can be called concurrently by parallel GC threads.
//
void DependencyContext::remove_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* first = dependencies_not_unloading();
  // ... (the remainder of this method, and the non-product verification
  // code that follows it, are elided in this excerpt) ...
}

int nmethodBucket::decrement() {
  return Atomic::sub(&_count, 1);
}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}
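
// claim_cleanup() is a per-context, per-epoch claim: of all GC threads that
// race to clean the same context during one epoch, exactly one wins the CAS
// and the rest back off. A standalone sketch of the idiom (hypothetical
// names, std::atomic in place of HotSpot's Atomic class):
#include <atomic>
#include <cstdint>

namespace claim_sketch {

std::atomic<uint64_t> global_epoch{1};  // bumped once per cleanup cycle

struct Context {
  std::atomic<uint64_t> last_cleanup{0};
};

bool claim_cleanup(Context& ctx) {
  uint64_t epoch = global_epoch.load();
  uint64_t last = ctx.last_cleanup.load();
  if (last >= epoch) {
    return false;  // already claimed (or cleaned) in this epoch
  }
  // Only one thread can move last_cleanup from 'last' to 'epoch'.
  return ctx.last_cleanup.compare_exchange_strong(last, epoch);
}

} // namespace claim_sketch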

// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that are is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
    if (head == NULL || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}
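
// The re-check of the head between loading head->next and the CAS is the
// subtle part: without it, the head could change after 'head' was loaded,
// making the observed successor stale and the CAS an incorrect splice. A
// standalone sketch of this unlink-at-head pattern (illustration only; a
// 'dead' flag stands in for is_unloading(), and deferred reclamation such
// as the purge list above is assumed so unlinked nodes stay readable):
#include <atomic>

namespace unlink_sketch {

struct Node {
  std::atomic<Node*> next{nullptr};
  bool dead = false;
};

std::atomic<Node*> head{nullptr};

Node* first_live() {
  for (;;) {
    Node* h = head.load(std::memory_order_acquire);
    if (h == nullptr || !h->dead) {
      return h;
    }
    Node* h_next = h->next.load(std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_acquire);  // ~ loadload
    if (head.load(std::memory_order_relaxed) != h) {
      continue;  // unstable: h_next may not belong to the current head
    }
    Node* expected = h;
    if (head.compare_exchange_strong(expected, h_next)) {
      // We claimed the unlink; 'h' must go to deferred reclamation,
      // not immediate delete, since other readers may still see it.
    }
  }
}

} // namespace unlink_sketch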

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(_dependency_context_addr, b);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC,
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  // ... (the rest of this function and the code that follows it are
  // elided in this excerpt) ...
}
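
// The body elided above must, judging from release() and claim_cleanup(),
// publish a non-zero _cleaning_epoch: that is the switch that makes
// release() defer deletion to the purge list for the duration of the
// concurrent cleaning phase. A hedged sketch of that bracketing protocol
// (hypothetical names; the real function bodies are not shown here):
#include <atomic>
#include <cstdint>

namespace epoch_sketch {

std::atomic<uint64_t> cleaning_epoch{0};
uint64_t cleaning_epoch_monotonic = 0;  // only advanced at safepoints

void cleaning_start_sketch() {  // runs at a safepoint
  // Publish a fresh non-zero epoch: from now on, releases are deferred.
  cleaning_epoch.store(++cleaning_epoch_monotonic);
}

void cleaning_end_sketch() {  // runs at the next safepoint
  // Back to zero: releases delete immediately again, and the purge list
  // of buckets unlinked during the phase can safely be drained.
  cleaning_epoch.store(0);
}

} // namespace epoch_sketch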

// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == NULL || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(&_next, b);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(&_purge_list_next, b);
}