17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "code/nmethod.hpp"
26 #include "code/dependencies.hpp"
27 #include "code/dependencyContext.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "runtime/atomic.hpp"
30 #include "runtime/perfData.hpp"
31 #include "utilities/exceptions.hpp"
32
33 PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = NULL;
34 PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = NULL;
35 PerfCounter* DependencyContext::_perf_total_buckets_stale_count = NULL;
36 PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count = NULL;
37
// VM startup hook: forwards to DependencyContext::init() so the perf
// counters below are created during initialization.
void dependencyContext_init() {
  DependencyContext::init();
}
41
// Create the jvmstat performance counters tracking nmethodBucket
// allocation, deallocation and staleness. Only active when UsePerfData
// is set; CHECK propagates any exception raised while registering a counter.
void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}
55
//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization. Returns the number of nmethods found.
//
int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
  int found = 0;
  for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
    nmethod* nm = b->get_nmethod();
    // since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    // Skip stale buckets (count == 0) and already-marked nmethods so each
    // affected nmethod is counted at most once.
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      changes.mark_for_deoptimization(nm);
      found++;
    }
  }
  return found;
}
81
//
// Add an nmethod to the dependency context.
// It's possible that an nmethod has multiple dependencies on a klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm, bool expunge) {
  assert_lock_strong(CodeCache_lock);
  // Reuse an existing bucket for this nmethod if there is one.
  for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
  }
  // No bucket for this nmethod yet: prepend a new one to the list.
  set_dependencies(new nmethodBucket(nm, dependencies()));
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
  if (expunge) {
    // Remove stale entries from the list.
    expunge_stale_entries();
  }
}
105
//
// Remove an nmethod dependency from the context.
// Decrement count of the nmethod in the dependency list and, optionally, remove
// the bucket completely when the count goes to 0. This method must find
// a corresponding bucket otherwise there's a bug in the recording of dependencies.
// Can be called concurrently by parallel GC threads.
//
void DependencyContext::remove_dependent_nmethod(nmethod* nm, bool expunge) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* first = dependencies();
  nmethodBucket* last = NULL;  // trails b so the matching bucket can be unlinked
  for (nmethodBucket* b = first; b != NULL; b = b->next()) {
    if (nm == b->get_nmethod()) {
      int val = b->decrement();
      guarantee(val >= 0, "Underflow: %d", val);
      if (val == 0) {
        if (expunge) {
          // Safe to unlink and free the bucket immediately.
          if (last == NULL) {
            set_dependencies(b->next());
          } else {
            last->set_next(b->next());
          }
          delete b;
          if (UsePerfData) {
            _perf_total_buckets_deallocated_count->inc();
          }
        } else {
          // Mark the context as having stale entries, since it is not safe to
          // expunge the list right now.
          set_has_stale_entries(true);
          if (UsePerfData) {
            _perf_total_buckets_stale_count->inc();
            _perf_total_buckets_stale_acc_count->inc();
          }
        }
      }
      if (expunge) {
        // Remove stale entries from the list.
        expunge_stale_entries();
      }
      return;
    }
    last = b;
  }
#ifdef ASSERT
  tty->print_raw_cr("### can't find dependent nmethod");
  nm->print();
#endif // ASSERT
  // Reaching here means the dependency was never recorded: fail hard.
  ShouldNotReachHere();
}
156
157 //
158 // Reclaim all unused buckets.
159 //
160 void DependencyContext::expunge_stale_entries() {
161 assert_locked_or_safepoint(CodeCache_lock);
162 if (!has_stale_entries()) {
163 assert(!find_stale_entries(), "inconsistent info");
164 return;
165 }
166 nmethodBucket* first = dependencies();
167 nmethodBucket* last = NULL;
168 int removed = 0;
169 for (nmethodBucket* b = first; b != NULL;) {
170 assert(b->count() >= 0, "bucket count: %d", b->count());
171 nmethodBucket* next = b->next();
172 if (b->count() == 0) {
173 if (last == NULL) {
174 first = next;
175 } else {
176 last->set_next(next);
177 }
178 removed++;
179 delete b;
180 // last stays the same.
181 } else {
182 last = b;
183 }
184 b = next;
185 }
186 set_dependencies(first);
187 set_has_stale_entries(false);
188 if (UsePerfData && removed > 0) {
189 _perf_total_buckets_deallocated_count->inc(removed);
190 _perf_total_buckets_stale_count->dec(removed);
191 }
192 }
193
194 //
195 // Invalidate all dependencies in the context
196 int DependencyContext::remove_all_dependents() {
197 assert_locked_or_safepoint(CodeCache_lock);
198 nmethodBucket* b = dependencies();
199 set_dependencies(NULL);
200 int marked = 0;
201 int removed = 0;
202 while (b != NULL) {
203 nmethod* nm = b->get_nmethod();
204 if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
205 nm->mark_for_deoptimization();
206 marked++;
207 }
208 nmethodBucket* next = b->next();
209 removed++;
210 delete b;
211 b = next;
212 }
213 set_has_stale_entries(false);
214 if (UsePerfData && removed > 0) {
215 _perf_total_buckets_deallocated_count->inc(removed);
216 }
217 return marked;
218 }
219
220 #ifndef PRODUCT
221 void DependencyContext::print_dependent_nmethods(bool verbose) {
222 int idx = 0;
223 for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
224 nmethod* nm = b->get_nmethod();
225 tty->print("[%d] count=%d { ", idx++, b->count());
226 if (!verbose) {
227 nm->print_on(tty, "nmethod");
228 tty->print_cr(" } ");
229 } else {
230 nm->print();
231 nm->print_dependencies();
232 tty->print_cr("--- } ");
233 }
234 }
235 }
236
237 bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
238 for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
239 if (nm == b->get_nmethod()) {
240 #ifdef ASSERT
241 int count = b->count();
242 assert(count >= 0, "count shouldn't be negative: %d", count);
243 #endif
244 return true;
245 }
246 }
247 return false;
248 }
249
250 bool DependencyContext::find_stale_entries() {
251 for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
252 if (b->count() == 0) return true;
253 }
254 return false;
255 }
256
257 #endif //PRODUCT
258
// Atomically decrement the bucket's dependency count; returns the new value.
int nmethodBucket::decrement() {
  return Atomic::sub(1, &_count);
}
|
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "code/nmethod.hpp"
26 #include "code/dependencies.hpp"
27 #include "code/dependencyContext.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "runtime/atomic.hpp"
30 #include "runtime/perfData.hpp"
31 #include "utilities/exceptions.hpp"
32
33 PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = NULL;
34 PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = NULL;
35 PerfCounter* DependencyContext::_perf_total_buckets_stale_count = NULL;
36 PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count = NULL;
37 nmethodBucket* volatile DependencyContext::_purge_list = NULL;
38 volatile uint64_t DependencyContext::_cleaning_epoch = 0;
39
// VM startup hook: forwards to DependencyContext::init() so the perf
// counters below are created during initialization.
void dependencyContext_init() {
  DependencyContext::init();
}
43
// Create the jvmstat performance counters tracking nmethodBucket
// allocation, deallocation and staleness. Only active when UsePerfData
// is set; CHECK propagates any exception raised while registering a counter.
void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}
57
//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization. Returns the number of nmethods found.
//
int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
  int found = 0;
  // Traverse via the *_not_unloading accessors so entries whose nmethod
  // is_unloading are skipped (and unlinked as a side effect of traversal).
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    // since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      changes.mark_for_deoptimization(nm);
      found++;
    }
  }
  return found;
}
83
//
// Add an nmethod to the dependency context.
// It's possible that an nmethod has multiple dependencies on a klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  // Reuse an existing bucket for this nmethod if there is one.
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
  }
  // No bucket yet: push a new one onto the head of the list. Insertion is
  // single-threaded (CodeCache_lock), but the CAS loop is needed because a
  // concurrent cleanup may unlink the current head at the same time.
  nmethodBucket* new_head = new nmethodBucket(nm, NULL);
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(new_head, _dependency_context_addr, head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}
110
// Release one nmethodBucket. When _cleaning_epoch is zero, no GC cleaning
// is in progress and the bucket is deleted immediately (caller must hold
// CodeCache_lock or be at a safepoint). Otherwise deletion is deferred by
// pushing the bucket onto the global _purge_list with a CAS loop; it is
// freed later by purge_dependency_contexts().
void DependencyContext::release(nmethodBucket* b) {
  bool expunge = Atomic::load(&_cleaning_epoch) == 0;
  if (expunge) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(b, &_purge_list, purge_list_head) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}
135
//
// Remove an nmethod dependency from the context.
// Decrement count of the nmethod in the dependency list and, optionally, remove
// the bucket completely when the count goes to 0. This method must find
// a corresponding bucket otherwise there's a bug in the recording of dependencies.
// Can be called concurrently by parallel GC threads.
//
void DependencyContext::remove_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* first = dependencies_not_unloading();
  nmethodBucket* last = NULL;  // trails b so the matching bucket can be unlinked
  for (nmethodBucket* b = first; b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      int val = b->decrement();
      guarantee(val >= 0, "Underflow: %d", val);
      if (val == 0) {
        if (last == NULL) {
          // If there was not a head that was not unloading, we can set a new
          // head without a CAS, because we know there is no contending cleanup.
          set_dependencies(b->next_not_unloading());
        } else {
          // Only supports a single inserting thread (protected by CodeCache_lock)
          // for now. Therefore, the next pointer only competes with another cleanup
          // operation. That interaction does not need a CAS.
          last->set_next(b->next_not_unloading());
        }
        release(b);  // immediate delete, or deferred via the purge list during GC
      }
      return;
    }
    last = b;
  }
  // NOTE(review): unlike the older locked variant, not finding a bucket is
  // tolerated here — presumably entries for is_unloading nmethods can already
  // have been unlinked by concurrent cleaning. Confirm against callers.
}
169
170 //
171 // Reclaim all unused buckets.
172 //
173 void DependencyContext::purge_dependency_contexts() {
174 int removed = 0;
175 for (nmethodBucket* b = _purge_list; b != NULL;) {
176 nmethodBucket* next = b->purge_list_next();
177 removed++;
178 delete b;
179 b = next;
180 }
181 if (UsePerfData && removed > 0) {
182 _perf_total_buckets_deallocated_count->inc(removed);
183 }
184 _purge_list = NULL;
185 }
186
//
// Cleanup a dependency context by unlinking and placing all dependents corresponding
// to is_unloading nmethods on a purge list, which will be deleted later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries on the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge().
  // The loop body is intentionally empty: dependencies_not_unloading() and
  // next_not_unloading() unlink is_unloading entries as a side effect of
  // traversal, so a full walk performs the whole cleanup.
  nmethodBucket* b = dependencies_not_unloading();
  while (b != NULL) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}
203
//
// Invalidate all dependencies in the context: mark every live dependent
// nmethod for deoptimization and release every bucket. Returns the number
// of nmethods marked.
int DependencyContext::remove_all_dependents() {
  // Detach the whole list first, then walk it privately.
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(NULL);
  int marked = 0;
  int removed = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    // Only live, not-yet-marked nmethods with a positive count are marked.
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
      nm->mark_for_deoptimization();
      marked++;
    }
    nmethodBucket* next = b->next_not_unloading();
    removed++;
    release(b);  // deletes now, or defers to the purge list during GC cleaning
    b = next;
  }
  set_has_stale_entries(false);
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  return marked;
}
227
228 #ifndef PRODUCT
229 void DependencyContext::print_dependent_nmethods(bool verbose) {
230 int idx = 0;
231 for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
232 nmethod* nm = b->get_nmethod();
233 tty->print("[%d] count=%d { ", idx++, b->count());
234 if (!verbose) {
235 nm->print_on(tty, "nmethod");
236 tty->print_cr(" } ");
237 } else {
238 nm->print();
239 nm->print_dependencies();
240 tty->print_cr("--- } ");
241 }
242 }
243 }
244
245 bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
246 for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
247 if (nm == b->get_nmethod()) {
248 #ifdef ASSERT
249 int count = b->count();
250 assert(count >= 0, "count shouldn't be negative: %d", count);
251 #endif
252 return true;
253 }
254 }
255 return false;
256 }
257
258 #endif //PRODUCT
259
// Atomically decrement the bucket's dependency count; returns the new value.
int nmethodBucket::decrement() {
  return Atomic::sub(1, &_count);
}
263
// We use a safepoint counter to track the safepoint counter the last time a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    // Already cleaned during this epoch (or no cleaning is active).
    return false;
  }
  // The winner of the CAS owns the cleanup for this epoch; losers back off.
  return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
}
275
// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = OrderAccess::load_acquire(_dependency_context_addr);
    if (head == NULL || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    // Head is unloading: try to unlink it. Re-read the head afterwards to
    // confirm head->next() was taken from a still-linked bucket.
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}
298
// Relaxed accessors
// Plain atomic store of the list head; callers provide any needed ordering.
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(b, _dependency_context_addr);
}
303
// Plain atomic load of the list head, including entries that may be unloading.
nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}
307
// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  // A non-zero epoch makes release() defer deletions to the purge list.
  uint64_t epoch = SafepointSynchronize::safepoint_counter();
  Atomic::store(epoch, &_cleaning_epoch);
}
316
// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is permitted to end the cleanup in a concurrent phase.
void DependencyContext::cleaning_end() {
  // Epoch zero means "no cleaning in progress"; release() then deletes eagerly.
  uint64_t epoch = 0;
  Atomic::store(epoch, &_cleaning_epoch);
}
324
// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as-if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == NULL || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    // Successor is unloading: try to unlink it. Re-read _next afterwards to
    // confirm next->_next was taken from a still-linked bucket.
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (Atomic::cmpxchg(next_next, &_next, next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}
350
// Relaxed accessors
// Plain atomic load of the successor link (may reference an unloading entry).
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}
355
// Plain atomic store of the successor link.
void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(b, &_next);
}
359
// Plain atomic load of the purge-list link.
nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}
363
// Plain atomic store of the purge-list link.
void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(b, &_purge_list_next);
}
|