/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"

PerfCounter* DependencyContext::_perf_total_buckets_allocated_count   = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count       = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count   = NULL;
nmethodBucket* volatile DependencyContext::_purge_list = NULL;
volatile uint64_t DependencyContext::_cleaning_epoch = 0;
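// _purge_list collects nmethodBuckets whose deletion had to be deferred because a
// GC cleaning pass was in progress; _cleaning_epoch is non-zero only between
// gc_prologue() and gc_epilogue(), i.e. while such a pass may be running.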

void dependencyContext_init() {
  DependencyContext::init();
}

void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
        PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}

//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization.  Returns the number of nmethods found.
//
int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
  int found = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    // since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      changes.mark_for_deoptimization(nm);
      found++;
    }
  }
  return found;
}

//
// Add an nmethod to the dependency context.
// It's possible that an nmethod has multiple dependencies on a klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
  }
  nmethodBucket* new_head = new nmethodBucket(nm, NULL);
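  // Push the new bucket onto the head of the list with a CAS loop. Insertion is
  // serialized by the CodeCache_lock, so the CAS only has to tolerate concurrent
  // cleanup operations that may unlink the current head.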
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(new_head, _dependency_context_addr, head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}

void DependencyContext::release(nmethodBucket* b) {
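  // If no cleaning epoch is active (_cleaning_epoch == 0), no GC cleanup pass can
  // still be examining this bucket, so it can be deleted right away. Otherwise its
  // deletion is deferred by pushing it onto the global purge list.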
  bool expunge = Atomic::load(&_cleaning_epoch) == 0;
  if (expunge) {
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(b, &_purge_list, purge_list_head) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Remove an nmethod dependency from the context.
// Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0.  This method must find
// a corresponding bucket otherwise there's a bug in the recording of dependencies.
// Can be called concurrently by parallel GC threads.
//
void DependencyContext::remove_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* first = dependencies_not_unloading();
  nmethodBucket* last = NULL;
  for (nmethodBucket* b = first; b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      int val = b->decrement();
      guarantee(val >= 0, "Underflow: %d", val);
      if (val == 0) {
        if (last == NULL) {
          // If there was not a head that was not unloading, we can set a new
          // head without a CAS, because we know there is no contending cleanup.
          set_dependencies(b->next_not_unloading());
        } else {
          // Only supports a single inserting thread (protected by CodeCache_lock)
          // for now. Therefore, the next pointer only competes with another cleanup
          // operation. That interaction does not need a CAS.
          last->set_next(b->next_not_unloading());
        }
        release(b);
      }
      return;
    }
    last = b;
  }
}

//
// Reclaim all unused buckets.
//
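// Buckets are deferred onto the purge list by release() while a GC cleaning pass is
// in progress; they are reclaimed here, during ClassLoaderDataGraph::purge(), once
// it is safe to delete them.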
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != NULL;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = NULL;
}

//
// Clean up a dependency context by unlinking and placing all dependents corresponding
// to is_unloading nmethods on a purge list, which will be deleted later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries onto the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge().
  nmethodBucket* b = dependencies_not_unloading();
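  // The walk below performs the actual cleanup: dependencies_not_unloading() and
  // next_not_unloading() unlink every is_unloading entry they encounter and hand
  // it to release(), which defers its deletion to the purge list.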
  while (b != NULL) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

//
// Invalidate all dependencies in the context
int DependencyContext::remove_all_dependents() {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(NULL);
  int marked = 0;
  int removed = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
      nm->mark_for_deoptimization();
      marked++;
    }
    nmethodBucket* next = b->next_not_unloading();
    removed++;
    release(b);
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  return marked;
}

#ifndef PRODUCT
void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] count=%d { ", idx++, b->count());
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies();
      tty->print_cr("--- } ");
    }
  }
}

bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
#ifdef ASSERT
      int count = b->count();
      assert(count >= 0, "count shouldn't be negative: %d", count);
#endif
      return true;
    }
  }
  return false;
}

#endif //PRODUCT

int nmethodBucket::decrement() {
  return Atomic::sub(1, &_count);
}

// Each dependency context records the value of the safepoint counter at the last
// time the context was cleaned (accessed through _last_cleanup_addr). GC threads
// claim cleanup tasks by performing a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
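  // Attempt to advance the per-context last-cleanup value to the current cleaning
  // epoch. Only the thread whose CAS succeeds has claimed this context for cleanup.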
  return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
}

// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = OrderAccess::load_acquire(_dependency_context_addr);
    if (head == NULL || !head->get_nmethod()->is_unloading()) {
      return head;
    }
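    // The head nmethod is unloading: snapshot its successor, verify that the head
    // is still the list head, and only then try to unlink the stale bucket.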
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(b, _dependency_context_addr);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
void DependencyContext::gc_prologue() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
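  // Record the current safepoint counter as the cleaning epoch. While it is
  // non-zero, release() defers bucket deletion to the purge list, and
  // claim_cleanup() can tell which contexts were already cleaned in this epoch.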
  uint64_t epoch = SafepointSynchronize::_safepoint_counter;
  Atomic::store(epoch, &_cleaning_epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is permitted to end the cleanup in a concurrent phase.
void DependencyContext::gc_epilogue() {
  uint64_t epoch = 0;
  Atomic::store(epoch, &_cleaning_epoch);
}

// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as-if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == NULL || !next->get_nmethod()->is_unloading()) {
      return next;
    }
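    // The next nmethod is unloading: snapshot its successor, then re-check that
    // _next still points at it before attempting to unlink the stale bucket.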
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (Atomic::cmpxchg(next_next, &_next, next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(b, &_next);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(b, &_purge_list_next);
}