/*
 * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "utilities/powerOfTwo.hpp"

ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
  _length = heaps->length();
  // NEW_C_HEAP_ARRAY allocates raw storage without invoking constructors;
  // each slot is initialized by the assignments below.
  _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
  for (int h = 0; h < _length; h++) {
    _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
  }
}

ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
  // Free with the same element type that was used for allocation above.
  FREE_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _iters);
}

void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
  for (int c = 0; c < _length; c++) {
    _iters[c].parallel_blobs_do(f);
  }
}

ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
        _heap(heap), _claimed_idx(0), _finished(false) {
}

void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  /*
   * Parallel code heap walk.
   *
   * This code makes all threads scan all code heaps, but only one thread executes the
   * closure for any given blob. This is achieved by claiming blocks of blobs: once a
   * thread has claimed a block, it processes all blobs in it; other threads fast-forward
   * to the next block without processing.
   *
   * Threads that arrive late return immediately once the iterator is finished.
   */

  if (_finished) {
    return;
  }

  int stride = 256; // educated guess
  int stride_mask = stride - 1;
  assert(is_power_of_2(stride), "sanity");

  int count = 0;
  bool process_block = true;

  for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
    int current = count++;
    if ((current & stride_mask) == 0) {
      process_block = (current >= _claimed_idx) &&
                      (Atomic::cmpxchg(&_claimed_idx, current, current + stride) == current);
    }
    if (process_block) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif
      }
    }
  }

  _finished = true;
}
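
// Illustration (not part of the product code): the claiming scheme above is a
// striped-claiming idiom that can be sketched in isolation. Below is a minimal,
// hypothetical stand-alone version in portable C++; claimed_idx, STRIDE and
// should_process() are illustrative names that do not exist in HotSpot, which
// uses Atomic::cmpxchg on the _claimed_idx member instead of std::atomic.
//
//   #include <atomic>
//
//   static std::atomic<int> claimed_idx{0};
//   static const int STRIDE = 256; // matches the stride used above
//
//   // Called once per blob, in index order, by every scanning worker.
//   bool should_process(int current, bool prev_decision) {
//     if ((current % STRIDE) != 0) {
//       return prev_decision;  // inside a block: keep the decision made at its start
//     }
//     // At a block boundary: claim the block [current, current + STRIDE) unless
//     // another worker has already claimed past it. Losing the CAS means skipping.
//     int expected = current;
//     return current >= claimed_idx.load() &&
//            claimed_idx.compare_exchange_strong(expected, current + STRIDE);
//   }
//
// Workers that lose a claim never block; they only fast-forward, which is why the
// walk needs no lock beyond the single atomic counter.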

ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
int ShenandoahCodeRoots::_disarmed_value = 1;

void ShenandoahCodeRoots::initialize() {
  _nmethod_table = new ShenandoahNMethodTable();
}

void ShenandoahCodeRoots::register_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1:
      break;
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->register_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->unregister_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->flush_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::arm_nmethods() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  _disarmed_value++;
  // 0 is reserved for new nmethods
  if (_disarmed_value == 0) {
    _disarmed_value = 1;
  }

  JavaThreadIteratorWithHandle jtiwh;
  for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
    ShenandoahThreadLocalData::set_disarmed_value(thr, _disarmed_value);
  }
}
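
// Conceptually this is an epoch scheme: every nmethod carries a guard value, and
// an nmethod is "disarmed" exactly when its guard matches the current
// _disarmed_value, so bumping the epoch re-arms all nmethods at once without
// touching them individually. A hedged sketch of the check the entry barrier
// performs (the real test lives in BarrierSetNMethod and in generated code;
// guard_value() is an illustrative accessor, not a HotSpot API):
//
//   bool is_armed(nmethod* nm, Thread* thr) {
//     // Armed when the nmethod's guard lags behind the thread-local epoch.
//     return guard_value(nm) != ShenandoahThreadLocalData::disarmed_value(thr);
//   }
//
// Zero is skipped above because it is reserved for newly registered nmethods,
// which must be treated as armed until their first entry barrier fires.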

class ShenandoahDisarmNMethodClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;

public:
  ShenandoahDisarmNMethodClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {
  }

  virtual void do_nmethod(nmethod* nm) {
    _bs->disarm(nm);
  }
};

class ShenandoahDisarmNMethodsTask : public AbstractGangTask {
private:
  ShenandoahDisarmNMethodClosure      _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahDisarmNMethodsTask() :
    AbstractGangTask("ShenandoahDisarmNMethodsTask"),
    _iterator(ShenandoahCodeRoots::table()) {
    assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint");
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahDisarmNMethodsTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::disarm_nmethods() {
  ShenandoahDisarmNMethodsTask task;
  ShenandoahHeap::heap()->workers()->run_task(&task);
}
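
// The disarm task above establishes a pattern that the unlink and purge tasks
// below repeat: bracket the concurrent nmethod iterator with nmethods_do_begin()
// and nmethods_do_end() under CodeCache_lock, using the task's constructor and
// destructor as an RAII guard around the parallel work() calls. A hedged sketch
// of the shape, with MyClosure standing in for any NMethodClosure:
//
//   class MyIterationTask : public AbstractGangTask {
//     MyClosure                           _cl;
//     ShenandoahConcurrentNMethodIterator _iterator;
//   public:
//     MyIterationTask() :
//       AbstractGangTask("MyIterationTask"),
//       _iterator(ShenandoahCodeRoots::table()) {
//       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//       _iterator.nmethods_do_begin();   // publish the iteration to the table
//     }
//     ~MyIterationTask() {
//       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//       _iterator.nmethods_do_end();     // retire the iteration
//     }
//     virtual void work(uint worker_id) {
//       _iterator.nmethods_do(&_cl);     // workers claim table entries concurrently
//     }
//   };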

class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool                      _unloading_occurred;
  volatile bool             _failed;
  ShenandoahHeap* const     _heap;
  BarrierSetNMethod* const  _bs;

  void set_failed() {
    Atomic::store(&_failed, true);
  }

  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per-nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }
public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false),
      _heap(ShenandoahHeap::heap()),
      _bs(ShenandoahBarrierSet::barrier_set()->barrier_set_nmethod()) {}

  virtual void do_nmethod(nmethod* nm) {
    assert(_heap->is_concurrent_weak_root_in_progress(), "Only this phase");
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    if (nm->is_unloading()) {
      ShenandoahReentrantLocker locker(nm_data->lock());
      unlink(nm);
      return;
    }

    ShenandoahReentrantLocker locker(nm_data->lock());

    // Heal oops and disarm
    if (_bs->is_armed(nm)) {
      ShenandoahNMethod::heal_nmethod(nm);
      _bs->disarm(nm);
    }

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure      _cl;
  ICRefillVerifier*                   _verifier;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ICRefillVerifierMark mark(_verifier);
    _iterator.nmethods_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }
};

void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;

    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}

class ShenandoahNMethodPurgeClosure : public NMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ShenandoahNMethodPurgeTask : public AbstractGangTask {
private:
  ShenandoahNMethodPurgeClosure       _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    AbstractGangTask("ShenandoahNMethodPurgeTask"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::purge(WorkGang* workers) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
}
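
// unlink() and purge() are the two halves of concurrent code cache unloading:
// unlink() severs unloading nmethods from their Method and dependencies, a
// handshake with all threads then guarantees nobody still executes stale code,
// and only afterwards does purge() call make_unloaded(). A hedged sketch of how
// a driver might sequence them; the actual call sites live in Shenandoah's
// unloading phase, not in this file, and the handshake closure is elided:
//
//   void unload_code_cache(WorkGang* workers, bool unloading_occurred) {
//     ShenandoahCodeRoots::unlink(workers, unloading_occurred);
//     // Handshake separating unlink and purge; see the comment in
//     // ShenandoahNMethodUnlinkClosure::unlink() above.
//     Handshake::execute(/* rendezvous closure */ ...);
//     ShenandoahCodeRoots::purge(workers);
//   }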

ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
        _par_iterator(CodeCache::heaps()),
        _table_snapshot(NULL) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      CodeCache_lock->lock_without_safepoint_check();
      _table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
      _table_snapshot = NULL;
      CodeCache_lock->unlock();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

template<bool CSET_FILTER>
void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) {
  switch (ShenandoahCodeRootsStyle) {
    case 0: {
      if (_seq_claimed.try_set()) {
        CodeCache::blobs_do(f);
      }
      break;
    }
    case 1: {
      _par_iterator.parallel_blobs_do(f);
      break;
    }
    case 2: {
      ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f);
}

void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f);
}

template <bool CSET_FILTER>
void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(_table_snapshot != NULL, "Sanity");
  _table_snapshot->parallel_blobs_do<CSET_FILTER>(f);
}
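
// Usage sketch (hedged; the real call sites are in Shenandoah's root processing):
// an iterator is constructed once, at a safepoint, on a non-worker thread (both
// conditions are asserted in the constructor), and then shared with the worker
// gang, where each worker passes its own closure:
//
//   // VM thread, at a safepoint:
//   ShenandoahCsetCodeRootsIterator iter;
//
//   // Each worker, with some CodeBlobClosure cl; the claiming schemes above
//   // make this safe to call from many threads concurrently:
//   iter.possibly_parallel_blobs_do(&cl);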
 460