/*
 * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "utilities/powerOfTwo.hpp"

ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
  _length = heaps->length();
  _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
  for (int h = 0; h < _length; h++) {
    _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
  }
}

ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
  // Free with the same element type that was used for allocation above.
  FREE_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _iters);
}

void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
  for (int c = 0; c < _length; c++) {
    _iters[c].parallel_blobs_do(f);
  }
}

ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
        _heap(heap), _claimed_idx(0), _finished(false) {
}

void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  /*
   * Parallel code heap walk.
   *
   * All threads scan all code heaps, but only one thread executes the closure
   * on a given blob. This is achieved by claiming blocks of blobs: the thread
   * that claims a block processes every blob in it, while the other threads
   * fast-forward to the next claim attempt without processing anything.
   *
   * Threads that arrive after the walk is complete return immediately.
   */

  if (_finished) {
    return;
  }

  int stride = 256; // educated guess
  int stride_mask = stride - 1;
  assert (is_power_of_2(stride), "sanity");

  int count = 0;
  bool process_block = true;

  for (CodeBlob* cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
    int current = count++;
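    // At each stride boundary, race other workers with a CAS on the claim
    // index. For example, with stride 256, the thread that moves _claimed_idx
    // from 256 to 512 processes blobs [256, 511]; the losers clear
    // process_block and skip the whole stride.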
    if ((current & stride_mask) == 0) {
      process_block = (current >= _claimed_idx) &&
                      (Atomic::cmpxchg(&_claimed_idx, current, current + stride) == current);
    }
    if (process_block) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif
      }
    }
  }

  _finished = true;
}

ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
int ShenandoahCodeRoots::_disarmed_value = 1;

void ShenandoahCodeRoots::initialize() {
  _nmethod_table = new ShenandoahNMethodTable();
}

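// ShenandoahCodeRootsStyle selects the code root scanning strategy:
//   0 - sequential CodeCache iteration;
//   1 - parallel CodeCache iteration;
//   2 - parallel iteration over the Shenandoah-managed nmethod table.
// Styles 0 and 1 walk the stock CodeCache directly, so registration is a
// no-op for them; only style 2 maintains its own table.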
void ShenandoahCodeRoots::register_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1:
      break;
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->register_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->unregister_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->flush_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::arm_nmethods() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  _disarmed_value++;
  // Skip zero on wraparound: 0 is reserved for new nmethods
  if (_disarmed_value == 0) {
    _disarmed_value = 1;
  }

  JavaThreadIteratorWithHandle jtiwh;
  for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
    ShenandoahThreadLocalData::set_disarmed_value(thr, _disarmed_value);
  }
}

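// An nmethod is "armed" when its entry barrier guard differs from the global
// disarmed value; arm_nmethods() above therefore arms every nmethod at once
// by bumping that value. The closure and task below disarm nmethods
// individually by writing the current disarmed value back into them.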
class ShenandoahDisarmNMethodClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;

public:
  ShenandoahDisarmNMethodClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {
  }

  virtual void do_nmethod(nmethod* nm) {
    _bs->disarm(nm);
  }
};

class ShenandoahDisarmNMethodsTask : public AbstractGangTask {
private:
  ShenandoahDisarmNMethodClosure      _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahDisarmNMethodsTask() :
    AbstractGangTask("ShenandoahDisarmNMethodsTask"),
    _iterator(ShenandoahCodeRoots::table()) {
    assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint");
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahDisarmNMethodsTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::disarm_nmethods() {
  ShenandoahDisarmNMethodsTask task;
  ShenandoahHeap::heap()->workers()->run_task(&task);
}

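// Concurrent unlinking of unloading nmethods. This runs while mutators are
// executing, so the pass may fail and need a retry: taking the per-nmethod
// lock may be aborted (to avoid deadlocking against the evacuation OOM
// protocol), and clearing compiled ICs may run out of transitional IC stubs.
// Failures surface through failed()/need_refill_icBuffer(), and
// ShenandoahCodeRoots::unlink() below restarts the pass.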
class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool                      _unloading_occurred;
  volatile bool             _failed;
  bool                      _refill_icBuffer;
  ShenandoahHeap* const     _heap;
  BarrierSetNMethod* const  _bs;

  void set_failed() {
    Atomic::store(&_failed, true);
  }

  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }

public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false),
      _refill_icBuffer(false),
      _heap(ShenandoahHeap::heap()),
      _bs(ShenandoahBarrierSet::barrier_set()->barrier_set_nmethod()) {}

  virtual void do_nmethod(nmethod* nm) {
    assert(_heap->is_concurrent_weak_root_in_progress(), "Only this phase");
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    if (nm->is_unloading()) {
      ShenandoahAbortableNMethodLocker locker(nm_data->lock());
      if (locker.aborted()) {
        set_failed();
      } else {
        unlink(nm);
      }
      return;
    }

    ShenandoahAbortableNMethodLocker locker(nm_data->lock());
    if (locker.aborted()) {
      set_failed();
      return;
    }

    // Heal oops and disarm
    if (_bs->is_armed(nm)) {
      ShenandoahNMethod::heal_nmethod_metadata(nm_data);
      _bs->disarm(nm);
    }

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      _refill_icBuffer = true;
      set_failed();
    }
  }

  bool need_refill_icBuffer() const {
    return _refill_icBuffer;
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

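// Workers heal oops embedded in nmethod metadata while unlinking, which may
// evacuate objects; hence the ShenandoahEvacOOMScope in work(). The
// ICRefillVerifierMark ties each worker to the pass-wide ICRefillVerifier,
// which asserts that a failed IC transition is followed by a stub refill
// before the next cleaning attempt.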
class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure      _cl;
  ICRefillVerifier*                   _verifier;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ICRefillVerifierMark mark(_verifier);
    ShenandoahEvacOOMScope evac_scope;
    _iterator.nmethods_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }

  bool need_refill_icBuffer() const {
    return _cl.need_refill_icBuffer();
  }
};

void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;
    bool need_refill_icBuffer = false;

    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
      need_refill_icBuffer = task.need_refill_icBuffer();
    }

    if (need_refill_icBuffer) {
      // Cleaning failed because we ran out of transitional IC stubs,
      // so we have to refill and try again. Refilling requires taking
      // a safepoint, so we temporarily leave the suspendible thread set.
      SuspendibleThreadSetLeaver sts;
      InlineCacheBuffer::refill_ic_stubs();
    }
  }
}

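// Purging runs after the handshake that separates it from unlinking (see the
// comment in unlink(nm) above): unloading nmethods are no longer reachable by
// mutators at this point, so they can be transitioned to the unloaded state.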
class ShenandoahNMethodPurgeClosure : public NMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ShenandoahNMethodPurgeTask : public AbstractGangTask {
private:
  ShenandoahNMethodPurgeClosure       _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    AbstractGangTask("ShenandoahNMethodPurgeTask"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::purge(WorkGang* workers) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
}

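// Safepoint root iteration over code blobs. For style 2, CodeCache_lock is
// taken without a safepoint check (we are already at a safepoint) and held
// for the whole lifetime of the iterator, keeping the table snapshot stable.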
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
        _par_iterator(CodeCache::heaps()),
        _table_snapshot(NULL) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      CodeCache_lock->lock_without_safepoint_check();
      _table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
      _table_snapshot = NULL;
      CodeCache_lock->unlock();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure* f) {
  switch (ShenandoahCodeRootsStyle) {
    case 0: {
      if (_seq_claimed.try_set()) {
        CodeCache::blobs_do(f);
      }
      break;
    }
    case 1: {
      _par_iterator.parallel_blobs_do(f);
      break;
    }
    case 2: {
      ShenandoahCodeRootsIterator::fast_parallel_blobs_do(f);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(_table_snapshot != NULL, "Sanity");
  _table_snapshot->parallel_blobs_do(f);
}