/*
 * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "utilities/powerOfTwo.hpp"

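// Holds one claim-based iterator per code heap, so a gang of GC workers can walk
// all heaps in parallel while each blob is still visited by exactly one worker.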
ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
  _length = heaps->length();
  _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
  for (int h = 0; h < _length; h++) {
    _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
  }
}

ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
  FREE_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _iters);
}

void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
  for (int c = 0; c < _length; c++) {
    _iters[c].parallel_blobs_do(f);
  }
}

ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
        _heap(heap), _claimed_idx(0), _finished(false) {
}

void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  /*
   * Parallel code heap walk.
   *
   * This code makes all threads scan all code heaps, but only one thread executes the
   * closure on a given blob. This is achieved by recording "claimed" blocks: a thread
   * that has claimed a block processes all blobs in it, while other threads fast-forward
   * to the next block without processing.
   *
   * Threads that arrive late return immediately once the iterator is finished.
   */

  if (_finished) {
    return;
  }

  int stride = 256; // educated guess
  int stride_mask = stride - 1;
  assert(is_power_of_2(stride), "sanity");

  int count = 0;
  bool process_block = true;

  for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
    int current = count++;
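    // At each stride boundary, try to claim the next stride of blobs by CAS-ing
    // _claimed_idx forward. The winning thread processes the following "stride"
    // blobs; losing threads skip them and retry at the next boundary.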
    if ((current & stride_mask) == 0) {
      process_block = (current >= _claimed_idx) &&
                      (Atomic::cmpxchg(&_claimed_idx, current, current + stride) == current);
    }
    if (process_block) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif
      }
    }
  }

  _finished = true;
}

ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
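// Global "disarmed" epoch for the nmethod entry barriers: an nmethod whose guard
// value matches this epoch is considered disarmed.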
int ShenandoahCodeRoots::_disarmed_value = 1;

void ShenandoahCodeRoots::initialize() {
  _nmethod_table = new ShenandoahNMethodTable();
}

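// ShenandoahCodeRootsStyle selects the code roots scanning strategy. Styles 0 and 1
// walk the code cache directly (serially or in parallel) and need no registration
// bookkeeping; style 2 maintains Shenandoah's own nmethod table, which the
// register/unregister/flush hooks below keep up to date.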
void ShenandoahCodeRoots::register_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1:
      break;
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->register_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->unregister_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->flush_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

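// Arm all nmethods by bumping the global disarm epoch: nmethods disarmed under the
// previous epoch no longer match, so their entry barriers fire on next entry. Each
// Java thread caches the current epoch in its thread-local data.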
void ShenandoahCodeRoots::arm_nmethods() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  _disarmed_value++;
  // 0 is reserved for new nmethods
  if (_disarmed_value == 0) {
    _disarmed_value = 1;
  }

  JavaThreadIteratorWithHandle jtiwh;
  for (JavaThread *thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
    ShenandoahThreadLocalData::set_disarmed_value(thr, _disarmed_value);
  }
}

class ShenandoahDisarmNMethodClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;

public:
  ShenandoahDisarmNMethodClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {
  }

  virtual void do_nmethod(nmethod* nm) {
    _bs->disarm(nm);
  }
};

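// Safepoint task that disarms all registered nmethods in parallel. The concurrent
// nmethod iterator must be bracketed by nmethods_do_begin()/nmethods_do_end(),
// both taken under CodeCache_lock.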
class ShenandoahDisarmNMethodsTask : public AbstractGangTask {
private:
  ShenandoahDisarmNMethodClosure      _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahDisarmNMethodsTask() :
    AbstractGangTask("ShenandoahDisarmNMethodsTask"),
    _iterator(ShenandoahCodeRoots::table()) {
    assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint");
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahDisarmNMethodsTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::disarm_nmethods() {
  ShenandoahDisarmNMethodsTask task;
  ShenandoahHeap::heap()->workers()->run_task(&task);
}

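// First pass of concurrent class unloading: unlinks nmethods that are found to be
// unloading, and heals/disarms the live ones. Running out of transitional IC stubs
// is recorded via set_failed(), so the caller can refill the IC buffer and retry.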
class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool                      _unloading_occurred;
  volatile bool             _failed;
  ShenandoahHeap* const     _heap;
  BarrierSetNMethod* const  _bs;

  void set_failed() {
    Atomic::store(&_failed, true);
  }

  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // We don't strictly need it here when unlinking nmethods from
    // the Method, because the nmethod is only concurrently unlinked by
    // the entry barrier, which acquires the per-nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }

public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false),
      _heap(ShenandoahHeap::heap()),
      _bs(ShenandoahBarrierSet::barrier_set()->barrier_set_nmethod()) {}

  virtual void do_nmethod(nmethod* nm) {
    assert(_heap->is_concurrent_weak_root_in_progress(), "Only this phase");
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

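    // The nmethod has dead oops and is unloading: unlink it under its per-nmethod
    // lock, and leave it to the later purge pass (ShenandoahCodeRoots::purge) to
    // make it unloaded.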
    if (nm->is_unloading()) {
      ShenandoahReentrantLocker locker(nm_data->lock());
      unlink(nm);
      return;
    }

    ShenandoahReentrantLocker locker(nm_data->lock());

    // Heal oops and disarm
    if (_bs->is_armed(nm)) {
      ShenandoahNMethod::heal_nmethod_metadata(nm_data);
      _bs->disarm(nm);
    }

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure      _cl;
  ICRefillVerifier*                   _verifier;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ICRefillVerifierMark mark(_verifier);
    ShenandoahEvacOOMScope evac_scope;
    _iterator.nmethods_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }
};

void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;

    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}

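// Second pass of concurrent class unloading: nmethods unlinked by the pass above
// are now made unloaded. This runs after the handshake that separates unlink and
// purge (see the comment in unlink() above).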
class ShenandoahNMethodPurgeClosure : public NMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ShenandoahNMethodPurgeTask : public AbstractGangTask {
private:
  ShenandoahNMethodPurgeClosure       _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    AbstractGangTask("ShenandoahNMethodPurgeTask"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::purge(WorkGang* workers) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
}

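// Safepoint iterator over code roots. For the table-backed style it holds
// CodeCache_lock and a table snapshot for the whole lifetime of the iterator,
// which is why workers must not construct one (see the assert below).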
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
        _par_iterator(CodeCache::heaps()),
        _table_snapshot(NULL) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      CodeCache_lock->lock_without_safepoint_check();
      _table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
      _table_snapshot = NULL;
      CodeCache_lock->unlock();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

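// Dispatches to the strategy selected by ShenandoahCodeRootsStyle. The CSET_FILTER
// template parameter only takes effect for the table-backed style, which can skip
// nmethods known not to reference the collection set.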
template<bool CSET_FILTER>
void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) {
  switch (ShenandoahCodeRootsStyle) {
    case 0: {
      if (_seq_claimed.try_set()) {
        CodeCache::blobs_do(f);
      }
      break;
    }
    case 1: {
      _par_iterator.parallel_blobs_do(f);
      break;
    }
    case 2: {
      ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f);
}

void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f);
}
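
// A minimal usage sketch (an illustration assumed here, not code from this file):
// each GC worker passes its own closure to a single iterator shared by the gang.
//
//   ShenandoahAllCodeRootsIterator blobs;  // constructed once, at a safepoint
//   CodeBlobToOopClosure blobs_cl(&oops_cl, !CodeBlobToOopClosure::FixRelocations);
//   blobs.possibly_parallel_blobs_do(&blobs_cl);  // called by each worker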

template <bool CSET_FILTER>
void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(_table_snapshot != NULL, "Sanity");
  _table_snapshot->parallel_blobs_do<CSET_FILTER>(f);
}
 462