/*
 * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
  _length = heaps->length();
  _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
  for (int h = 0; h < _length; h++) {
    _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
  }
}

ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
  FREE_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _iters);
}

void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
  for (int c = 0; c < _length; c++) {
    _iters[c].parallel_blobs_do(f);
  }
}

ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
        _heap(heap), _claimed_idx(0), _finished(false) {
}

void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  /*
   * Parallel code heap walk.
   *
   * All threads scan all code heaps, but only one thread executes the closure for a
   * given blob. This is achieved by claiming blocks of blobs: the thread that claims
   * a block processes every blob in it, while other threads fast-forward past the
   * block without processing.
   *
   * Threads that arrive after the iterator is finished return immediately.
   */

  if (_finished) {
    return;
  }

  int stride = 256; // educated guess
  int stride_mask = stride - 1;
  assert(is_power_of_2(stride), "sanity");

  int count = 0;
  bool process_block = true;

  for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
    int current = count++;
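    // At the first blob of each stride-sized block, try to claim the whole block
    // [current, current + stride) by advancing _claimed_idx with a CAS. The winner
    // processes every blob in the block; losers skip ahead to the next block.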
    if ((current & stride_mask) == 0) {
      process_block = (current >= _claimed_idx) &&
                      (Atomic::cmpxchg(&_claimed_idx, current, current + stride) == current);
    }
    if (process_block) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif
      }
    }
  }

  _finished = true;
}

ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
int ShenandoahCodeRoots::_disarmed_value = 1;

void ShenandoahCodeRoots::initialize() {
  _nmethod_table = new ShenandoahNMethodTable();
}

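// ShenandoahCodeRootsStyle selects the code root scanning strategy: 0 iterates the
// whole code cache sequentially, 1 walks the code heaps in parallel, and 2 uses
// Shenandoah's own nmethod table (see the iterator dispatch below). Only style 2
// maintains the table, so registration is a no-op for styles 0 and 1.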
void ShenandoahCodeRoots::register_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->register_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->unregister_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->flush_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::arm_nmethods() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
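  // Bumping the global disarmed value effectively arms every nmethod at once:
  // the entry barrier compares the guard value stored in the nmethod against
  // the thread-local disarmed value, and a mismatch forces the slow path that
  // heals and disarms the nmethod.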
  _disarmed_value++;
  // 0 is reserved for new nmethods
  if (_disarmed_value == 0) {
    _disarmed_value = 1;
  }

  JavaThreadIteratorWithHandle jtiwh;
  for (JavaThread *thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
    ShenandoahThreadLocalData::set_disarmed_value(thr, _disarmed_value);
  }
}

class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool            _unloading_occurred;
  volatile bool   _failed;
  ShenandoahHeap* _heap;

  void set_failed() {
    Atomic::store(&_failed, true);
  }

  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }
public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false),
      _heap(ShenandoahHeap::heap()) {}

  virtual void do_nmethod(nmethod* nm) {
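    // Once any worker has failed (ran out of transitional IC stubs), all
    // workers bail out so the caller can refill the stubs and retry the pass.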
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    if (nm->is_unloading()) {
      ShenandoahReentrantLocker locker(nm_data->lock());
      ShenandoahEvacOOMScope evac_scope;
      unlink(nm);
      return;
    }

    ShenandoahReentrantLocker locker(nm_data->lock());

    // Heal oops and disarm
    ShenandoahEvacOOMScope evac_scope;
    if (_heap->is_evacuation_in_progress()) {
      ShenandoahNMethod::heal_nmethod(nm);
    }
    ShenandoahNMethod::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure      _cl;
  ICRefillVerifier*                   _verifier;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
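    // Beginning and ending the concurrent iteration must be serialized against
    // concurrent register/unregister of nmethods, hence the CodeCache_lock.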
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ICRefillVerifierMark mark(_verifier);
    _iterator.nmethods_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }
};

void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;

    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}

class ShenandoahNMethodPurgeClosure : public NMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
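    // nmethods observed as unloading were already unlinked in the earlier
    // unlink pass (separated from this purge by a handshake); transition
    // them to the unloaded state so they can be flushed later.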
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ShenandoahNMethodPurgeTask : public AbstractGangTask {
private:
  ShenandoahNMethodPurgeClosure       _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    AbstractGangTask("ShenandoahNMethodPurgeTask"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::purge(WorkGang* workers) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
}

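// Typical use (a sketch; the closure name is illustrative): a coordinating
// thread constructs the iterator at a safepoint, then each worker calls
// possibly_parallel_blobs_do() with its closure:
//
//   ShenandoahAllCodeRootsIterator iter;   // coordinating thread, at safepoint
//   iter.possibly_parallel_blobs_do(&cl);  // each worker thread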
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
        _par_iterator(CodeCache::heaps()),
        _table_snapshot(NULL) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
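      // CodeCache_lock is held for the entire lifetime of the iterator and
      // released in the destructor, keeping the snapshot stable.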
      CodeCache_lock->lock_without_safepoint_check();
      _table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
      _table_snapshot = NULL;
      CodeCache_lock->unlock();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

template<bool CSET_FILTER>
void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) {
  switch (ShenandoahCodeRootsStyle) {
    case 0: {
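      // Sequential fallback: the single thread that wins the claim walks the
      // whole code cache; everyone else does nothing.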
      if (_seq_claimed.try_set()) {
        CodeCache::blobs_do(f);
      }
      break;
    }
    case 1: {
      _par_iterator.parallel_blobs_do(f);
      break;
    }
    case 2: {
      ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f);
}

void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f);
}

template <bool CSET_FILTER>
void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(_table_snapshot != NULL, "Sanity");
  _table_snapshot->parallel_blobs_do<CSET_FILTER>(f);
}