/*
 * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
  _length = heaps->length();
  _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
  for (int h = 0; h < _length; h++) {
    _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
  }
}

ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
  // Free with the same type that was used for the allocation above.
  FREE_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _iters);
}

void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
  for (int c = 0; c < _length; c++) {
    _iters[c].parallel_blobs_do(f);
  }
}

ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
        _heap(heap), _claimed_idx(0), _finished(false) {
}

void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  /*
   * Parallel code heap walk.
   *
   * This code makes all threads scan all code heaps, but only one thread executes the
   * closure on a given blob. This is achieved by recording the "claimed" blocks: the
   * thread that has claimed a block processes all blobs in it. Other threads have to
   * fast-forward to the next block without processing.
   *
   * Threads that arrive late return immediately if the iterator is already finished.
   */

  if (_finished) {
    return;
  }

  const int stride = 256; // educated guess
  const int stride_mask = stride - 1;
  assert (is_power_of_2(stride), "sanity");

  int count = 0;
  bool process_block = true;

  for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
    int current = count++;
    if ((current & stride_mask) == 0) {
      process_block = (current >= _claimed_idx) &&
                      (Atomic::cmpxchg(&_claimed_idx, current, current + stride) == current);
    }
    if (process_block) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif
      }
    }
  }

  _finished = true;
}
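
// Worked example of the claiming protocol above (illustrative comment only,
// not part of the original sources): with stride = 256, suppose threads T1
// and T2 walk the same heap. Both reach blob index 0 and race on
// Atomic::cmpxchg(&_claimed_idx, 0, 256). If T1 wins, it processes blobs
// 0..255 while T2 fast-forwards past them with process_block == false. At
// index 256 both threads race again on cmpxchg(&_claimed_idx, 256, 512),
// and so on. Each blob is therefore handed to the closure exactly once, at
// the cost of every thread traversing the full blob list.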

ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
int ShenandoahCodeRoots::_disarmed_value = 1;

void ShenandoahCodeRoots::initialize() {
  _nmethod_table = new ShenandoahNMethodTable();
}

void ShenandoahCodeRoots::register_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1:
      break;
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->register_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->unregister_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->flush_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahCodeRoots::prepare_concurrent_unloading() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  _disarmed_value++;
  // 0 is reserved for new nmethod
  if (_disarmed_value == 0) {
    _disarmed_value = 1;
  }

  JavaThreadIteratorWithHandle jtiwh;
  for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
    ShenandoahThreadLocalData::set_disarmed_value(thr, _disarmed_value);
  }
}
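
// A minimal sketch of how the disarmed value behaves as an epoch counter
// (illustrative; the guard/accessor names below are assumptions, not the
// upstream API, and the actual check lives in the nmethod entry barrier).
// Each unloading cycle bumps _disarmed_value, which implicitly re-arms every
// nmethod whose stored guard value no longer matches the thread-local copy;
// freshly created nmethods carry the reserved guard value 0, which is never
// a valid disarmed value, so they always start out armed:
//
//   // hypothetical pseudo-check, evaluated on nmethod entry:
//   bool armed = (guard_value(nm) != disarmed_value_of(thread));
//
// ShenandoahNMethod::disarm_nmethod() (used in the unlink closure below)
// then amounts to storing the current cycle's _disarmed_value into the
// nmethod's guard.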

class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool            _unloading_occurred;
  volatile bool   _failed;
  ShenandoahHeap* _heap;

  void set_failed() {
    Atomic::store(&_failed, true);
  }

  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per-nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }
public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
    _unloading_occurred(unloading_occurred),
    _failed(false),
    _heap(ShenandoahHeap::heap()) {}

  virtual void do_nmethod(nmethod* nm) {
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    if (nm->is_unloading()) {
      ShenandoahReentrantLocker locker(nm_data->lock());
      unlink(nm);
      return;
    }

    ShenandoahReentrantLocker locker(nm_data->lock());

    // Heal oops and disarm
    ShenandoahNMethod::heal_nmethod(nm);
    ShenandoahNMethod::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure      _cl;
  ICRefillVerifier*                   _verifier;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ShenandoahEvacOOMScope evac_scope;
    ICRefillVerifierMark mark(_verifier);
    _iterator.nmethods_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }
};

void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;

    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}
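
// Control flow of the retry loop above, spelled out (illustrative comment
// only): unlinking can fail when the workers exhaust the transitional IC
// stub buffer, so the loop is effectively
//
//   for (;;) {
//     run ShenandoahUnlinkTask over all registered nmethods;
//     if (task.success()) return;          // no worker ran out of IC stubs
//     leave the suspendible thread set;    // refilling may safepoint
//     InlineCacheBuffer::refill_ic_stubs();
//   }
//
// Purging (below) is expected to run only after the handshake that separates
// unlink and purge (see the comment in ShenandoahNMethodUnlinkClosure::unlink()),
// so no Java thread can still be executing an nmethod that unlink() has
// already detached from its Method.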

class ShenandoahNMethodPurgeClosure : public NMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ShenandoahNMethodPurgeTask : public AbstractGangTask {
private:
  ShenandoahNMethodPurgeClosure      _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    AbstractGangTask("ShenandoahNMethodPurgeTask"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};

void ShenandoahCodeRoots::purge(WorkGang* workers) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
}

ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
        _par_iterator(CodeCache::heaps()),
        _table_snapshot(NULL) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      CodeCache_lock->lock_without_safepoint_check();
      _table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // No need to do anything here
      break;
    }
    case 2: {
      ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
      _table_snapshot = NULL;
      CodeCache_lock->unlock();
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

template<bool CSET_FILTER>
void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) {
  switch (ShenandoahCodeRootsStyle) {
    case 0: {
      if (_seq_claimed.try_set()) {
        CodeCache::blobs_do(f);
      }
      break;
    }
    case 1: {
      _par_iterator.parallel_blobs_do(f);
      break;
    }
    case 2: {
      ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f);
}

void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
  ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f);
}

template <bool CSET_FILTER>
void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  assert(_table_snapshot != NULL, "Sanity");
  _table_snapshot->parallel_blobs_do<CSET_FILTER>(f);
}
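
// Hypothetical usage sketch (illustrative only; my_oop_closure and the call
// site are assumptions, not code from this file): the iterator is meant to
// be constructed once, at a safepoint, by the coordinating thread (the
// constructor asserts it is not acquired by workers) and then shared, so
// each GC worker can call possibly_parallel_blobs_do() concurrently:
//
//   ShenandoahCsetCodeRootsIterator iter;  // takes CodeCache_lock, snapshots the table
//   // ... iter is handed to each worker, which does:
//   MarkingCodeBlobClosure blobs(&my_oop_closure, CodeBlobToOopClosure::FixRelocations);
//   iter.possibly_parallel_blobs_do(&blobs);
//   // the destructor releases the snapshot and CodeCache_lock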