1 /* 2 * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

// Builds one per-heap parallel iterator for every code heap, so a worker
// gang can walk the whole code cache. The iterator array lives in C heap
// and is owned by this object.
ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator(const GrowableArray<CodeHeap*>* heaps) {
  _length = heaps->length();
  _iters = NEW_C_HEAP_ARRAY(ShenandoahParallelCodeHeapIterator, _length, mtGC);
  for (int h = 0; h < _length; h++) {
    _iters[h] = ShenandoahParallelCodeHeapIterator(heaps->at(h));
  }
}

ShenandoahParallelCodeCacheIterator::~ShenandoahParallelCodeCacheIterator() {
  // NOTE(review): the type argument ("ParallelCodeHeapIterator") does not match
  // the allocation type above ("ShenandoahParallelCodeHeapIterator"). Harmless
  // if FREE_C_HEAP_ARRAY ignores the type parameter for deallocation, but the
  // mismatch is worth confirming/cleaning up.
  FREE_C_HEAP_ARRAY(ParallelCodeHeapIterator, _iters);
}

// Applies the closure to blobs of every code heap. Safe to call from several
// worker threads at once: the claiming happens inside each per-heap iterator.
void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) {
  for (int c = 0; c < _length; c++) {
    _iters[c].parallel_blobs_do(f);
  }
}

ShenandoahParallelCodeHeapIterator::ShenandoahParallelCodeHeapIterator(CodeHeap* heap) :
        _heap(heap), _claimed_idx(0), _finished(false) {
}

void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");

  /*
   * Parallel code heap walk.
   *
   * This code makes all threads scan all code heaps, but only one thread would execute the
   * closure on given blob. This is achieved by recording the "claimed" blocks: if a thread
   * had claimed the block, it can process all blobs in it. Others have to fast-forward to
   * next attempt without processing.
   *
   * Late threads would return immediately if iterator is finished.
   */

  if (_finished) {
    return;
  }

  int stride = 256; // educated guess
  int stride_mask = stride - 1;
  assert (is_power_of_2(stride), "sanity");

  int count = 0;
  bool process_block = true;

  for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
    int current = count++;
    if ((current & stride_mask) == 0) {
      // At a stride boundary: try to claim the next stride-sized chunk by
      // CAS-ing _claimed_idx forward. The winner processes the whole chunk;
      // losers fast-forward (process_block stays false) until the next boundary.
      process_block = (current >= _claimed_idx) &&
                      (Atomic::cmpxchg(&_claimed_idx, current, current + stride) == current);
    }
    if (process_block) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod())
          Universe::heap()->verify_nmethod((nmethod*)cb);
#endif
      }
    }
  }

  // Mark the walk done so late-arriving threads return immediately.
  _finished = true;
}

ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
// Current "disarmed" epoch published to nmethod entry barriers; value 0 is
// reserved for freshly created nmethods (see prepare_concurrent_unloading).
int ShenandoahCodeRoots::_disarmed_value = 1;

void ShenandoahCodeRoots::initialize() {
  _nmethod_table = new ShenandoahNMethodTable();
}

// Registers a newly installed nmethod with the GC. Only the table-based
// root style (ShenandoahCodeRootsStyle == 2) keeps its own bookkeeping;
// styles 0 and 1 walk the code cache directly and need no registration.
void ShenandoahCodeRoots::register_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1:
      break;
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->register_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

// Removes an nmethod from the GC's table (style 2 only; other styles keep
// no per-nmethod state).
void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->unregister_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

// Final cleanup of the GC-side data for an nmethod being flushed from the
// code cache (style 2 only).
void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->flush_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

// Bumps the global disarmed epoch and publishes it into every Java thread's
// TLS, so that all previously disarmed nmethods are considered armed again
// for the upcoming concurrent unloading cycle. Must run at a safepoint so
// the thread set and the epoch change atomically w.r.t. mutators.
void ShenandoahCodeRoots::prepare_concurrent_unloading() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  _disarmed_value ++;
  // 0 is reserved for new nmethod
  if (_disarmed_value == 0) {
    _disarmed_value = 1;
  }

  JavaThreadIteratorWithHandle jtiwh;
  for (JavaThread *thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
    ShenandoahThreadLocalData::set_disarmed_value(thr, _disarmed_value);
  }
}

// Per-nmethod work for the concurrent "unlink" phase of class unloading:
//  - unloading nmethods are unlinked from their Method and have their
//    dependencies flushed;
//  - live nmethods have their oops healed, are disarmed, and get their
//    compiled ICs / exception caches cleaned.
// If IC cleaning fails (out of transitional IC stubs), _failed is set so
// the caller can refill the stubs and retry the whole pass.
class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool _unloading_occurred;
  volatile bool _failed;       // set by any worker; read via Atomic::load
  ShenandoahHeap* _heap;

  void set_failed() {
    Atomic::store(&_failed, true);
  }

  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }
public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false),
      _heap(ShenandoahHeap::heap()) {}

  virtual void do_nmethod(nmethod* nm) {
    // Once any worker failed, the pass will be retried; skip further work.
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    if (nm->is_unloading()) {
      // Unlink under the per-nmethod lock; evacuation may trigger, so guard
      // against evacuation OOM with an OOM scope.
      ShenandoahReentrantLocker locker(nm_data->lock());
      ShenandoahEvacOOMScope evac_scope;
      unlink(nm);
      return;
    }

    ShenandoahReentrantLocker locker(nm_data->lock());

    // Heal oops and disarm
    ShenandoahEvacOOMScope evac_scope;
    ShenandoahNMethod::heal_nmethod(nm);
    ShenandoahNMethod::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      // Ran out of transitional IC stubs; flag for retry after a refill.
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

// Gang task driving ShenandoahNMethodUnlinkClosure over all registered
// nmethods. Iteration begin/end is bracketed under CodeCache_lock in the
// constructor/destructor; workers then iterate lock-free via the concurrent
// nmethod iterator.
class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure _cl;
  ICRefillVerifier* _verifier;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ICRefillVerifierMark mark(_verifier);
    _iterator.nmethods_do(&_cl);
  }

  // True when no worker ran out of IC transition stubs.
  bool success() const {
    return !_cl.failed();
  }
};

// Runs the unlink phase, retrying until it completes without running out of
// transitional IC stubs.
void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;

    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}

// Per-nmethod work for the "purge" phase: make unloading nmethods unloaded.
class ShenandoahNMethodPurgeClosure : public NMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

// Gang task driving ShenandoahNMethodPurgeClosure over all registered
// nmethods, with the same begin/end bracketing under CodeCache_lock as
// ShenandoahUnlinkTask.
class ShenandoahNMethodPurgeTask : public AbstractGangTask {
private:
  ShenandoahNMethodPurgeClosure _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    AbstractGangTask("ShenandoahNMethodPurgeTask"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};

// Runs the purge phase over the worker gang.
void ShenandoahCodeRoots::purge(WorkGang* workers) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
}
336 ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() : 337 _par_iterator(CodeCache::heaps()), 338 _table_snapshot(NULL) { 339 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); 340 assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers"); 341 switch (ShenandoahCodeRootsStyle) { 342 case 0: 343 case 1: { 344 // No need to do anything here 345 break; 346 } 347 case 2: { 348 CodeCache_lock->lock_without_safepoint_check(); 349 _table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration(); 350 break; 351 } 352 default: 353 ShouldNotReachHere(); 354 } 355 } 356 357 ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() { 358 switch (ShenandoahCodeRootsStyle) { 359 case 0: 360 case 1: { 361 // No need to do anything here 362 break; 363 } 364 case 2: { 365 ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot); 366 _table_snapshot = NULL; 367 CodeCache_lock->unlock(); 368 break; 369 } 370 default: 371 ShouldNotReachHere(); 372 } 373 } 374 375 template<bool CSET_FILTER> 376 void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) { 377 switch (ShenandoahCodeRootsStyle) { 378 case 0: { 379 if (_seq_claimed.try_set()) { 380 CodeCache::blobs_do(f); 381 } 382 break; 383 } 384 case 1: { 385 _par_iterator.parallel_blobs_do(f); 386 break; 387 } 388 case 2: { 389 ShenandoahCodeRootsIterator::fast_parallel_blobs_do<CSET_FILTER>(f); 390 break; 391 } 392 default: 393 ShouldNotReachHere(); 394 } 395 } 396 397 void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) { 398 ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<false>(f); 399 } 400 401 void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) { 402 ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do<true>(f); 403 } 404 405 template <bool CSET_FILTER> 406 void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) { 407 
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); 408 assert(_table_snapshot != NULL, "Sanity"); 409 _table_snapshot->parallel_blobs_do<CSET_FILTER>(f); 410 } 411