1 /* 2 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 21 * 22 */ 23 24 #include "precompiled.hpp" 25 #include "memory/allocation.hpp" 26 27 #include "gc_implementation/shared/gcTimer.hpp" 28 #include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp" 29 #include "gc_implementation/shared/parallelCleaning.hpp" 30 31 #include "gc_implementation/shenandoah/shenandoahBrooksPointer.hpp" 32 #include "gc_implementation/shenandoah/shenandoahAllocTracker.hpp" 33 #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" 34 #include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" 35 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" 36 #include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp" 37 #include "gc_implementation/shenandoah/shenandoahControlThread.hpp" 38 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" 39 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" 40 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" 41 #include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" 42 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp" 43 #include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp" 44 #include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp" 45 #include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp" 46 #include "gc_implementation/shenandoah/shenandoahMetrics.hpp" 47 #include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp" 48 #include "gc_implementation/shenandoah/shenandoahPacer.inline.hpp" 49 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp" 50 #include "gc_implementation/shenandoah/shenandoahUtils.hpp" 51 #include "gc_implementation/shenandoah/shenandoahVerifier.hpp" 52 #include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp" 53 #include "gc_implementation/shenandoah/shenandoahVMOperations.hpp" 54 #include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp" 55 #include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp" 56 #include "gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" 57 #include "gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" 58 #include "gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" 59 #include "gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp" 60 #include "gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" 61 62 #include "memory/metaspace.hpp" 63 #include "runtime/vmThread.hpp" 64 #include "services/mallocTracker.hpp" 65 66 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {} 67 68 #ifdef ASSERT 69 template <class T> 70 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) { 71 T o = oopDesc::load_heap_oop(p); 72 if (! oopDesc::is_null(o)) { 73 oop obj = oopDesc::decode_heap_oop_not_null(o); 74 shenandoah_assert_not_forwarded(p, obj); 75 } 76 } 77 78 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); } 79 void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_nv(p); } 80 #endif 81 82 class ShenandoahPretouchHeapTask : public AbstractGangTask { 83 private: 84 ShenandoahRegionIterator _regions; 85 const size_t _page_size; 86 public: 87 ShenandoahPretouchHeapTask(size_t page_size) : 88 AbstractGangTask("Shenandoah Pretouch Heap"), 89 _page_size(page_size) {} 90 91 virtual void work(uint worker_id) { 92 ShenandoahHeapRegion* r = _regions.next(); 93 while (r != NULL) { 94 os::pretouch_memory((char*) r->bottom(), (char*) r->end()); 95 r = _regions.next(); 96 } 97 } 98 }; 99 100 class ShenandoahPretouchBitmapTask : public AbstractGangTask { 101 private: 102 ShenandoahRegionIterator _regions; 103 char* _bitmap_base; 104 const size_t _bitmap_size; 105 const size_t _page_size; 106 public: 107 ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) : 108 AbstractGangTask("Shenandoah Pretouch Bitmap"), 109 _bitmap_base(bitmap_base), 110 _bitmap_size(bitmap_size), 111 _page_size(page_size) {} 112 113 virtual void work(uint worker_id) { 114 ShenandoahHeapRegion* r = _regions.next(); 115 while (r != NULL) { 116 size_t start = r->region_number() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); 117 size_t end = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); 118 assert (end <= _bitmap_size, err_msg("end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size)); 119 120 os::pretouch_memory(_bitmap_base + start, _bitmap_base + end); 121 122 r = _regions.next(); 123 } 124 } 125 }; 126 127 jint ShenandoahHeap::initialize() { 128 CollectedHeap::pre_initialize(); 129 130 ShenandoahBrooksPointer::initial_checks(); 131 132 initialize_heuristics(); 133 134 // 135 // Figure out heap sizing 136 // 137 138 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); 139 size_t max_byte_size = collector_policy()->max_heap_byte_size(); 140 size_t heap_alignment = collector_policy()->heap_alignment(); 141 142 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes(); 143 144 if (ShenandoahAlwaysPreTouch) { 145 // Enabled pre-touch means the entire heap is committed right away. 146 init_byte_size = max_byte_size; 147 } 148 149 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap"); 150 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap"); 151 152 _num_regions = ShenandoahHeapRegion::region_count(); 153 154 size_t num_committed_regions = init_byte_size / reg_size_bytes; 155 num_committed_regions = MIN2(num_committed_regions, _num_regions); 156 assert(num_committed_regions <= _num_regions, "sanity"); 157 158 _initial_size = num_committed_regions * reg_size_bytes; 159 _committed = _initial_size; 160 161 size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size(); 162 size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size(); 163 164 // 165 // Reserve and commit memory for heap 166 // 167 168 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment); 169 _reserved.set_word_size(0); 170 _reserved.set_start((HeapWord*)heap_rs.base()); 171 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); 172 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize); 173 _heap_region_special = heap_rs.special(); 174 175 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0, 176 err_msg("Misaligned heap: " PTR_FORMAT, p2i(base()))); 177 178 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size); 179 if (!_heap_region_special) { 180 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false, 181 "Cannot commit heap memory"); 182 } 183 184 // 185 // Reserve and commit memory for bitmap(s) 186 // 187 188 _bitmap_size = MarkBitMap::compute_size(heap_rs.size()); 189 _bitmap_size = align_size_up(_bitmap_size, bitmap_page_size); 190 191 size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor(); 192 193 guarantee(bitmap_bytes_per_region != 0, 194 err_msg("Bitmap bytes per region should not be zero")); 195 guarantee(is_power_of_2(bitmap_bytes_per_region), 196 err_msg("Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region)); 197 198 if (bitmap_page_size > bitmap_bytes_per_region) { 199 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region; 200 _bitmap_bytes_per_slice = bitmap_page_size; 201 } else { 202 _bitmap_regions_per_slice = 1; 203 _bitmap_bytes_per_slice = bitmap_bytes_per_region; 204 } 205 206 guarantee(_bitmap_regions_per_slice >= 1, 207 err_msg("Should have at least one region per slice: " SIZE_FORMAT, 208 _bitmap_regions_per_slice)); 209 210 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0, 211 err_msg("Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT, 212 _bitmap_bytes_per_slice, bitmap_page_size)); 213 214 ReservedSpace bitmap(_bitmap_size, bitmap_page_size); 215 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC); 216 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize); 217 _bitmap_region_special = bitmap.special(); 218 219 size_t bitmap_init_commit = _bitmap_bytes_per_slice * 220 align_size_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice; 221 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit); 222 if (!_bitmap_region_special) { 223 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false, 224 "Cannot commit bitmap memory"); 225 } 226 227 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions); 228 229 if (ShenandoahVerify) { 230 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size); 231 if (!verify_bitmap.special()) { 232 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false, 233 "Cannot commit verification bitmap memory"); 234 } 235 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC); 236 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize); 237 _verification_bit_map.initialize(_heap_region, verify_bitmap_region); 238 _verifier = new ShenandoahVerifier(this, &_verification_bit_map); 239 } 240 241 // Reserve aux bitmap for use in object_iterate(). We don't commit it here. 242 ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size); 243 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC); 244 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize); 245 _aux_bitmap_region_special = aux_bitmap.special(); 246 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region); 247 248 // 249 // Create regions and region sets 250 // 251 252 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC); 253 _free_set = new ShenandoahFreeSet(this, _num_regions); 254 _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)sh_rs.base()); 255 256 { 257 ShenandoahHeapLocker locker(lock()); 258 259 size_t size_words = ShenandoahHeapRegion::region_size_words(); 260 261 for (size_t i = 0; i < _num_regions; i++) { 262 HeapWord* start = (HeapWord*)sh_rs.base() + size_words * i; 263 bool is_committed = i < num_committed_regions; 264 ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this, start, size_words, i, is_committed); 265 266 _marking_context->initialize_top_at_mark_start(r); 267 _regions[i] = r; 268 assert(!collection_set()->is_in(i), "New region should not be in collection set"); 269 } 270 271 // Initialize to complete 272 _marking_context->mark_complete(); 273 274 _free_set->rebuild(); 275 } 276 277 if (ShenandoahAlwaysPreTouch) { 278 assert(!AlwaysPreTouch, "Should have been overridden"); 279 280 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads, 281 // before initialize() below zeroes it with initializing thread. For any given region, 282 // we touch the region and the corresponding bitmaps from the same thread. 283 ShenandoahPushWorkerScope scope(workers(), _max_workers, false); 284 285 size_t pretouch_heap_page_size = heap_page_size; 286 size_t pretouch_bitmap_page_size = bitmap_page_size; 287 288 #ifdef LINUX 289 // UseTransparentHugePages would madvise that backing memory can be coalesced into huge 290 // pages. But, the kernel needs to know that every small page is used, in order to coalesce 291 // them into huge one. Therefore, we need to pretouch with smaller pages. 292 if (UseTransparentHugePages) { 293 pretouch_heap_page_size = (size_t)os::vm_page_size(); 294 pretouch_bitmap_page_size = (size_t)os::vm_page_size(); 295 } 296 #endif 297 298 // OS memory managers may want to coalesce back-to-back pages. Make their jobs 299 // simpler by pre-touching continuous spaces (heap and bitmap) separately. 300 301 log_info(gc, init)("Pretouch bitmap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page", 302 _num_regions, pretouch_bitmap_page_size); 303 ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, pretouch_bitmap_page_size); 304 _workers->run_task(&bcl); 305 306 log_info(gc, init)("Pretouch heap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page", 307 _num_regions, pretouch_heap_page_size); 308 ShenandoahPretouchHeapTask hcl(pretouch_heap_page_size); 309 _workers->run_task(&hcl); 310 } 311 312 // 313 // Initialize the rest of GC subsystems 314 // 315 316 set_barrier_set(new ShenandoahBarrierSet(this)); 317 318 _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC); 319 for (uint worker = 0; worker < _max_workers; worker++) { 320 _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC); 321 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort)); 322 } 323 324 // The call below uses stuff (the SATB* things) that are in G1, but probably 325 // belong into a shared location. 326 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, 327 SATB_Q_FL_lock, 328 20 /*G1SATBProcessCompletedThreshold */, 329 Shared_SATB_Q_lock); 330 331 _monitoring_support = new ShenandoahMonitoringSupport(this); 332 _phase_timings = new ShenandoahPhaseTimings(); 333 ShenandoahStringDedup::initialize(); 334 ShenandoahCodeRoots::initialize(); 335 336 if (ShenandoahAllocationTrace) { 337 _alloc_tracker = new ShenandoahAllocTracker(); 338 } 339 340 if (ShenandoahPacing) { 341 _pacer = new ShenandoahPacer(this); 342 _pacer->setup_for_idle(); 343 } else { 344 _pacer = NULL; 345 } 346 347 _control_thread = new ShenandoahControlThread(); 348 349 log_info(gc, init)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s", 350 byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size)); 351 352 return JNI_OK; 353 } 354 355 #ifdef _MSC_VER 356 #pragma warning( push ) 357 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list 358 #endif 359 360 void ShenandoahHeap::initialize_heuristics() { 361 if (ShenandoahGCHeuristics != NULL) { 362 if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { 363 _heuristics = new ShenandoahAggressiveHeuristics(); 364 } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) { 365 _heuristics = new ShenandoahStaticHeuristics(); 366 } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) { 367 _heuristics = new ShenandoahAdaptiveHeuristics(); 368 } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) { 369 _heuristics = new ShenandoahPassiveHeuristics(); 370 } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) { 371 _heuristics = new ShenandoahCompactHeuristics(); 372 } else { 373 vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option"); 374 } 375 376 if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) { 377 vm_exit_during_initialization( 378 err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.", 379 _heuristics->name())); 380 } 381 if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) { 382 vm_exit_during_initialization( 383 err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", 384 _heuristics->name())); 385 } 386 log_info(gc, init)("Shenandoah heuristics: %s", 387 _heuristics->name()); 388 } else { 389 ShouldNotReachHere(); 390 } 391 } 392 393 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : 394 SharedHeap(policy), 395 _shenandoah_policy(policy), 396 _heap_region_special(false), 397 _regions(NULL), 398 _free_set(NULL), 399 _collection_set(NULL), 400 _update_refs_iterator(this), 401 _bytes_allocated_since_gc_start(0), 402 _max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)), 403 _ref_processor(NULL), 404 _marking_context(NULL), 405 _bitmap_size(0), 406 _bitmap_regions_per_slice(0), 407 _bitmap_bytes_per_slice(0), 408 _bitmap_region_special(false), 409 _aux_bitmap_region_special(false), 410 _liveness_cache(NULL), 411 _aux_bit_map(), 412 _verifier(NULL), 413 _pacer(NULL), 414 _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), 415 _phase_timings(NULL), 416 _alloc_tracker(NULL) 417 { 418 log_info(gc, init)("GC threads: " UINTX_FORMAT " parallel, " UINTX_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads); 419 log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial"); 420 421 _scm = new ShenandoahConcurrentMark(); 422 _full_gc = new ShenandoahMarkCompact(); 423 _used = 0; 424 425 _max_workers = MAX2(_max_workers, 1U); 426 _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers, 427 /* are_GC_task_threads */true, 428 /* are_ConcurrentGC_threads */false); 429 if (_workers == NULL) { 430 vm_exit_during_initialization("Failed necessary allocation."); 431 } else { 432 _workers->initialize_workers(); 433 } 434 } 435 436 #ifdef _MSC_VER 437 #pragma warning( pop ) 438 #endif 439 440 class ShenandoahResetBitmapTask : public AbstractGangTask { 441 private: 442 ShenandoahRegionIterator _regions; 443 444 public: 445 ShenandoahResetBitmapTask() : 446 AbstractGangTask("Parallel Reset Bitmap Task") {} 447 448 void work(uint worker_id) { 449 ShenandoahHeapRegion* region = _regions.next(); 450 ShenandoahHeap* heap = ShenandoahHeap::heap(); 451 ShenandoahMarkingContext* const ctx = heap->marking_context(); 452 while (region != NULL) { 453 if (heap->is_bitmap_slice_committed(region)) { 454 ctx->clear_bitmap(region); 455 } 456 region = _regions.next(); 457 } 458 } 459 }; 460 461 void ShenandoahHeap::reset_mark_bitmap() { 462 assert_gc_workers(_workers->active_workers()); 463 mark_incomplete_marking_context(); 464 465 ShenandoahResetBitmapTask task; 466 _workers->run_task(&task); 467 } 468 469 void ShenandoahHeap::print_on(outputStream* st) const { 470 st->print_cr("Shenandoah Heap"); 471 st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used", 472 capacity() / K, committed() / K, used() / K); 473 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions", 474 num_regions(), ShenandoahHeapRegion::region_size_bytes() / K); 475 476 st->print("Status: "); 477 if (has_forwarded_objects()) st->print("has forwarded objects, "); 478 if (is_concurrent_mark_in_progress()) st->print("marking, "); 479 if (is_evacuation_in_progress()) st->print("evacuating, "); 480 if (is_update_refs_in_progress()) st->print("updating refs, "); 481 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, "); 482 if (is_full_gc_in_progress()) st->print("full gc, "); 483 if (is_full_gc_move_in_progress()) st->print("full gc move, "); 484 485 if (cancelled_gc()) { 486 st->print("cancelled"); 487 } else { 488 st->print("not cancelled"); 489 } 490 st->cr(); 491 492 st->print_cr("Reserved region:"); 493 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ", 494 p2i(reserved_region().start()), 495 p2i(reserved_region().end())); 496 497 st->cr(); 498 MetaspaceAux::print_on(st); 499 500 if (Verbose) { 501 print_heap_regions_on(st); 502 } 503 } 504 505 class ShenandoahInitGCLABClosure : public ThreadClosure { 506 public: 507 void do_thread(Thread* thread) { 508 assert(thread == NULL || !thread->is_Java_thread(), "Don't expect JavaThread this early"); 509 if (thread != NULL && thread->is_Worker_thread()) { 510 thread->gclab().initialize(true); 511 } 512 } 513 }; 514 515 void ShenandoahHeap::post_initialize() { 516 if (UseTLAB) { 517 MutexLocker ml(Threads_lock); 518 519 ShenandoahInitGCLABClosure init_gclabs; 520 Threads::threads_do(&init_gclabs); 521 } 522 523 _scm->initialize(_max_workers); 524 _full_gc->initialize(_gc_timer); 525 526 ref_processing_init(); 527 528 _heuristics->initialize(); 529 } 530 531 size_t ShenandoahHeap::used() const { 532 OrderAccess::acquire(); 533 return (size_t) _used; 534 } 535 536 size_t ShenandoahHeap::committed() const { 537 OrderAccess::acquire(); 538 return _committed; 539 } 540 541 void ShenandoahHeap::increase_committed(size_t bytes) { 542 assert_heaplock_or_safepoint(); 543 _committed += bytes; 544 } 545 546 void ShenandoahHeap::decrease_committed(size_t bytes) { 547 assert_heaplock_or_safepoint(); 548 _committed -= bytes; 549 } 550 551 void ShenandoahHeap::increase_used(size_t bytes) { 552 Atomic::add(bytes, &_used); 553 } 554 555 void ShenandoahHeap::set_used(size_t bytes) { 556 OrderAccess::release_store_fence(&_used, bytes); 557 } 558 559 void ShenandoahHeap::decrease_used(size_t bytes) { 560 assert(used() >= bytes, "never decrease heap size by more than we've left"); 561 Atomic::add(-(jlong)bytes, &_used); 562 } 563 564 void ShenandoahHeap::increase_allocated(size_t bytes) { 565 Atomic::add(bytes, &_bytes_allocated_since_gc_start); 566 } 567 568 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) { 569 size_t bytes = words * HeapWordSize; 570 if (!waste) { 571 increase_used(bytes); 572 } 573 increase_allocated(bytes); 574 if (ShenandoahPacing) { 575 control_thread()->pacing_notify_alloc(words); 576 if (waste) { 577 pacer()->claim_for_alloc(words, true); 578 } 579 } 580 } 581 582 size_t ShenandoahHeap::capacity() const { 583 return num_regions() * ShenandoahHeapRegion::region_size_bytes(); 584 } 585 586 size_t ShenandoahHeap::max_capacity() const { 587 return _num_regions * ShenandoahHeapRegion::region_size_bytes(); 588 } 589 590 size_t ShenandoahHeap::initial_capacity() const { 591 return _initial_size; 592 } 593 594 bool ShenandoahHeap::is_in(const void* p) const { 595 HeapWord* heap_base = (HeapWord*) base(); 596 HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions(); 597 return p >= heap_base && p < last_region_end; 598 } 599 600 void ShenandoahHeap::op_uncommit(double shrink_before) { 601 assert (ShenandoahUncommit, "should be enabled"); 602 603 size_t count = 0; 604 for (size_t i = 0; i < num_regions(); i++) { 605 ShenandoahHeapRegion* r = get_region(i); 606 if (r->is_empty_committed() && (r->empty_time() < shrink_before)) { 607 ShenandoahHeapLocker locker(lock()); 608 if (r->is_empty_committed()) { 609 r->make_uncommitted(); 610 count++; 611 } 612 } 613 SpinPause(); // allow allocators to take the lock 614 } 615 616 if (count > 0) { 617 log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used", 618 count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M); 619 _control_thread->notify_heap_changed(); 620 } 621 } 622 623 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) { 624 // Retain tlab and allocate object in shared space if 625 // the amount free in the tlab is too large to discard. 626 if (thread->gclab().free() > thread->gclab().refill_waste_limit()) { 627 thread->gclab().record_slow_allocation(size); 628 return NULL; 629 } 630 631 // Discard gclab and allocate a new one. 632 // To minimize fragmentation, the last GCLAB may be smaller than the rest. 633 size_t new_gclab_size = thread->gclab().compute_size(size); 634 635 thread->gclab().clear_before_allocation(); 636 637 if (new_gclab_size == 0) { 638 return NULL; 639 } 640 641 // Allocated object should fit in new GCLAB, and new_gclab_size should be larger than min 642 size_t min_size = MAX2(size + ThreadLocalAllocBuffer::alignment_reserve(), ThreadLocalAllocBuffer::min_size()); 643 new_gclab_size = MAX2(new_gclab_size, min_size); 644 645 // Allocate a new GCLAB... 646 size_t actual_size = 0; 647 HeapWord* obj = allocate_new_gclab(min_size, new_gclab_size, &actual_size); 648 649 if (obj == NULL) { 650 return NULL; 651 } 652 653 assert (size <= actual_size, "allocation should fit"); 654 655 if (ZeroTLAB) { 656 // ..and clear it. 657 Copy::zero_to_words(obj, actual_size); 658 } else { 659 // ...and zap just allocated object. 660 #ifdef ASSERT 661 // Skip mangling the space corresponding to the object header to 662 // ensure that the returned space is not considered parsable by 663 // any concurrent GC thread. 664 size_t hdr_size = oopDesc::header_size(); 665 Copy::fill_to_words(obj + hdr_size, actual_size - hdr_size, badHeapWordVal); 666 #endif // ASSERT 667 } 668 thread->gclab().fill(obj, obj + size, actual_size); 669 return obj; 670 } 671 672 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) { 673 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(word_size); 674 return allocate_memory(req); 675 } 676 677 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size, 678 size_t word_size, 679 size_t* actual_size) { 680 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size); 681 HeapWord* res = allocate_memory(req); 682 if (res != NULL) { 683 *actual_size = req.actual_size(); 684 } else { 685 *actual_size = 0; 686 } 687 return res; 688 } 689 690 ShenandoahHeap* ShenandoahHeap::heap() { 691 CollectedHeap* heap = Universe::heap(); 692 assert(heap != NULL, "Unitialized access to ShenandoahHeap::heap()"); 693 assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap"); 694 return (ShenandoahHeap*) heap; 695 } 696 697 ShenandoahHeap* ShenandoahHeap::heap_no_check() { 698 CollectedHeap* heap = Universe::heap(); 699 return (ShenandoahHeap*) heap; 700 } 701 702 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) { 703 ShenandoahAllocTrace trace_alloc(req.size(), req.type()); 704 705 intptr_t pacer_epoch = 0; 706 bool in_new_region = false; 707 HeapWord* result = NULL; 708 709 if (req.is_mutator_alloc()) { 710 if (ShenandoahPacing) { 711 pacer()->pace_for_alloc(req.size()); 712 pacer_epoch = pacer()->epoch(); 713 } 714 715 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) { 716 result = allocate_memory_under_lock(req, in_new_region); 717 } 718 719 // Allocation failed, block until control thread reacted, then retry allocation. 720 // 721 // It might happen that one of the threads requesting allocation would unblock 722 // way later after GC happened, only to fail the second allocation, because 723 // other threads have already depleted the free storage. In this case, a better 724 // strategy is to try again, as long as GC makes progress. 725 // 726 // Then, we need to make sure the allocation was retried after at least one 727 // Full GC, which means we want to try more than ShenandoahFullGCThreshold times. 728 729 size_t tries = 0; 730 731 while (result == NULL && _progress_last_gc.is_set()) { 732 tries++; 733 control_thread()->handle_alloc_failure(req.size()); 734 result = allocate_memory_under_lock(req, in_new_region); 735 } 736 737 while (result == NULL && tries <= ShenandoahFullGCThreshold) { 738 tries++; 739 control_thread()->handle_alloc_failure(req.size()); 740 result = allocate_memory_under_lock(req, in_new_region); 741 } 742 743 } else { 744 assert(req.is_gc_alloc(), "Can only accept GC allocs here"); 745 result = allocate_memory_under_lock(req, in_new_region); 746 // Do not call handle_alloc_failure() here, because we cannot block. 747 // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac(). 748 } 749 750 if (in_new_region) { 751 control_thread()->notify_heap_changed(); 752 } 753 754 if (result != NULL) { 755 size_t requested = req.size(); 756 size_t actual = req.actual_size(); 757 758 assert (req.is_lab_alloc() || (requested == actual), 759 err_msg("Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT, 760 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual)); 761 762 if (req.is_mutator_alloc()) { 763 notify_mutator_alloc_words(actual, false); 764 765 // If we requested more than we were granted, give the rest back to pacer. 766 // This only matters if we are in the same pacing epoch: do not try to unpace 767 // over the budget for the other phase. 768 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) { 769 pacer()->unpace_for_alloc(pacer_epoch, requested - actual); 770 } 771 } else { 772 increase_used(actual*HeapWordSize); 773 } 774 } 775 776 return result; 777 } 778 779 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) { 780 ShenandoahHeapLocker locker(lock()); 781 return _free_set->allocate(req, in_new_region); 782 } 783 784 HeapWord* ShenandoahHeap::mem_allocate(size_t size, 785 bool* gc_overhead_limit_was_exceeded) { 786 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size + ShenandoahBrooksPointer::word_size()); 787 HeapWord* filler = allocate_memory(req); 788 HeapWord* result = filler + ShenandoahBrooksPointer::word_size(); 789 if (filler != NULL) { 790 ShenandoahBrooksPointer::initialize(oop(result)); 791 792 assert(! in_collection_set(result), "never allocate in targetted region"); 793 return result; 794 } else { 795 return NULL; 796 } 797 } 798 799 class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure { 800 private: 801 ShenandoahHeap* _heap; 802 Thread* _thread; 803 public: 804 ShenandoahEvacuateUpdateRootsClosure() : 805 _heap(ShenandoahHeap::heap()), _thread(Thread::current()) { 806 } 807 808 private: 809 template <class T> 810 void do_oop_work(T* p) { 811 assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress"); 812 813 T o = oopDesc::load_heap_oop(p); 814 if (! oopDesc::is_null(o)) { 815 oop obj = oopDesc::decode_heap_oop_not_null(o); 816 if (_heap->in_collection_set(obj)) { 817 shenandoah_assert_marked(p, obj); 818 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); 819 if (oopDesc::unsafe_equals(resolved, obj)) { 820 bool evac; 821 resolved = _heap->evacuate_object(obj, _thread, evac); 822 } 823 oopDesc::encode_store_heap_oop(p, resolved); 824 } 825 } 826 } 827 828 public: 829 void do_oop(oop* p) { 830 do_oop_work(p); 831 } 832 void do_oop(narrowOop* p) { 833 do_oop_work(p); 834 } 835 }; 836 837 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure { 838 private: 839 ShenandoahHeap* const _heap; 840 Thread* const _thread; 841 public: 842 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) : 843 _heap(heap), _thread(Thread::current()) {} 844 845 void do_object(oop p) { 846 shenandoah_assert_marked(NULL, p); 847 if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) { 848 bool evac; 849 _heap->evacuate_object(p, _thread, evac); 850 } 851 } 852 }; 853 854 class ShenandoahEvacuationTask : public AbstractGangTask { 855 private: 856 ShenandoahHeap* const _sh; 857 ShenandoahCollectionSet* const _cs; 858 bool _concurrent; 859 public: 860 ShenandoahEvacuationTask(ShenandoahHeap* sh, 861 ShenandoahCollectionSet* cs, 862 bool concurrent) : 863 AbstractGangTask("Parallel Evacuation Task"), 864 _sh(sh), 865 _cs(cs), 866 _concurrent(concurrent) 867 {} 868 869 void work(uint worker_id) { 870 ShenandoahEvacOOMScope oom_evac_scope; 871 if (_concurrent) { 872 ShenandoahConcurrentWorkerSession worker_session(worker_id); 873 do_work(); 874 } else { 875 ShenandoahParallelWorkerSession worker_session(worker_id); 876 do_work(); 877 } 878 } 879 880 private: 881 void do_work() { 882 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh); 883 ShenandoahHeapRegion* r; 884 while ((r =_cs->claim_next()) != NULL) { 885 assert(r->has_live(), "all-garbage regions are reclaimed early"); 886 _sh->marked_object_iterate(r, &cl); 887 888 if (ShenandoahPacing) { 889 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize); 890 } 891 892 if (_sh->cancelled_gc()) { 893 break; 894 } 895 } 896 } 897 }; 898 899 void ShenandoahHeap::trash_cset_regions() { 900 ShenandoahHeapLocker locker(lock()); 901 902 ShenandoahCollectionSet* set = collection_set(); 903 ShenandoahHeapRegion* r; 904 set->clear_current_index(); 905 while ((r = set->next()) != NULL) { 906 r->make_trash(); 907 } 908 collection_set()->clear(); 909 } 910 911 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const { 912 st->print_cr("Heap Regions:"); 913 st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned"); 914 st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data"); 915 st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)"); 916 917 for (size_t i = 0; i < num_regions(); i++) { 918 get_region(i)->print_on(st); 919 } 920 } 921 922 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) { 923 assert(start->is_humongous_start(), "reclaim regions starting with the first one"); 924 925 oop humongous_obj = oop(start->bottom() + ShenandoahBrooksPointer::word_size()); 926 size_t size = humongous_obj->size() + ShenandoahBrooksPointer::word_size(); 927 size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); 928 size_t index = start->region_number() + required_regions - 1; 929 930 assert(!start->has_live(), "liveness must be zero"); 931 932 for(size_t i = 0; i < required_regions; i++) { 933 // Reclaim from tail. Otherwise, assertion fails when printing region to trace log, 934 // as it expects that every region belongs to a humongous region starting with a humongous start region. 935 ShenandoahHeapRegion* region = get_region(index --); 936 937 assert(region->is_humongous(), "expect correct humongous start or continuation"); 938 assert(!region->is_cset(), "Humongous region should not be in collection set"); 939 940 region->make_trash_immediate(); 941 } 942 } 943 944 class ShenandoahRetireGCLABClosure : public ThreadClosure { 945 private: 946 bool _retire; 947 public: 948 ShenandoahRetireGCLABClosure(bool retire) : _retire(retire) {}; 949 950 void do_thread(Thread* thread) { 951 assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name())); 952 thread->gclab().make_parsable(_retire); 953 } 954 }; 955 956 void ShenandoahHeap::make_parsable(bool retire_tlabs) { 957 if (UseTLAB) { 958 CollectedHeap::ensure_parsability(retire_tlabs); 959 ShenandoahRetireGCLABClosure cl(retire_tlabs); 960 Threads::java_threads_do(&cl); 961 _workers->threads_do(&cl); 962 } 963 } 964 965 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask { 966 private: 967 ShenandoahRootEvacuator* _rp; 968 969 public: 970 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) : 971 AbstractGangTask("Shenandoah evacuate and update roots"), 972 _rp(rp) {} 973 974 void work(uint worker_id) { 975 ShenandoahParallelWorkerSession worker_session(worker_id); 976 ShenandoahEvacOOMScope oom_evac_scope; 977 ShenandoahEvacuateUpdateRootsClosure cl; 978 979 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations); 980 _rp->process_evacuate_roots(&cl, &blobsCl, worker_id); 981 } 982 }; 983 984 void ShenandoahHeap::evacuate_and_update_roots() { 985 COMPILER2_PRESENT(DerivedPointerTable::clear()); 986 987 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped"); 988 989 { 990 ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac); 991 ShenandoahEvacuateUpdateRootsTask roots_task(&rp); 992 workers()->run_task(&roots_task); 993 } 994 995 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 996 } 997 998 void ShenandoahHeap::roots_iterate(OopClosure* cl) { 999 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped"); 1000 1001 CodeBlobToOopClosure blobsCl(cl, false); 1002 CLDToOopClosure cldCl(cl); 1003 1004 ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases); 1005 rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0); 1006 } 1007 1008 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const { 1009 // Returns size in bytes 1010 return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes()); 1011 } 1012 1013 size_t ShenandoahHeap::max_tlab_size() const { 1014 // Returns size in words 1015 return ShenandoahHeapRegion::max_tlab_size_words(); 1016 } 1017 1018 class ShenandoahResizeGCLABClosure : public ThreadClosure { 1019 public: 1020 void do_thread(Thread* thread) { 1021 assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name())); 1022 thread->gclab().resize(); 1023 } 1024 }; 1025 1026 void ShenandoahHeap::resize_all_tlabs() { 1027 CollectedHeap::resize_all_tlabs(); 1028 1029 ShenandoahResizeGCLABClosure cl; 1030 Threads::java_threads_do(&cl); 1031 _workers->threads_do(&cl); 1032 } 1033 1034 class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure { 1035 public: 1036 void do_thread(Thread* thread) { 1037 assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name())); 1038 thread->gclab().accumulate_statistics(); 1039 thread->gclab().initialize_statistics(); 1040 } 1041 }; 1042 1043 void ShenandoahHeap::accumulate_statistics_all_gclabs() { 1044 ShenandoahAccumulateStatisticsGCLABClosure cl; 1045 Threads::java_threads_do(&cl); 1046 _workers->threads_do(&cl); 1047 } 1048 1049 void ShenandoahHeap::collect(GCCause::Cause cause) { 1050 _control_thread->request_gc(cause); 1051 } 1052 1053 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) { 1054 //assert(false, "Shouldn't need to do full collections"); 1055 } 1056 1057 CollectorPolicy* ShenandoahHeap::collector_policy() const { 1058 return _shenandoah_policy; 1059 } 1060 1061 void ShenandoahHeap::resize_tlabs() { 1062 CollectedHeap::resize_all_tlabs(); 1063 } 1064 1065 void ShenandoahHeap::accumulate_statistics_tlabs() { 1066 CollectedHeap::accumulate_statistics_all_tlabs(); 1067 } 1068 1069 HeapWord* ShenandoahHeap::block_start(const void* addr) const { 1070 Space* sp = heap_region_containing(addr); 1071 if (sp != NULL) { 1072 return sp->block_start(addr); 1073 } 1074 return NULL; 1075 } 1076 1077 size_t ShenandoahHeap::block_size(const HeapWord* addr) const { 1078 Space* sp = heap_region_containing(addr); 1079 assert(sp != NULL, "block_size of address outside of heap"); 1080 return sp->block_size(addr); 1081 } 1082 1083 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const { 1084 Space* sp = heap_region_containing(addr); 1085 return sp->block_is_obj(addr); 1086 } 1087 1088 jlong ShenandoahHeap::millis_since_last_gc() { 1089 double v = heuristics()->time_since_last_gc() * 1000; 1090 assert(0 <= v && v <= max_jlong, err_msg("value should fit: %f", v)); 1091 return (jlong)v; 1092 } 1093 1094 void ShenandoahHeap::prepare_for_verify() { 1095 if (SafepointSynchronize::is_at_safepoint()) { 1096 make_parsable(false); 1097 } 1098 } 1099 1100 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const { 1101 workers()->print_worker_threads_on(st); 1102 if (ShenandoahStringDedup::is_enabled()) { 1103 ShenandoahStringDedup::print_worker_threads_on(st); 1104 } 1105 } 1106 1107 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const { 1108 workers()->threads_do(tcl); 1109 if (ShenandoahStringDedup::is_enabled()) { 1110 ShenandoahStringDedup::threads_do(tcl); 1111 } 1112 } 1113 1114 void ShenandoahHeap::print_tracing_info() const { 1115 if (PrintGC || TraceGen0Time || TraceGen1Time) { 1116 ResourceMark rm; 1117 outputStream* out = gclog_or_tty; 1118 phase_timings()->print_on(out); 1119 1120 out->cr(); 1121 out->cr(); 1122 1123 shenandoah_policy()->print_gc_stats(out); 1124 1125 out->cr(); 1126 out->cr(); 1127 1128 if (ShenandoahPacing) { 1129 pacer()->print_on(out); 1130 } 1131 1132 out->cr(); 1133 out->cr(); 1134 1135 if (ShenandoahAllocationTrace) { 1136 assert(alloc_tracker() != NULL, "Must be"); 1137 alloc_tracker()->print_on(out); 1138 } else { 1139 out->print_cr(" Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable."); 1140 } 1141 } 1142 } 1143 1144 void ShenandoahHeap::verify(bool silent, VerifyOption vo) { 1145 if (ShenandoahSafepoint::is_at_shenandoah_safepoint() || ! UseTLAB) { 1146 if (ShenandoahVerify) { 1147 verifier()->verify_generic(vo); 1148 } else { 1149 // TODO: Consider allocating verification bitmaps on demand, 1150 // and turn this on unconditionally. 1151 } 1152 } 1153 } 1154 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const { 1155 return _free_set->capacity(); 1156 } 1157 1158 class ObjectIterateScanRootClosure : public ExtendedOopClosure { 1159 private: 1160 MarkBitMap* _bitmap; 1161 Stack<oop,mtGC>* _oop_stack; 1162 1163 template <class T> 1164 void do_oop_work(T* p) { 1165 T o = oopDesc::load_heap_oop(p); 1166 if (!oopDesc::is_null(o)) { 1167 oop obj = oopDesc::decode_heap_oop_not_null(o); 1168 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); 1169 assert(obj->is_oop(), "must be a valid oop"); 1170 if (!_bitmap->isMarked((HeapWord*) obj)) { 1171 _bitmap->mark((HeapWord*) obj); 1172 _oop_stack->push(obj); 1173 } 1174 } 1175 } 1176 public: 1177 ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) : 1178 _bitmap(bitmap), _oop_stack(oop_stack) {} 1179 void do_oop(oop* p) { do_oop_work(p); } 1180 void do_oop(narrowOop* p) { do_oop_work(p); } 1181 }; 1182 1183 /* 1184 * This is public API, used in preparation of object_iterate(). 1185 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't 1186 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can 1187 * control, we call SH::make_parsable(). 1188 */ 1189 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) { 1190 // No-op. 1191 } 1192 1193 /* 1194 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping. 1195 * 1196 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear 1197 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g. 1198 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear 1199 * scanning therefore depends on having a valid marking bitmap to support it. However, we only 1200 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid 1201 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just 1202 * wiped the bitmap in preparation for next marking). 1203 * 1204 * For all those reasons, we implement object iteration as a single marking traversal, reporting 1205 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap 1206 * is allowed to report dead objects, but is not required to do so. 1207 */ 1208 void ShenandoahHeap::object_iterate(ObjectClosure* cl) { 1209 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints"); 1210 if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) { 1211 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration"); 1212 return; 1213 } 1214 1215 // Reset bitmap 1216 _aux_bit_map.clear(); 1217 1218 Stack<oop,mtGC> oop_stack; 1219 1220 // First, we process all GC roots. This populates the work stack with initial objects. 1221 ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases); 1222 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack); 1223 CLDToOopClosure clds(&oops, false); 1224 CodeBlobToOopClosure blobs(&oops, false); 1225 rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0); 1226 1227 // Work through the oop stack to traverse heap. 1228 while (! oop_stack.is_empty()) { 1229 oop obj = oop_stack.pop(); 1230 assert(obj->is_oop(), "must be a valid oop"); 1231 cl->do_object(obj); 1232 obj->oop_iterate(&oops); 1233 } 1234 1235 assert(oop_stack.is_empty(), "should be empty"); 1236 1237 if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) { 1238 log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration"); 1239 } 1240 } 1241 1242 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) { 1243 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints"); 1244 object_iterate(cl); 1245 } 1246 1247 void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl) { 1248 ObjectToOopClosure cl2(cl); 1249 object_iterate(&cl2); 1250 } 1251 1252 class ShenandoahSpaceClosureRegionClosure: public ShenandoahHeapRegionClosure { 1253 SpaceClosure* _cl; 1254 public: 1255 ShenandoahSpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} 1256 void heap_region_do(ShenandoahHeapRegion* r) { 1257 _cl->do_space(r); 1258 } 1259 }; 1260 1261 void ShenandoahHeap::space_iterate(SpaceClosure* cl) { 1262 ShenandoahSpaceClosureRegionClosure blk(cl); 1263 heap_region_iterate(&blk); 1264 } 1265 1266 Space* ShenandoahHeap::space_containing(const void* oop) const { 1267 Space* res = heap_region_containing(oop); 1268 return res; 1269 } 1270 1271 void ShenandoahHeap::gc_prologue(bool b) { 1272 Unimplemented(); 1273 } 1274 1275 void ShenandoahHeap::gc_epilogue(bool b) { 1276 Unimplemented(); 1277 } 1278 1279 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const { 1280 for (size_t i = 0; i < num_regions(); i++) { 1281 ShenandoahHeapRegion* current = get_region(i); 1282 blk->heap_region_do(current); 1283 } 1284 } 1285 1286 class ShenandoahParallelHeapRegionTask : public AbstractGangTask { 1287 private: 1288 ShenandoahHeap* const _heap; 1289 ShenandoahHeapRegionClosure* const _blk; 1290 1291 char _pad0[DEFAULT_CACHE_LINE_SIZE]; 1292 volatile jint _index; 1293 char _pad1[DEFAULT_CACHE_LINE_SIZE]; 1294 1295 public: 1296 ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) : 1297 AbstractGangTask("Parallel Region Task"), 1298 _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {} 1299 1300 void work(uint worker_id) { 1301 jint stride = (jint)ShenandoahParallelRegionStride; 1302 1303 jint max = (jint)_heap->num_regions(); 1304 while (_index < max) { 1305 jint cur = Atomic::add(stride, &_index) - stride; 1306 jint start = cur; 1307 jint end = MIN2(cur + stride, max); 1308 if (start >= max) break; 1309 1310 for (jint i = cur; i < end; i++) { 1311 ShenandoahHeapRegion* current = _heap->get_region((size_t)i); 1312 _blk->heap_region_do(current); 1313 } 1314 } 1315 } 1316 }; 1317 1318 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const { 1319 assert(blk->is_thread_safe(), "Only thread-safe closures here"); 1320 if (num_regions() > ShenandoahParallelRegionStride) { 1321 ShenandoahParallelHeapRegionTask task(blk); 1322 workers()->run_task(&task); 1323 } else { 1324 heap_region_iterate(blk); 1325 } 1326 } 1327 1328 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure { 1329 private: 1330 ShenandoahMarkingContext* const _ctx; 1331 public: 1332 ShenandoahClearLivenessClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} 1333 1334 void heap_region_do(ShenandoahHeapRegion* r) { 1335 if (r->is_active()) { 1336 r->clear_live_data(); 1337 _ctx->capture_top_at_mark_start(r); 1338 } else { 1339 assert(!r->has_live(), 1340 err_msg("Region " SIZE_FORMAT " should have no live data", r->region_number())); 1341 assert(_ctx->top_at_mark_start(r) == r->top(), 1342 err_msg("Region " SIZE_FORMAT " should already have correct TAMS", r->region_number())); 1343 } 1344 } 1345 1346 bool is_thread_safe() { return true; } 1347 }; 1348 1349 void ShenandoahHeap::op_init_mark() { 1350 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); 1351 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread"); 1352 1353 assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap"); 1354 assert(!marking_context()->is_complete(), "should not be complete"); 1355 1356 if (ShenandoahVerify) { 1357 verifier()->verify_before_concmark(); 1358 } 1359 1360 { 1361 ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats); 1362 accumulate_statistics_tlabs(); 1363 } 1364 1365 if (VerifyBeforeGC) { 1366 Universe::verify(); 1367 } 1368 1369 set_concurrent_mark_in_progress(true); 1370 // We need to reset all TLABs because we'd lose marks on all objects allocated in them. 1371 if (UseTLAB) { 1372 ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable); 1373 make_parsable(true); 1374 } 1375 1376 { 1377 ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness); 1378 ShenandoahClearLivenessClosure clc; 1379 parallel_heap_region_iterate(&clc); 1380 } 1381 1382 // Make above changes visible to worker threads 1383 OrderAccess::fence(); 1384 1385 concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots); 1386 1387 if (UseTLAB) { 1388 ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs); 1389 resize_tlabs(); 1390 } 1391 1392 if (ShenandoahPacing) { 1393 pacer()->setup_for_mark(); 1394 } 1395 } 1396 1397 void ShenandoahHeap::op_mark() { 1398 concurrent_mark()->mark_from_roots(); 1399 } 1400 1401 class ShenandoahCompleteLivenessClosure : public ShenandoahHeapRegionClosure { 1402 private: 1403 ShenandoahMarkingContext* const _ctx; 1404 public: 1405 ShenandoahCompleteLivenessClosure() : _ctx(ShenandoahHeap::heap()->complete_marking_context()) {} 1406 1407 void heap_region_do(ShenandoahHeapRegion* r) { 1408 if (r->is_active()) { 1409 HeapWord *tams = _ctx->top_at_mark_start(r); 1410 HeapWord *top = r->top(); 1411 if (top > tams) { 1412 r->increase_live_data_alloc_words(pointer_delta(top, tams)); 1413 } 1414 } else { 1415 assert(!r->has_live(), 1416 err_msg("Region " SIZE_FORMAT " should have no live data", r->region_number())); 1417 assert(_ctx->top_at_mark_start(r) == r->top(), 1418 err_msg("Region " SIZE_FORMAT " should have correct TAMS", r->region_number())); 1419 } 1420 } 1421 1422 bool is_thread_safe() { return true; } 1423 }; 1424 1425 void ShenandoahHeap::op_final_mark() { 1426 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); 1427 1428 // It is critical that we 1429 // evacuate roots right after finishing marking, so that we don't 1430 // get unmarked objects in the roots. 1431 1432 if (!cancelled_gc()) { 1433 concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false); 1434 1435 TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->reset_taskqueue_stats()); 1436 1437 if (has_forwarded_objects()) { 1438 concurrent_mark()->update_roots(ShenandoahPhaseTimings::update_roots); 1439 } 1440 1441 TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->print_taskqueue_stats()); 1442 1443 stop_concurrent_marking(); 1444 1445 { 1446 ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness); 1447 1448 // All allocations past TAMS are implicitly live, adjust the region data. 1449 // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap. 1450 ShenandoahCompleteLivenessClosure cl; 1451 parallel_heap_region_iterate(&cl); 1452 } 1453 1454 { 1455 ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac); 1456 1457 make_parsable(true); 1458 1459 trash_cset_regions(); 1460 1461 { 1462 ShenandoahHeapLocker locker(lock()); 1463 _collection_set->clear(); 1464 _free_set->clear(); 1465 1466 heuristics()->choose_collection_set(_collection_set); 1467 _free_set->rebuild(); 1468 } 1469 } 1470 1471 // If collection set has candidates, start evacuation. 1472 // Otherwise, bypass the rest of the cycle. 1473 if (!collection_set()->is_empty()) { 1474 ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac); 1475 1476 if (ShenandoahVerify) { 1477 verifier()->verify_before_evacuation(); 1478 } 1479 1480 set_evacuation_in_progress(true); 1481 // From here on, we need to update references. 1482 set_has_forwarded_objects(true); 1483 1484 evacuate_and_update_roots(); 1485 1486 if (ShenandoahPacing) { 1487 pacer()->setup_for_evac(); 1488 } 1489 } else { 1490 if (ShenandoahVerify) { 1491 verifier()->verify_after_concmark(); 1492 } 1493 1494 if (VerifyAfterGC) { 1495 Universe::verify(); 1496 } 1497 } 1498 1499 } else { 1500 concurrent_mark()->cancel(); 1501 stop_concurrent_marking(); 1502 1503 if (process_references()) { 1504 // Abandon reference processing right away: pre-cleaning must have failed. 1505 ReferenceProcessor *rp = ref_processor(); 1506 rp->disable_discovery(); 1507 rp->abandon_partial_discovery(); 1508 rp->verify_no_references_recorded(); 1509 } 1510 } 1511 } 1512 1513 void ShenandoahHeap::op_final_evac() { 1514 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); 1515 1516 set_evacuation_in_progress(false); 1517 if (ShenandoahVerify) { 1518 verifier()->verify_after_evacuation(); 1519 } 1520 1521 if (VerifyAfterGC) { 1522 Universe::verify(); 1523 } 1524 } 1525 1526 void ShenandoahHeap::op_conc_evac() { 1527 ShenandoahEvacuationTask task(this, _collection_set, true); 1528 workers()->run_task(&task); 1529 } 1530 1531 void ShenandoahHeap::op_stw_evac() { 1532 ShenandoahEvacuationTask task(this, _collection_set, false); 1533 workers()->run_task(&task); 1534 } 1535 1536 void ShenandoahHeap::op_updaterefs() { 1537 update_heap_references(true); 1538 } 1539 1540 void ShenandoahHeap::op_cleanup() { 1541 free_set()->recycle_trash(); 1542 } 1543 1544 void ShenandoahHeap::op_reset() { 1545 reset_mark_bitmap(); 1546 } 1547 1548 void ShenandoahHeap::op_preclean() { 1549 concurrent_mark()->preclean_weak_refs(); 1550 } 1551 1552 void ShenandoahHeap::op_full(GCCause::Cause cause) { 1553 ShenandoahMetricsSnapshot metrics; 1554 metrics.snap_before(); 1555 1556 full_gc()->do_it(cause); 1557 1558 metrics.snap_after(); 1559 metrics.print(); 1560 1561 if (metrics.is_good_progress("Full GC")) { 1562 _progress_last_gc.set(); 1563 } else { 1564 // Nothing to do. Tell the allocation path that we have failed to make 1565 // progress, and it can finally fail. 1566 _progress_last_gc.unset(); 1567 } 1568 } 1569 1570 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) { 1571 // Degenerated GC is STW, but it can also fail. Current mechanics communicates 1572 // GC failure via cancelled_concgc() flag. So, if we detect the failure after 1573 // some phase, we have to upgrade the Degenerate GC to Full GC. 1574 1575 clear_cancelled_gc(); 1576 1577 ShenandoahMetricsSnapshot metrics; 1578 metrics.snap_before(); 1579 1580 switch (point) { 1581 // The cases below form the Duff's-like device: it describes the actual GC cycle, 1582 // but enters it at different points, depending on which concurrent phase had 1583 // degenerated. 1584 1585 case _degenerated_outside_cycle: 1586 // We have degenerated from outside the cycle, which means something is bad with 1587 // the heap, most probably heavy humongous fragmentation, or we are very low on free 1588 // space. It makes little sense to wait for Full GC to reclaim as much as it can, when 1589 // we can do the most aggressive degen cycle, which includes processing references and 1590 // class unloading, unless those features are explicitly disabled. 1591 // 1592 // Note that we can only do this for "outside-cycle" degens, otherwise we would risk 1593 // changing the cycle parameters mid-cycle during concurrent -> degenerated handover. 1594 set_process_references(heuristics()->can_process_references()); 1595 set_unload_classes(heuristics()->can_unload_classes()); 1596 1597 op_reset(); 1598 1599 op_init_mark(); 1600 if (cancelled_gc()) { 1601 op_degenerated_fail(); 1602 return; 1603 } 1604 1605 case _degenerated_mark: 1606 op_final_mark(); 1607 if (cancelled_gc()) { 1608 op_degenerated_fail(); 1609 return; 1610 } 1611 1612 op_cleanup(); 1613 1614 case _degenerated_evac: 1615 // If heuristics thinks we should do the cycle, this flag would be set, 1616 // and we can do evacuation. Otherwise, it would be the shortcut cycle. 1617 if (is_evacuation_in_progress()) { 1618 1619 // Degeneration under oom-evac protocol might have left some objects in 1620 // collection set un-evacuated. Restart evacuation from the beginning to 1621 // capture all objects. For all the objects that are already evacuated, 1622 // it would be a simple check, which is supposed to be fast. This is also 1623 // safe to do even without degeneration, as CSet iterator is at beginning 1624 // in preparation for evacuation anyway. 1625 collection_set()->clear_current_index(); 1626 1627 op_stw_evac(); 1628 if (cancelled_gc()) { 1629 op_degenerated_fail(); 1630 return; 1631 } 1632 } 1633 1634 // If heuristics thinks we should do the cycle, this flag would be set, 1635 // and we need to do update-refs. Otherwise, it would be the shortcut cycle. 1636 if (has_forwarded_objects()) { 1637 op_init_updaterefs(); 1638 if (cancelled_gc()) { 1639 op_degenerated_fail(); 1640 return; 1641 } 1642 } 1643 1644 case _degenerated_updaterefs: 1645 if (has_forwarded_objects()) { 1646 op_final_updaterefs(); 1647 if (cancelled_gc()) { 1648 op_degenerated_fail(); 1649 return; 1650 } 1651 } 1652 1653 op_cleanup(); 1654 break; 1655 1656 default: 1657 ShouldNotReachHere(); 1658 } 1659 1660 if (ShenandoahVerify) { 1661 verifier()->verify_after_degenerated(); 1662 } 1663 1664 if (VerifyAfterGC) { 1665 Universe::verify(); 1666 } 1667 1668 metrics.snap_after(); 1669 metrics.print(); 1670 1671 // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles, 1672 // because that probably means the heap is overloaded and/or fragmented. 1673 if (!metrics.is_good_progress("Degenerated GC")) { 1674 _progress_last_gc.unset(); 1675 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc); 1676 op_degenerated_futile(); 1677 } else { 1678 _progress_last_gc.set(); 1679 } 1680 } 1681 1682 void ShenandoahHeap::op_degenerated_fail() { 1683 log_info(gc)("Cannot finish degeneration, upgrading to Full GC"); 1684 shenandoah_policy()->record_degenerated_upgrade_to_full(); 1685 op_full(GCCause::_shenandoah_upgrade_to_full_gc); 1686 } 1687 1688 void ShenandoahHeap::op_degenerated_futile() { 1689 shenandoah_policy()->record_degenerated_upgrade_to_full(); 1690 op_full(GCCause::_shenandoah_upgrade_to_full_gc); 1691 } 1692 1693 void ShenandoahHeap::stop_concurrent_marking() { 1694 assert(is_concurrent_mark_in_progress(), "How else could we get here?"); 1695 if (!cancelled_gc()) { 1696 // If we needed to update refs, and concurrent marking has been cancelled, 1697 // we need to finish updating references. 1698 set_has_forwarded_objects(false); 1699 mark_complete_marking_context(); 1700 } 1701 set_concurrent_mark_in_progress(false); 1702 } 1703 1704 void ShenandoahHeap::force_satb_flush_all_threads() { 1705 if (!is_concurrent_mark_in_progress()) { 1706 // No need to flush SATBs 1707 return; 1708 } 1709 1710 // Do not block if Threads lock is busy. This avoids the potential deadlock 1711 // when this code is called from the periodic task, and something else is 1712 // expecting the periodic task to complete without blocking. On the off-chance 1713 // Threads lock is busy momentarily, try to acquire several times. 1714 for (int t = 0; t < 10; t++) { 1715 if (Threads_lock->try_lock()) { 1716 JavaThread::set_force_satb_flush_all_threads(true); 1717 Threads_lock->unlock(); 1718 1719 // The threads are not "acquiring" their thread-local data, but it does not 1720 // hurt to "release" the updates here anyway. 1721 OrderAccess::fence(); 1722 break; 1723 } 1724 os::naked_short_sleep(1); 1725 } 1726 } 1727 1728 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) { 1729 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint"); 1730 _gc_state.set_cond(mask, value); 1731 JavaThread::set_gc_state_all_threads(_gc_state.raw_value()); 1732 } 1733 1734 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) { 1735 set_gc_state_mask(MARKING, in_progress); 1736 JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress); 1737 } 1738 1739 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) { 1740 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint"); 1741 set_gc_state_mask(EVACUATION, in_progress); 1742 } 1743 1744 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) { 1745 // Initialize Brooks pointer for the next object 1746 HeapWord* result = obj + ShenandoahBrooksPointer::word_size(); 1747 ShenandoahBrooksPointer::initialize(oop(result)); 1748 return result; 1749 } 1750 1751 uint ShenandoahHeap::oop_extra_words() { 1752 return ShenandoahBrooksPointer::word_size(); 1753 } 1754 1755 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() : 1756 _mark_context(ShenandoahHeap::heap()->marking_context()) { 1757 } 1758 1759 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() : 1760 _mark_context(ShenandoahHeap::heap()->marking_context()) { 1761 } 1762 1763 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) { 1764 if (oopDesc::is_null(obj)) { 1765 return false; 1766 } 1767 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); 1768 shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress()); 1769 return _mark_context->is_marked(obj); 1770 } 1771 1772 bool ShenandoahIsAliveClosure::do_object_b(oop obj) { 1773 if (oopDesc::is_null(obj)) { 1774 return false; 1775 } 1776 shenandoah_assert_not_forwarded(NULL, obj); 1777 return _mark_context->is_marked(obj); 1778 } 1779 1780 void ShenandoahHeap::ref_processing_init() { 1781 MemRegion mr = reserved_region(); 1782 1783 assert(_max_workers > 0, "Sanity"); 1784 1785 _ref_processor = 1786 new ReferenceProcessor(mr, // span 1787 ParallelRefProcEnabled, // MT processing 1788 _max_workers, // Degree of MT processing 1789 true, // MT discovery 1790 _max_workers, // Degree of MT discovery 1791 false, // Reference discovery is not atomic 1792 NULL); // No closure, should be installed before use 1793 1794 shenandoah_assert_rp_isalive_not_installed(); 1795 } 1796 1797 void ShenandoahHeap::acquire_pending_refs_lock() { 1798 _control_thread->slt()->manipulatePLL(SurrogateLockerThread::acquirePLL); 1799 } 1800 1801 void ShenandoahHeap::release_pending_refs_lock() { 1802 _control_thread->slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL); 1803 } 1804 1805 GCTracer* ShenandoahHeap::tracer() { 1806 return shenandoah_policy()->tracer(); 1807 } 1808 1809 size_t ShenandoahHeap::tlab_used(Thread* thread) const { 1810 return _free_set->used(); 1811 } 1812 1813 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) { 1814 if (try_cancel_gc()) { 1815 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause)); 1816 log_info(gc)("%s", msg.buffer()); 1817 Events::log(Thread::current(), "%s", msg.buffer()); 1818 } 1819 } 1820 1821 uint ShenandoahHeap::max_workers() { 1822 return _max_workers; 1823 } 1824 1825 void ShenandoahHeap::stop() { 1826 // The shutdown sequence should be able to terminate when GC is running. 1827 1828 // Step 0. Notify policy to disable event recording. 1829 _shenandoah_policy->record_shutdown(); 1830 1831 // Step 1. Notify control thread that we are in shutdown. 1832 // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown. 1833 // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below. 1834 _control_thread->prepare_for_graceful_shutdown(); 1835 1836 // Step 2. Notify GC workers that we are cancelling GC. 1837 cancel_gc(GCCause::_shenandoah_stop_vm); 1838 1839 // Step 3. Wait until GC worker exits normally. 1840 _control_thread->stop(); 1841 1842 // Step 4. Stop String Dedup thread if it is active 1843 if (ShenandoahStringDedup::is_enabled()) { 1844 ShenandoahStringDedup::stop(); 1845 } 1846 } 1847 1848 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) { 1849 assert(heuristics()->can_unload_classes(), "Class unloading should be enabled"); 1850 1851 ShenandoahGCPhase root_phase(full_gc ? 1852 ShenandoahPhaseTimings::full_gc_purge : 1853 ShenandoahPhaseTimings::purge); 1854 1855 ShenandoahIsAliveSelector alive; 1856 BoolObjectClosure* is_alive = alive.is_alive_closure(); 1857 1858 bool purged_class; 1859 1860 // Unload classes and purge SystemDictionary. 1861 { 1862 ShenandoahGCPhase phase(full_gc ? 1863 ShenandoahPhaseTimings::full_gc_purge_class_unload : 1864 ShenandoahPhaseTimings::purge_class_unload); 1865 purged_class = SystemDictionary::do_unloading(is_alive, 1866 full_gc /* do_cleaning*/ ); 1867 } 1868 1869 { 1870 ShenandoahGCPhase phase(full_gc ? 1871 ShenandoahPhaseTimings::full_gc_purge_par : 1872 ShenandoahPhaseTimings::purge_par); 1873 uint active = _workers->active_workers(); 1874 ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class); 1875 _workers->run_task(&unlink_task); 1876 } 1877 1878 if (ShenandoahStringDedup::is_enabled()) { 1879 ShenandoahGCPhase phase(full_gc ? 1880 ShenandoahPhaseTimings::full_gc_purge_string_dedup : 1881 ShenandoahPhaseTimings::purge_string_dedup); 1882 ShenandoahStringDedup::parallel_cleanup(); 1883 } 1884 1885 { 1886 ShenandoahGCPhase phase(full_gc ? 1887 ShenandoahPhaseTimings::full_gc_purge_cldg : 1888 ShenandoahPhaseTimings::purge_cldg); 1889 ClassLoaderDataGraph::purge(); 1890 } 1891 } 1892 1893 void ShenandoahHeap::set_has_forwarded_objects(bool cond) { 1894 set_gc_state_mask(HAS_FORWARDED, cond); 1895 } 1896 1897 void ShenandoahHeap::set_process_references(bool pr) { 1898 _process_references.set_cond(pr); 1899 } 1900 1901 void ShenandoahHeap::set_unload_classes(bool uc) { 1902 _unload_classes.set_cond(uc); 1903 } 1904 1905 bool ShenandoahHeap::process_references() const { 1906 return _process_references.is_set(); 1907 } 1908 1909 bool ShenandoahHeap::unload_classes() const { 1910 return _unload_classes.is_set(); 1911 } 1912 1913 address ShenandoahHeap::in_cset_fast_test_addr() { 1914 ShenandoahHeap* heap = ShenandoahHeap::heap(); 1915 assert(heap->collection_set() != NULL, "Sanity"); 1916 return (address) heap->collection_set()->biased_map_address(); 1917 } 1918 1919 address ShenandoahHeap::cancelled_gc_addr() { 1920 return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of(); 1921 } 1922 1923 address ShenandoahHeap::gc_state_addr() { 1924 return (address) ShenandoahHeap::heap()->_gc_state.addr_of(); 1925 } 1926 1927 size_t ShenandoahHeap::conservative_max_heap_alignment() { 1928 size_t align = ShenandoahMaxRegionSize; 1929 if (UseLargePages) { 1930 align = MAX2(align, os::large_page_size()); 1931 } 1932 return align; 1933 } 1934 1935 size_t ShenandoahHeap::bytes_allocated_since_gc_start() { 1936 return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start); 1937 } 1938 1939 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() { 1940 OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0); 1941 } 1942 1943 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) { 1944 _degenerated_gc_in_progress.set_cond(in_progress); 1945 } 1946 1947 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) { 1948 _full_gc_in_progress.set_cond(in_progress); 1949 } 1950 1951 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) { 1952 assert (is_full_gc_in_progress(), "should be"); 1953 _full_gc_move_in_progress.set_cond(in_progress); 1954 } 1955 1956 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) { 1957 set_gc_state_mask(UPDATEREFS, in_progress); 1958 } 1959 1960 void ShenandoahHeap::register_nmethod(nmethod* nm) { 1961 ShenandoahCodeRoots::add_nmethod(nm); 1962 } 1963 1964 void ShenandoahHeap::unregister_nmethod(nmethod* nm) { 1965 ShenandoahCodeRoots::remove_nmethod(nm); 1966 } 1967 1968 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) { 1969 o = barrier_set()->write_barrier(o); 1970 ShenandoahHeapLocker locker(lock()); 1971 heap_region_containing(o)->make_pinned(); 1972 return o; 1973 } 1974 1975 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) { 1976 o = barrier_set()->read_barrier(o); 1977 ShenandoahHeapLocker locker(lock()); 1978 heap_region_containing(o)->make_unpinned(); 1979 } 1980 1981 GCTimer* ShenandoahHeap::gc_timer() const { 1982 return _gc_timer; 1983 } 1984 1985 #ifdef ASSERT 1986 void ShenandoahHeap::assert_gc_workers(uint nworkers) { 1987 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity"); 1988 1989 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) { 1990 if (UseDynamicNumberOfGCThreads || 1991 (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) { 1992 assert(nworkers <= ParallelGCThreads, "Cannot use more than it has"); 1993 } else { 1994 // Use ParallelGCThreads inside safepoints 1995 assert(nworkers == ParallelGCThreads, "Use ParalleGCThreads within safepoints"); 1996 } 1997 } else { 1998 if (UseDynamicNumberOfGCThreads || 1999 (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) { 2000 assert(nworkers <= ConcGCThreads, "Cannot use more than it has"); 2001 } else { 2002 // Use ConcGCThreads outside safepoints 2003 assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints"); 2004 } 2005 } 2006 } 2007 #endif 2008 2009 ShenandoahVerifier* ShenandoahHeap::verifier() { 2010 guarantee(ShenandoahVerify, "Should be enabled"); 2011 assert (_verifier != NULL, "sanity"); 2012 return _verifier; 2013 } 2014 2015 ShenandoahUpdateHeapRefsClosure::ShenandoahUpdateHeapRefsClosure() : 2016 _heap(ShenandoahHeap::heap()) {} 2017 2018 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask { 2019 private: 2020 ShenandoahHeap* _heap; 2021 ShenandoahRegionIterator* _regions; 2022 bool _concurrent; 2023 2024 public: 2025 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) : 2026 AbstractGangTask("Concurrent Update References Task"), 2027 _heap(ShenandoahHeap::heap()), 2028 _regions(regions), 2029 _concurrent(concurrent) { 2030 } 2031 2032 void work(uint worker_id) { 2033 ShenandoahConcurrentWorkerSession worker_session(worker_id); 2034 ShenandoahUpdateHeapRefsClosure cl; 2035 ShenandoahHeapRegion* r = _regions->next(); 2036 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context(); 2037 while (r != NULL) { 2038 HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit(); 2039 assert (top_at_start_ur >= r->bottom(), "sanity"); 2040 if (r->is_active() && !r->is_cset()) { 2041 _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur); 2042 } 2043 if (ShenandoahPacing) { 2044 _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom())); 2045 } 2046 if (_heap->cancelled_gc()) { 2047 return; 2048 } 2049 r = _regions->next(); 2050 } 2051 } 2052 }; 2053 2054 void ShenandoahHeap::update_heap_references(bool concurrent) { 2055 ShenandoahUpdateHeapRefsTask task(&_update_refs_iterator, concurrent); 2056 workers()->run_task(&task); 2057 } 2058 2059 void ShenandoahHeap::op_init_updaterefs() { 2060 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); 2061 2062 set_evacuation_in_progress(false); 2063 2064 if (ShenandoahVerify) { 2065 verifier()->verify_before_updaterefs(); 2066 } 2067 2068 set_update_refs_in_progress(true); 2069 make_parsable(true); 2070 for (uint i = 0; i < num_regions(); i++) { 2071 ShenandoahHeapRegion* r = get_region(i); 2072 r->set_concurrent_iteration_safe_limit(r->top()); 2073 } 2074 2075 // Reset iterator. 2076 _update_refs_iterator.reset(); 2077 2078 if (ShenandoahPacing) { 2079 pacer()->setup_for_updaterefs(); 2080 } 2081 } 2082 2083 void ShenandoahHeap::op_final_updaterefs() { 2084 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); 2085 2086 // Check if there is left-over work, and finish it 2087 if (_update_refs_iterator.has_next()) { 2088 ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work); 2089 2090 // Finish updating references where we left off. 2091 clear_cancelled_gc(); 2092 update_heap_references(false); 2093 } 2094 2095 // Clear cancelled GC, if set. On cancellation path, the block before would handle 2096 // everything. On degenerated paths, cancelled gc would not be set anyway. 2097 if (cancelled_gc()) { 2098 clear_cancelled_gc(); 2099 } 2100 assert(!cancelled_gc(), "Should have been done right before"); 2101 2102 concurrent_mark()->update_roots(is_degenerated_gc_in_progress() ? 2103 ShenandoahPhaseTimings::degen_gc_update_roots: 2104 ShenandoahPhaseTimings::final_update_refs_roots); 2105 2106 ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle); 2107 2108 trash_cset_regions(); 2109 set_has_forwarded_objects(false); 2110 set_update_refs_in_progress(false); 2111 2112 if (ShenandoahVerify) { 2113 verifier()->verify_after_updaterefs(); 2114 } 2115 2116 if (VerifyAfterGC) { 2117 Universe::verify(); 2118 } 2119 2120 { 2121 ShenandoahHeapLocker locker(lock()); 2122 _free_set->rebuild(); 2123 } 2124 } 2125 2126 #ifdef ASSERT 2127 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() { 2128 _lock.assert_not_owned_by_current_thread(); 2129 } 2130 2131 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() { 2132 _lock.assert_owned_by_current_thread(); 2133 } 2134 2135 void ShenandoahHeap::assert_heaplock_or_safepoint() { 2136 _lock.assert_owned_by_current_thread_or_safepoint(); 2137 } 2138 #endif 2139 2140 void ShenandoahHeap::print_extended_on(outputStream *st) const { 2141 print_on(st); 2142 print_heap_regions_on(st); 2143 } 2144 2145 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) { 2146 size_t slice = r->region_number() / _bitmap_regions_per_slice; 2147 2148 size_t regions_from = _bitmap_regions_per_slice * slice; 2149 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1)); 2150 for (size_t g = regions_from; g < regions_to; g++) { 2151 assert (g / _bitmap_regions_per_slice == slice, "same slice"); 2152 if (skip_self && g == r->region_number()) continue; 2153 if (get_region(g)->is_committed()) { 2154 return true; 2155 } 2156 } 2157 return false; 2158 } 2159 2160 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) { 2161 assert_heaplock_owned_by_current_thread(); 2162 2163 // Bitmaps in special regions do not need commits 2164 if (_bitmap_region_special) { 2165 return true; 2166 } 2167 2168 if (is_bitmap_slice_committed(r, true)) { 2169 // Some other region from the group is already committed, meaning the bitmap 2170 // slice is already committed, we exit right away. 2171 return true; 2172 } 2173 2174 // Commit the bitmap slice: 2175 size_t slice = r->region_number() / _bitmap_regions_per_slice; 2176 size_t off = _bitmap_bytes_per_slice * slice; 2177 size_t len = _bitmap_bytes_per_slice; 2178 if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) { 2179 return false; 2180 } 2181 return true; 2182 } 2183 2184 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) { 2185 assert_heaplock_owned_by_current_thread(); 2186 2187 // Bitmaps in special regions do not need uncommits 2188 if (_bitmap_region_special) { 2189 return true; 2190 } 2191 2192 if (is_bitmap_slice_committed(r, true)) { 2193 // Some other region from the group is still committed, meaning the bitmap 2194 // slice is should stay committed, exit right away. 2195 return true; 2196 } 2197 2198 // Uncommit the bitmap slice: 2199 size_t slice = r->region_number() / _bitmap_regions_per_slice; 2200 size_t off = _bitmap_bytes_per_slice * slice; 2201 size_t len = _bitmap_bytes_per_slice; 2202 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) { 2203 return false; 2204 } 2205 return true; 2206 } 2207 2208 void ShenandoahHeap::vmop_entry_init_mark() { 2209 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); 2210 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); 2211 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross); 2212 2213 try_inject_alloc_failure(); 2214 VM_ShenandoahInitMark op; 2215 VMThread::execute(&op); // jump to entry_init_mark() under safepoint 2216 } 2217 2218 void ShenandoahHeap::vmop_entry_final_mark() { 2219 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); 2220 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); 2221 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross); 2222 2223 try_inject_alloc_failure(); 2224 VM_ShenandoahFinalMarkStartEvac op; 2225 VMThread::execute(&op); // jump to entry_final_mark under safepoint 2226 } 2227 2228 void ShenandoahHeap::vmop_entry_final_evac() { 2229 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); 2230 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); 2231 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross); 2232 2233 VM_ShenandoahFinalEvac op; 2234 VMThread::execute(&op); // jump to entry_final_evac under safepoint 2235 } 2236 2237 void ShenandoahHeap::vmop_entry_init_updaterefs() { 2238 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); 2239 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); 2240 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross); 2241 2242 try_inject_alloc_failure(); 2243 VM_ShenandoahInitUpdateRefs op; 2244 VMThread::execute(&op); 2245 } 2246 2247 void ShenandoahHeap::vmop_entry_final_updaterefs() { 2248 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); 2249 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); 2250 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross); 2251 2252 try_inject_alloc_failure(); 2253 VM_ShenandoahFinalUpdateRefs op; 2254 VMThread::execute(&op); 2255 } 2256 2257 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) { 2258 TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters()); 2259 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); 2260 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross); 2261 2262 try_inject_alloc_failure(); 2263 VM_ShenandoahFullGC op(cause); 2264 VMThread::execute(&op); 2265 } 2266 2267 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) { 2268 TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters()); 2269 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); 2270 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross); 2271 2272 VM_ShenandoahDegeneratedGC degenerated_gc((int)point); 2273 VMThread::execute(°enerated_gc); 2274 } 2275 2276 void ShenandoahHeap::entry_init_mark() { 2277 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); 2278 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark); 2279 2280 const char* msg = init_mark_event_message(); 2281 GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id()); 2282 EventMark em("%s", msg); 2283 2284 ShenandoahWorkerScope scope(workers(), 2285 ShenandoahWorkerPolicy::calc_workers_for_init_marking(), 2286 "init marking"); 2287 2288 op_init_mark(); 2289 } 2290 2291 void ShenandoahHeap::entry_final_mark() { 2292 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); 2293 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark); 2294 2295 const char* msg = final_mark_event_message(); 2296 GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id()); 2297 EventMark em("%s", msg); 2298 2299 ShenandoahWorkerScope scope(workers(), 2300 ShenandoahWorkerPolicy::calc_workers_for_final_marking(), 2301 "final marking"); 2302 2303 op_final_mark(); 2304 } 2305 2306 void ShenandoahHeap::entry_final_evac() { 2307 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); 2308 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac); 2309 2310 const char* msg = "Pause Final Evac"; 2311 GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id()); 2312 EventMark em("%s", msg); 2313 2314 op_final_evac(); 2315 } 2316 2317 void ShenandoahHeap::entry_init_updaterefs() { 2318 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); 2319 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs); 2320 2321 static const char* msg = "Pause Init Update Refs"; 2322 GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id()); 2323 EventMark em("%s", msg); 2324 2325 // No workers used in this phase, no setup required 2326 2327 op_init_updaterefs(); 2328 } 2329 2330 void ShenandoahHeap::entry_final_updaterefs() { 2331 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); 2332 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs); 2333 2334 static const char* msg = "Pause Final Update Refs"; 2335 GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id()); 2336 EventMark em("%s", msg); 2337 2338 ShenandoahWorkerScope scope(workers(), 2339 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(), 2340 "final reference update"); 2341 2342 op_final_updaterefs(); 2343 } 2344 2345 void ShenandoahHeap::entry_full(GCCause::Cause cause) { 2346 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); 2347 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc); 2348 2349 static const char* msg = "Pause Full"; 2350 GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true); 2351 EventMark em("%s", msg); 2352 2353 ShenandoahWorkerScope scope(workers(), 2354 ShenandoahWorkerPolicy::calc_workers_for_fullgc(), 2355 "full gc"); 2356 2357 op_full(cause); 2358 } 2359 2360 void ShenandoahHeap::entry_degenerated(int point) { 2361 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); 2362 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc); 2363 2364 ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point; 2365 const char* msg = degen_event_message(dpoint); 2366 GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true); 2367 EventMark em("%s", msg); 2368 2369 ShenandoahWorkerScope scope(workers(), 2370 ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(), 2371 "stw degenerated gc"); 2372 2373 set_degenerated_gc_in_progress(true); 2374 op_degenerated(dpoint); 2375 set_degenerated_gc_in_progress(false); 2376 } 2377 2378 void ShenandoahHeap::entry_mark() { 2379 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); 2380 2381 const char* msg = conc_mark_event_message(); 2382 GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true); 2383 EventMark em("%s", msg); 2384 2385 ShenandoahWorkerScope scope(workers(), 2386 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(), 2387 "concurrent marking"); 2388 2389 try_inject_alloc_failure(); 2390 op_mark(); 2391 } 2392 2393 void ShenandoahHeap::entry_evac() { 2394 ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac); 2395 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); 2396 2397 static const char *msg = "Concurrent evacuation"; 2398 GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true); 2399 EventMark em("%s", msg); 2400 2401 ShenandoahWorkerScope scope(workers(), 2402 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(), 2403 "concurrent evacuation"); 2404 2405 try_inject_alloc_failure(); 2406 op_conc_evac(); 2407 } 2408 2409 void ShenandoahHeap::entry_updaterefs() { 2410 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs); 2411 2412 static const char* msg = "Concurrent update references"; 2413 GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true); 2414 EventMark em("%s", msg); 2415 2416 ShenandoahWorkerScope scope(workers(), 2417 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(), 2418 "concurrent reference update"); 2419 2420 try_inject_alloc_failure(); 2421 op_updaterefs(); 2422 } 2423 2424 void ShenandoahHeap::entry_cleanup() { 2425 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup); 2426 2427 static const char* msg = "Concurrent cleanup"; 2428 GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true); 2429 EventMark em("%s", msg); 2430 2431 // This phase does not use workers, no need for setup 2432 2433 try_inject_alloc_failure(); 2434 op_cleanup(); 2435 } 2436 2437 void ShenandoahHeap::entry_reset() { 2438 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset); 2439 2440 static const char* msg = "Concurrent reset"; 2441 GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true); 2442 EventMark em("%s", msg); 2443 2444 ShenandoahWorkerScope scope(workers(), 2445 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(), 2446 "concurrent reset"); 2447 2448 try_inject_alloc_failure(); 2449 op_reset(); 2450 } 2451 2452 void ShenandoahHeap::entry_preclean() { 2453 if (ShenandoahPreclean && process_references()) { 2454 ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean); 2455 2456 static const char* msg = "Concurrent precleaning"; 2457 GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true); 2458 EventMark em("%s", msg); 2459 2460 ShenandoahWorkerScope scope(workers(), 2461 ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(), 2462 "concurrent preclean", 2463 /* check_workers = */ false); 2464 2465 try_inject_alloc_failure(); 2466 op_preclean(); 2467 } 2468 } 2469 2470 void ShenandoahHeap::entry_uncommit(double shrink_before) { 2471 static const char *msg = "Concurrent uncommit"; 2472 GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true); 2473 EventMark em("%s", msg); 2474 2475 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit); 2476 2477 op_uncommit(shrink_before); 2478 } 2479 2480 void ShenandoahHeap::try_inject_alloc_failure() { 2481 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) { 2482 _inject_alloc_failure.set(); 2483 os::naked_short_sleep(1); 2484 if (cancelled_gc()) { 2485 log_info(gc)("Allocation failure was successfully injected"); 2486 } 2487 } 2488 } 2489 2490 bool ShenandoahHeap::should_inject_alloc_failure() { 2491 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset(); 2492 } 2493 2494 void ShenandoahHeap::enter_evacuation() { 2495 _oom_evac_handler.enter_evacuation(); 2496 } 2497 2498 void ShenandoahHeap::leave_evacuation() { 2499 _oom_evac_handler.leave_evacuation(); 2500 } 2501 2502 ShenandoahRegionIterator::ShenandoahRegionIterator() : 2503 _heap(ShenandoahHeap::heap()), 2504 _index(0) {} 2505 2506 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) : 2507 _heap(heap), 2508 _index(0) {} 2509 2510 void ShenandoahRegionIterator::reset() { 2511 _index = 0; 2512 } 2513 2514 bool ShenandoahRegionIterator::has_next() const { 2515 return _index < (jint)_heap->num_regions(); 2516 } 2517 2518 char ShenandoahHeap::gc_state() { 2519 return _gc_state.raw_value(); 2520 } 2521 2522 const char* ShenandoahHeap::init_mark_event_message() const { 2523 bool update_refs = has_forwarded_objects(); 2524 bool proc_refs = process_references(); 2525 bool unload_cls = unload_classes(); 2526 2527 if (update_refs && proc_refs && unload_cls) { 2528 return "Pause Init Mark (update refs) (process weakrefs) (unload classes)"; 2529 } else if (update_refs && proc_refs) { 2530 return "Pause Init Mark (update refs) (process weakrefs)"; 2531 } else if (update_refs && unload_cls) { 2532 return "Pause Init Mark (update refs) (unload classes)"; 2533 } else if (proc_refs && unload_cls) { 2534 return "Pause Init Mark (process weakrefs) (unload classes)"; 2535 } else if (update_refs) { 2536 return "Pause Init Mark (update refs)"; 2537 } else if (proc_refs) { 2538 return "Pause Init Mark (process weakrefs)"; 2539 } else if (unload_cls) { 2540 return "Pause Init Mark (unload classes)"; 2541 } else { 2542 return "Pause Init Mark"; 2543 } 2544 } 2545 2546 const char* ShenandoahHeap::final_mark_event_message() const { 2547 bool update_refs = has_forwarded_objects(); 2548 bool proc_refs = process_references(); 2549 bool unload_cls = unload_classes(); 2550 2551 if (update_refs && proc_refs && unload_cls) { 2552 return "Pause Final Mark (update refs) (process weakrefs) (unload classes)"; 2553 } else if (update_refs && proc_refs) { 2554 return "Pause Final Mark (update refs) (process weakrefs)"; 2555 } else if (update_refs && unload_cls) { 2556 return "Pause Final Mark (update refs) (unload classes)"; 2557 } else if (proc_refs && unload_cls) { 2558 return "Pause Final Mark (process weakrefs) (unload classes)"; 2559 } else if (update_refs) { 2560 return "Pause Final Mark (update refs)"; 2561 } else if (proc_refs) { 2562 return "Pause Final Mark (process weakrefs)"; 2563 } else if (unload_cls) { 2564 return "Pause Final Mark (unload classes)"; 2565 } else { 2566 return "Pause Final Mark"; 2567 } 2568 } 2569 2570 const char* ShenandoahHeap::conc_mark_event_message() const { 2571 bool update_refs = has_forwarded_objects(); 2572 bool proc_refs = process_references(); 2573 bool unload_cls = unload_classes(); 2574 2575 if (update_refs && proc_refs && unload_cls) { 2576 return "Concurrent marking (update refs) (process weakrefs) (unload classes)"; 2577 } else if (update_refs && proc_refs) { 2578 return "Concurrent marking (update refs) (process weakrefs)"; 2579 } else if (update_refs && unload_cls) { 2580 return "Concurrent marking (update refs) (unload classes)"; 2581 } else if (proc_refs && unload_cls) { 2582 return "Concurrent marking (process weakrefs) (unload classes)"; 2583 } else if (update_refs) { 2584 return "Concurrent marking (update refs)"; 2585 } else if (proc_refs) { 2586 return "Concurrent marking (process weakrefs)"; 2587 } else if (unload_cls) { 2588 return "Concurrent marking (unload classes)"; 2589 } else { 2590 return "Concurrent marking"; 2591 } 2592 } 2593 2594 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const { 2595 switch (point) { 2596 case _degenerated_unset: 2597 return "Pause Degenerated GC (<UNSET>)"; 2598 case _degenerated_outside_cycle: 2599 return "Pause Degenerated GC (Outside of Cycle)"; 2600 case _degenerated_mark: 2601 return "Pause Degenerated GC (Mark)"; 2602 case _degenerated_evac: 2603 return "Pause Degenerated GC (Evacuation)"; 2604 case _degenerated_updaterefs: 2605 return "Pause Degenerated GC (Update Refs)"; 2606 default: 2607 ShouldNotReachHere(); 2608 return "ERROR"; 2609 } 2610 } 2611 2612 jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) { 2613 #ifdef ASSERT 2614 assert(_liveness_cache != NULL, "sanity"); 2615 assert(worker_id < _max_workers, "sanity"); 2616 for (uint i = 0; i < num_regions(); i++) { 2617 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty"); 2618 } 2619 #endif 2620 return _liveness_cache[worker_id]; 2621 } 2622 2623 void ShenandoahHeap::flush_liveness_cache(uint worker_id) { 2624 assert(worker_id < _max_workers, "sanity"); 2625 assert(_liveness_cache != NULL, "sanity"); 2626 jushort* ld = _liveness_cache[worker_id]; 2627 for (uint i = 0; i < num_regions(); i++) { 2628 ShenandoahHeapRegion* r = get_region(i); 2629 jushort live = ld[i]; 2630 if (live > 0) { 2631 r->increase_live_data_gc_words(live); 2632 ld[i] = 0; 2633 } 2634 } 2635 } 2636 2637 BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() { 2638 return ShenandoahHeap::heap()->has_forwarded_objects() ? reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl) 2639 : reinterpret_cast<BoolObjectClosure*>(&_alive_cl); 2640 }