/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/plab.hpp"

#include "gc/shenandoah/shenandoahAllocTracker.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"

#include "memory/metaspace.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (! CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    shenandoah_assert_not_forwarded(p, obj);
  }
}

void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_work(p); }
#endif

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap_base;
public:
  ShenandoahPretouchTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch"),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap_base(bitmap_base) {
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

      size_t start = r->region_number() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  ShenandoahBrooksPointer::initial_checks();

  initialize_heuristics();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  if (ShenandoahAlwaysPreTouch) {
    // Enabled pre-touch means the entire heap is committed right away.
    init_byte_size = max_byte_size;
  }

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 heap_alignment);
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);

  _num_regions = ShenandoahHeapRegion::region_count();

  size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");

  _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
  _committed = _initial_size;

  log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size));
  if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
    vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
  }

  size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));

  // The call below uses SATB machinery that currently lives in G1, but
  // probably belongs in a shared location.
  ShenandoahBarrierSet::satb_mark_queue_set().initialize(this,
                                                         SATB_Q_CBL_mon,
                                                         20 /* G1SATBProcessCompletedThreshold */,
                                                         60 /* G1SATBBufferEnqueueingThresholdPercent */,
                                                         Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
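
  // Example (assuming the usual MarkBitMap density of one mark bit per heap
  // word, i.e. heap_map_factor() == 64): 4M regions need 64K of bitmap each,
  // so with 2M large pages a single bitmap slice spans 32 regions.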

  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  os::commit_memory_or_exit((char *) (_bitmap_region.start()), bitmap_init_commit, false,
                            "couldn't allocate initial bitmap");

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
                                                         (HeapWord*) pgc_rs.base() + reg_size_words * i,
                                                         reg_size_words,
                                                         i,
                                                         i < num_committed_regions);

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before the initialize() call below zeroes it from the initializing thread. For any
    // given region, we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _num_regions, page_size);
    ShenandoahPretouchTask cl(bitmap0.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  _traversal_gc = heuristics()->can_do_traversal_gc() ?
                  new ShenandoahTraversalGC(this, _num_regions) :
                  NULL;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _phase_timings = new ShenandoahPhaseTimings();

  if (ShenandoahAllocationTrace) {
    _alloc_tracker = new ShenandoahAllocTracker();
  }

  ShenandoahStringDedup::initialize();

  _control_thread = new ShenandoahControlThread();

  ShenandoahCodeRoots::initialize();

  log_info(gc, init)("Safepointing mechanism: %s",
                     SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
                     (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));

  _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
  }

  return JNI_OK;
}

void ShenandoahHeap::initialize_heuristics() {
  if (ShenandoahGCHeuristics != NULL) {
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      _heuristics = new ShenandoahAggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
      _heuristics = new ShenandoahStaticHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      _heuristics = new ShenandoahPassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
      _heuristics = new ShenandoahCompactHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
      _heuristics = new ShenandoahTraversalHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }

    if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _heuristics->name()));
    }
    if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _heuristics->name()));
    }

    if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
    }
    log_info(gc, init)("Shenandoah heuristics: %s",
                       _heuristics->name());
  } else {
    ShouldNotReachHere();
  }
}
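
// For example, -XX:ShenandoahGCHeuristics=adaptive selects ShenandoahAdaptiveHeuristics;
// the checks above additionally gate diagnostic and experimental modes behind
// -XX:+UnlockDiagnosticVMOptions and -XX:+UnlockExperimentalVMOptions, respectively.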

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(NULL),
  _safepoint_workers(NULL),
  _num_regions(0),
  _regions(NULL),
  _update_refs_iterator(this),
  _control_thread(NULL),
  _shenandoah_policy(policy),
  _heuristics(NULL),
  _free_set(NULL),
  _scm(new ShenandoahConcurrentMark()),
  _traversal_gc(NULL),
  _full_gc(new ShenandoahMarkCompact()),
  _pacer(NULL),
  _verifier(NULL),
  _alloc_tracker(NULL),
  _phase_timings(NULL),
  _monitoring_support(NULL),
  _memory_pool(NULL),
  _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
  _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _soft_ref_policy(),
  _ref_processor(NULL),
  _marking_context(NULL),
  _collection_set(NULL)
{
  log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
  log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");

  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                                    /* are_GC_task_threads */ true,
                                    /* are_ConcurrentGC_threads */ false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ShenandoahParallelSafepointThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ShenandoahParallelSafepointThreads,
                                                false, false);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
               capacity() / K, committed() / K, used() / K);
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "K regions",
               num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);

  st->print("Status: ");
  if (has_forwarded_objects())          st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress()) st->print("marking, ");
  if (is_evacuation_in_progress())      st->print("evacuating, ");
  if (is_update_refs_in_progress())     st->print("updating refs, ");
  if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
  if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
  if (is_full_gc_in_progress())              st->print("full gc, ");
  if (is_full_gc_move_in_progress())         st->print("full gc move, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    if (thread != NULL && (thread->is_Java_thread() || thread->is_Worker_thread())) {
      ShenandoahThreadLocalData::initialize_gclab(thread);
    }
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitGCLABClosure init_gclabs;
  Threads::threads_do(&init_gclabs);
  _workers->threads_do(&init_gclabs);
  _safepoint_workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because they cannot
  // determine their max_size yet. Instead, let the WorkGang initialize GCLABs
  // when new workers are created.
  _workers->set_initialize_gclab();

  _scm->initialize(_max_workers);
  _full_gc->initialize(_gc_timer);

  ref_processing_init();

  _heuristics->initialize();
}

size_t ShenandoahHeap::used() const {
  return OrderAccess::load_acquire(&_used);
}

size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  OrderAccess::release_store_fence(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(bytes, &_used);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(bytes, &_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return num_regions() * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before) {
  assert (ShenandoahUncommit, "should be enabled");

  size_t count = 0;
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
                 count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record the new heuristic value even if we take a shortcut. This captures
  // the case when moderately-sized objects always take the shortcut. At some
  // point, heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return NULL;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(gclab_buf, actual_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  ShenandoahAllocTrace trace_alloc(req.size(), req.type());

  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = NULL;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // If allocation failed, block until the control thread has reacted, then
    // retry the allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress.
    //
    // Then, we need to make sure the allocation was retried after at least one
    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.

    size_t tries = 0;

    while (result == NULL && _progress_last_gc.is_set()) {
      tries++;
      control_thread()->handle_alloc_failure(req.size());
      result = allocate_memory_under_lock(req, in_new_region);
    }

    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
      tries++;
      control_thread()->handle_alloc_failure(req.size());
      result = allocate_memory_under_lock(req, in_new_region);
    }

  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != NULL) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(req, in_new_region);
}
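
// ShenandoahMemAllocator below reserves one extra word in front of every object
// for the Brooks forwarding pointer, and hands out the address past that word:
//
//   [fwdptr][object header][object fields ...]
//            ^-- result returned to the runtime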
class ShenandoahMemAllocator : public MemAllocator {
private:
  MemAllocator& _initializer;
public:
  ShenandoahMemAllocator(MemAllocator& initializer, Klass* klass, size_t word_size, Thread* thread) :
    MemAllocator(klass, word_size + ShenandoahBrooksPointer::word_size(), thread),
    _initializer(initializer) {}

protected:
  virtual HeapWord* mem_allocate(Allocation& allocation) const {
    HeapWord* result = MemAllocator::mem_allocate(allocation);
    // Initialize brooks-pointer
    if (result != NULL) {
      result += ShenandoahBrooksPointer::word_size();
      ShenandoahBrooksPointer::initialize(oop(result));
      assert(! ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targeted region");
    }
    return result;
  }

  virtual oop initialize(HeapWord* mem) const {
    return _initializer.initialize(mem);
  }
};

oop ShenandoahHeap::obj_allocate(Klass* klass, int size, TRAPS) {
  ObjAllocator initializer(klass, size, THREAD);
  ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
  return allocator.allocate();
}

oop ShenandoahHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  ObjArrayAllocator initializer(klass, size, length, do_zero, THREAD);
  ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
  return allocator.allocate();
}

oop ShenandoahHeap::class_allocate(Klass* klass, int size, TRAPS) {
  ClassAllocator initializer(klass, size, THREAD);
  ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
  return allocator.allocate();
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ShenandoahHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  HeapWord* obj = tlab_post_allocation_setup(start);
  CollectedHeap::fill_with_object(obj, end);
}

size_t ShenandoahHeap::min_dummy_object_size() const {
  return CollectedHeap::min_dummy_object_size() + ShenandoahBrooksPointer::word_size();
}

class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = RawAccess<>::oop_load(p);
    if (! CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->in_collection_set(obj)) {
        shenandoah_assert_marked(p, obj);
        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
        if (oopDesc::equals_raw(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(NULL, p);
    if (oopDesc::equals_raw(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      assert(r->has_live(), "all-garbage regions are reclaimed early");
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
  st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(start->bottom() + ShenandoahBrooksPointer::word_size());
  size_t size = humongous_obj->size() + ShenandoahBrooksPointer::word_size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->region_number() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, an assertion fails when printing regions
    // to the trace log, because it expects every humongous continuation to be
    // preceded by its humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahRetireGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
  }
};

void ShenandoahHeap::make_parsable(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
  }
  ShenandoahRetireGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
  _safepoint_workers->threads_do(&cl);
}

void ShenandoahHeap::resize_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootEvacuator* _rp;

public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahEvacuateUpdateRootsClosure cl;

    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl, ClassLoaderData::_claim_strong);

  ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  if (ShenandoahElasticTLAB) {
    // With Elastic TLABs, return the max allowed size, and let the allocation path
    // figure out the safe size for current allocation.
    return ShenandoahHeapRegion::max_tlab_size_bytes();
  } else {
    return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    gclab->retire();
    if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::retire_and_reset_gclabs() {
  ShenandoahRetireAndResetGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
  _safepoint_workers->threads_do(&cl);
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  control_thread()->request_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  double v = heuristics()->time_since_last_gc() * 1000;
  assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
  return (jlong)v;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    make_parsable(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::print_worker_threads_on(st);
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  _safepoint_workers->threads_do(tcl);
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();

    if (ShenandoahPacing) {
      pacer()->print_on(&ls);
    }

    ls.cr();
    ls.cr();

    if (ShenandoahAllocationTrace) {
      assert(alloc_tracker() != NULL, "Must be");
      alloc_tracker()->print_on(&ls);
    } else {
      ls.print_cr(" Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
    }
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  Stack<oop, mtGC>* _oop_stack;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      assert(oopDesc::is_oop(obj), "must be a valid oop");
      if (!_bitmap->is_marked((HeapWord*) obj)) {
        _bitmap->mark((HeapWord*) obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop, mtGC>* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below),
 * we don't need to make the heap parsable. For Shenandoah-internal linear heap scans
 * that we can control, we call SH::make_parsable().
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
 * wiped the bitmap in preparation for next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
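
/*
 * Note: the traversal below relies on the auxiliary bitmap that initialize()
 * only reserved, without committing it. object_iterate() commits the bitmap
 * on entry and uncommits it on exit, so the footprint is only paid while a
 * heap iteration is actually running.
 */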
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
  if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return;
  }

  // Reset bitmap
  _aux_bit_map.clear();

  Stack<oop, mtGC> oop_stack;

  // First, we process all GC roots. This populates the work stack with initial objects.
  ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
  CLDToOopClosure clds(&oops, ClassLoaderData::_claim_none);
  CodeBlobToOopClosure blobs(&oops, false);
  rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);

  // Work through the oop stack to traverse heap.
  while (! oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");

  if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}

void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
  object_iterate(cl);
}

void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* current = get_region(i);
    blk->heap_region_do(current);
  }
}

class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionClosure* const _blk;

  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
  volatile size_t _index;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);

public:
  ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
    AbstractGangTask("Parallel Region Task"),
    _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}

  void work(uint worker_id) {
    size_t stride = ShenandoahParallelRegionStride;

    size_t max = _heap->num_regions();
    while (_index < max) {
      size_t cur = Atomic::add(stride, &_index) - stride;
      size_t start = cur;
      size_t end = MIN2(cur + stride, max);
      if (start >= max) break;

      for (size_t i = cur; i < end; i++) {
        ShenandoahHeapRegion* current = _heap->get_region(i);
        _blk->heap_region_do(current);
      }
    }
  }
};
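
// The task above carves the region array into stride-sized windows: every
// worker claims the next window with a single atomic add on _index, so each
// region is visited by exactly one worker.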
"Only thread-safe closures here"); 1351 if (num_regions() > ShenandoahParallelRegionStride) { 1352 ShenandoahParallelHeapRegionTask task(blk); 1353 workers()->run_task(&task); 1354 } else { 1355 heap_region_iterate(blk); 1356 } 1357 } 1358 1359 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure { 1360 private: 1361 ShenandoahMarkingContext* const _ctx; 1362 public: 1363 ShenandoahClearLivenessClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} 1364 1365 void heap_region_do(ShenandoahHeapRegion* r) { 1366 if (r->is_active()) { 1367 r->clear_live_data(); 1368 _ctx->capture_top_at_mark_start(r); 1369 } else { 1370 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number()); 1371 assert(_ctx->top_at_mark_start(r) == r->top(), 1372 "Region " SIZE_FORMAT " should already have correct TAMS", r->region_number()); 1373 } 1374 } 1375 1376 bool is_thread_safe() { return true; } 1377 }; 1378 1379 void ShenandoahHeap::op_init_mark() { 1380 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); 1381 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread"); 1382 1383 assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap"); 1384 assert(!marking_context()->is_complete(), "should not be complete"); 1385 1386 if (ShenandoahVerify) { 1387 verifier()->verify_before_concmark(); 1388 } 1389 1390 if (VerifyBeforeGC) { 1391 Universe::verify(); 1392 } 1393 1394 set_concurrent_mark_in_progress(true); 1395 // We need to reset all TLABs because we'd lose marks on all objects allocated in them. 1396 { 1397 ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable); 1398 make_parsable(true); 1399 } 1400 1401 { 1402 ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness); 1403 ShenandoahClearLivenessClosure clc; 1404 parallel_heap_region_iterate(&clc); 1405 } 1406 1407 // Make above changes visible to worker threads 1408 OrderAccess::fence(); 1409 1410 concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots); 1411 1412 if (UseTLAB) { 1413 ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs); 1414 resize_tlabs(); 1415 } 1416 1417 if (ShenandoahPacing) { 1418 pacer()->setup_for_mark(); 1419 } 1420 } 1421 1422 void ShenandoahHeap::op_mark() { 1423 concurrent_mark()->mark_from_roots(); 1424 } 1425 1426 class ShenandoahCompleteLivenessClosure : public ShenandoahHeapRegionClosure { 1427 private: 1428 ShenandoahMarkingContext* const _ctx; 1429 public: 1430 ShenandoahCompleteLivenessClosure() : _ctx(ShenandoahHeap::heap()->complete_marking_context()) {} 1431 1432 void heap_region_do(ShenandoahHeapRegion* r) { 1433 if (r->is_active()) { 1434 HeapWord *tams = _ctx->top_at_mark_start(r); 1435 HeapWord *top = r->top(); 1436 if (top > tams) { 1437 r->increase_live_data_alloc_words(pointer_delta(top, tams)); 1438 } 1439 } else { 1440 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number()); 1441 assert(_ctx->top_at_mark_start(r) == r->top(), 1442 "Region " SIZE_FORMAT " should have correct TAMS", r->region_number()); 1443 } 1444 } 1445 1446 bool is_thread_safe() { return true; } 1447 }; 1448 1449 void ShenandoahHeap::op_final_mark() { 1450 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); 1451 1452 // It is critical that we 1453 // evacuate roots right after finishing marking, so that we don't 1454 // get unmarked objects in the roots. 

  if (!cancelled_gc()) {
    concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);

    if (has_forwarded_objects()) {
      concurrent_mark()->update_roots(ShenandoahPhaseTimings::update_roots);
    }

    stop_concurrent_marking();

    {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);

      // All allocations past TAMS are implicitly live, adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
      ShenandoahCompleteLivenessClosure cl;
      parallel_heap_region_iterate(&cl);
    }

    {
      ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);

      make_parsable(true);

      trash_cset_regions();

      {
        ShenandoahHeapLocker locker(lock());
        _collection_set->clear();
        _free_set->clear();

        heuristics()->choose_collection_set(_collection_set);

        _free_set->rebuild();
      }
    }

    // If collection set has candidates, start evacuation.
    // Otherwise, bypass the rest of the cycle.
    if (!collection_set()->is_empty()) {
      ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);

      if (ShenandoahVerify) {
        verifier()->verify_before_evacuation();
      }

      set_evacuation_in_progress(true);
      // From here on, we need to update references.
      set_has_forwarded_objects(true);

      evacuate_and_update_roots();

      if (ShenandoahPacing) {
        pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }

  } else {
    concurrent_mark()->cancel();
    stop_concurrent_marking();

    if (process_references()) {
      // Abandon reference processing right away: pre-cleaning must have failed.
      ReferenceProcessor* rp = ref_processor();
      rp->disable_discovery();
      rp->abandon_partial_discovery();
      rp->verify_no_references_recorded();
    }
  }
}

void ShenandoahHeap::op_final_evac() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");

  set_evacuation_in_progress(false);

  retire_and_reset_gclabs();

  if (ShenandoahVerify) {
    verifier()->verify_after_evacuation();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }
}

void ShenandoahHeap::op_conc_evac() {
  ShenandoahEvacuationTask task(this, _collection_set, true);
  workers()->run_task(&task);
}

void ShenandoahHeap::op_stw_evac() {
  ShenandoahEvacuationTask task(this, _collection_set, false);
  workers()->run_task(&task);
}

void ShenandoahHeap::op_updaterefs() {
  update_heap_references(true);
}

void ShenandoahHeap::op_cleanup() {
  free_set()->recycle_trash();
}

void ShenandoahHeap::op_reset() {
  reset_mark_bitmap();
}

void ShenandoahHeap::op_preclean() {
  concurrent_mark()->preclean_weak_refs();
}

void ShenandoahHeap::op_init_traversal() {
  traversal_gc()->init_traversal_collection();
}

void ShenandoahHeap::op_traversal() {
  traversal_gc()->concurrent_traversal_collection();
}

void ShenandoahHeap::op_final_traversal() {
  traversal_gc()->final_traversal_collection();
}

void ShenandoahHeap::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  full_gc()->do_it(cause);
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
    resize_all_tlabs();
  }

  metrics.snap_after();
  metrics.print();

  if (metrics.is_good_progress("Full GC")) {
    _progress_last_gc.set();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    _progress_last_gc.unset();
  }
}

void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to a Full GC.

  clear_cancelled_gc();

  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  switch (point) {
    case _degenerated_traversal:
      {
        // Drop the collection set. Note: this leaves some already forwarded objects
        // behind, which may be problematic, see comments for ShenandoahEvacAssist
        // workarounds in ShenandoahTraversalHeuristics.

        ShenandoahHeapLocker locker(lock());
        collection_set()->clear_current_index();
        for (size_t i = 0; i < collection_set()->count(); i++) {
          ShenandoahHeapRegion* r = collection_set()->next();
          r->make_regular_bypass();
        }
        collection_set()->clear();
      }
      op_final_traversal();
      op_cleanup();
      return;

    // The cases below form the Duff's-like device: it describes the actual GC cycle,
    // but enters it at different points, depending on which concurrent phase had
    // degenerated.
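
    // Roughly, the entry points below map onto the regular cycle like this
    // (each case falls through into the next one):
    //   _degenerated_outside_cycle: reset bitmaps, init-mark
    //   _degenerated_mark:          final-mark, cleanup
    //   _degenerated_evac:          STW evacuation, init-update-refs
    //   _degenerated_updaterefs:    final-update-refs, cleanup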

    case _degenerated_outside_cycle:
      // We have degenerated from outside the cycle, which means something is bad with
      // the heap, most probably heavy humongous fragmentation, or we are very low on free
      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
      // we can do the most aggressive degen cycle, which includes processing references and
      // class unloading, unless those features are explicitly disabled.
      //
      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
      set_process_references(heuristics()->can_process_references());
      set_unload_classes(heuristics()->can_unload_classes());

      if (heuristics()->can_do_traversal_gc()) {
        // Not possible to degenerate from here, upgrade to Full GC right away.
        cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
        op_degenerated_fail();
        return;
      }

      op_reset();

      op_init_mark();
      if (cancelled_gc()) {
        op_degenerated_fail();
        return;
      }

    case _degenerated_mark:
      op_final_mark();
      if (cancelled_gc()) {
        op_degenerated_fail();
        return;
      }

      op_cleanup();

    case _degenerated_evac:
      // If the heuristics decided to run the cycle with evacuation, this flag is
      // set, and we can evacuate. Otherwise, this is the shortcut cycle.
      if (is_evacuation_in_progress()) {

        // Degeneration under oom-evac protocol might have left some objects in
        // collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For all the objects that are already evacuated,
        // it would be a simple check, which is supposed to be fast. This is also
        // safe to do even without degeneration, as CSet iterator is at beginning
        // in preparation for evacuation anyway.
        collection_set()->clear_current_index();

        op_stw_evac();
        if (cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      // If the heuristics decided to run the cycle with evacuation, this flag is
      // set, and we need to do update-refs. Otherwise, this is the shortcut cycle.
      if (has_forwarded_objects()) {
        op_init_updaterefs();
        if (cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

    case _degenerated_updaterefs:
      if (has_forwarded_objects()) {
        op_final_updaterefs();
        if (cancelled_gc()) {
          op_degenerated_fail();
          return;
        }
      }

      op_cleanup();
      break;

    default:
      ShouldNotReachHere();
  }

  if (ShenandoahVerify) {
    verifier()->verify_after_degenerated();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  metrics.snap_after();
  metrics.print();

  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
  // because that probably means the heap is overloaded and/or fragmented.
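  // A failed progress check cancels with _shenandoah_upgrade_to_full_gc and goes
  // through op_degenerated_futile(), which immediately runs a Full GC; that Full GC
  // re-evaluates progress itself and, failing too, lets the allocation path fail
  // for good.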
  if (!metrics.is_good_progress("Degenerated GC")) {
    _progress_last_gc.unset();
    cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
    op_degenerated_futile();
  } else {
    _progress_last_gc.set();
  }
}

void ShenandoahHeap::op_degenerated_fail() {
  log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
  shenandoah_policy()->record_degenerated_upgrade_to_full();
  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

void ShenandoahHeap::op_degenerated_futile() {
  shenandoah_policy()->record_degenerated_upgrade_to_full();
  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}

void ShenandoahHeap::stop_concurrent_marking() {
  assert(is_concurrent_mark_in_progress(), "How else could we get here?");
  if (!cancelled_gc()) {
    // Marking finished normally: any forwarded roots were already updated during
    // final mark, so no forwarded objects remain, and the marking context can be
    // promoted to complete. On cancellation, keep the has-forwarded flag set, so
    // the leftover forwardings are still updated later.
    set_has_forwarded_objects(false);
    mark_complete_marking_context();
  }
  set_concurrent_mark_in_progress(false);
}

void ShenandoahHeap::force_satb_flush_all_threads() {
  if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
    // No need to flush SATBs
    return;
  }

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ShenandoahThreadLocalData::set_force_satb_flush(t, true);
  }
  // The threads are not "acquiring" their thread-local data, but it does not
  // hurt to "release" the updates here anyway.
  OrderAccess::fence();
}

void ShenandoahHeap::set_gc_state_all_threads(char state) {
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ShenandoahThreadLocalData::set_gc_state(t, state);
  }
}

void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  set_gc_state_all_threads(_gc_state.raw_value());
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  set_gc_state_mask(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
  set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state_mask(EVACUATION, in_progress);
}

HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Initialize Brooks pointer for the next object
  HeapWord* result = obj + ShenandoahBrooksPointer::word_size();
  ShenandoahBrooksPointer::initialize(oop(result));
  return result;
}

ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _mark_context(ShenandoahHeap::heap()->marking_context()) {
}

ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _mark_context(ShenandoahHeap::heap()->marking_context()) {
}

bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  if (CompressedOops::is_null(obj)) {
    return false;
  }
  obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress() || ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
  return _mark_context->is_marked(obj);
}

bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  if (CompressedOops::is_null(obj)) {
    return false;
  }
  shenandoah_assert_not_forwarded(NULL, obj);
  return _mark_context->is_marked(obj);
}

void ShenandoahHeap::ref_processing_init() {
  assert(_max_workers > 0, "Sanity");

  _ref_processor =
    new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
                           ParallelRefProcEnabled,  // MT processing
                           _max_workers,            // Degree of MT processing
                           true,                    // MT discovery
                           _max_workers,            // Degree of MT discovery
                           false,                   // Reference discovery is not atomic
                           NULL,                    // No closure, should be installed before use
                           true);                   // Scale worker threads

  shenandoah_assert_rp_isalive_not_installed();
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording.
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);

  // Step 3. Wait until the GC control thread exits normally.
  control_thread()->stop();

  // Step 4. Stop String Dedup thread if it is active
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::stop();
  }
}

void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
  assert(heuristics()->can_unload_classes(), "Class unloading should be enabled");

  ShenandoahGCPhase root_phase(full_gc ?
                               ShenandoahPhaseTimings::full_gc_purge :
                               ShenandoahPhaseTimings::purge);

  ShenandoahIsAliveSelector alive;
  BoolObjectClosure* is_alive = alive.is_alive_closure();

  bool purged_class;

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_class_unload :
                            ShenandoahPhaseTimings::purge_class_unload);
    purged_class = SystemDictionary::do_unloading(gc_timer());
  }

  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_par :
                            ShenandoahPhaseTimings::purge_par);
    uint active = _workers->active_workers();
    StringDedupUnlinkOrOopsDoClosure dedup_cl(is_alive, NULL);
    ParallelCleaningTask unlink_task(is_alive, &dedup_cl, active, purged_class);
    _workers->run_task(&unlink_task);
  }

  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_cldg :
                            ShenandoahPhaseTimings::purge_cldg);
    ClassLoaderDataGraph::purge();
  }
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state_mask(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_process_references(bool pr) {
  _process_references.set_cond(pr);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::process_references() const {
  return _process_references.is_set();
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != NULL, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

address ShenandoahHeap::cancelled_gc_addr() {
  return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
}

address ShenandoahHeap::gc_state_addr() {
  return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
}

size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
  return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state_mask(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::add_nmethod(nm);
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::remove_nmethod(nm);
}

oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
  o = ShenandoahBarrierSet::barrier_set()->write_barrier(o);
  ShenandoahHeapLocker locker(lock());
  heap_region_containing(o)->make_pinned();
  return o;
}

void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
  o = ShenandoahBarrierSet::barrier_set()->read_barrier(o);
  ShenandoahHeapLocker locker(lock());
  heap_region_containing(o)->make_unpinned();
}

GCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

#ifdef ASSERT
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
  assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");

  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (UseDynamicNumberOfGCThreads ||
        (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
      assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
    } else {
      // Use ParallelGCThreads inside safepoints
      assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
    }
  } else {
    if (UseDynamicNumberOfGCThreads ||
        (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
      assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
    } else {
      // Use ConcGCThreads outside safepoints
      assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
    }
  }
}
#endif

ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert (_verifier != NULL, "sanity");
  return _verifier;
}

template<class T>
class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
private:
  T cl;
  ShenandoahHeap* _heap;
  ShenandoahRegionIterator* _regions;
  bool _concurrent;
public:
  ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
    AbstractGangTask("Concurrent Update References Task"),
    cl(T()),
    _heap(ShenandoahHeap::heap()),
    _regions(regions),
    _concurrent(concurrent) {
  }

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahHeapRegion* r = _regions->next();
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
    while (r != NULL) {
      HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
      assert (top_at_start_ur >= r->bottom(), "sanity");
      if (r->is_active() && !r->is_cset()) {
        _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur);
      }
      if (ShenandoahPacing) {
        _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
      }
      if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
        return;
      }
      r = _regions->next();
    }
  }
};

void ShenandoahHeap::update_heap_references(bool concurrent) {
  ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::op_init_updaterefs() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  set_evacuation_in_progress(false);

  retire_and_reset_gclabs();

  if (ShenandoahVerify) {
    verifier()->verify_before_updaterefs();
  }

  set_update_refs_in_progress(true);
  make_parsable(true);
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    r->set_concurrent_iteration_safe_limit(r->top());
  }

  // Reset iterator.
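  // All update-refs workers pull regions from this shared iterator, and
  // op_final_updaterefs() uses has_next() to detect left-over work, so the
  // iterator has to start from the first region here.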
  _update_refs_iterator.reset();

  if (ShenandoahPacing) {
    pacer()->setup_for_updaterefs();
  }
}

void ShenandoahHeap::op_final_updaterefs() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  // Check if there is left-over work, and finish it
  if (_update_refs_iterator.has_next()) {
    ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);

    // Finish updating references where we left off.
    clear_cancelled_gc();
    update_heap_references(false);
  }

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything. On degenerated paths, cancelled GC would not be set anyway.
  if (cancelled_gc()) {
    clear_cancelled_gc();
  }
  assert(!cancelled_gc(), "Should have been done right before");

  concurrent_mark()->update_roots(is_degenerated_gc_in_progress() ?
                                  ShenandoahPhaseTimings::degen_gc_update_roots :
                                  ShenandoahPhaseTimings::final_update_refs_roots);

  ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);

  trash_cset_regions();
  set_has_forwarded_objects(false);
  set_update_refs_in_progress(false);

  if (ShenandoahVerify) {
    verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

#ifdef ASSERT
void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
  _lock.assert_owned_by_current_thread();
}

void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
  _lock.assert_not_owned_by_current_thread();
}

void ShenandoahHeap::assert_heaplock_or_safepoint() {
  _lock.assert_owned_by_current_thread_or_safepoint();
}
#endif

void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  print_heap_regions_on(st);
}

bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
  size_t slice = r->region_number() / _bitmap_regions_per_slice;

  size_t regions_from = _bitmap_regions_per_slice * slice;
  size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
  for (size_t g = regions_from; g < regions_to; g++) {
    assert (g / _bitmap_regions_per_slice == slice, "same slice");
    if (skip_self && g == r->region_number()) continue;
    if (get_region(g)->is_committed()) {
      return true;
    }
  }
  return false;
}

bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
  assert_heaplock_owned_by_current_thread();

  if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, so we exit right away.
    return true;
  }

  // Commit the bitmap slice:
  size_t slice = r->region_number() / _bitmap_regions_per_slice;
  size_t off   = _bitmap_bytes_per_slice * slice;
  size_t len   = _bitmap_bytes_per_slice;
  if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
    return false;
  }
  return true;
}

bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
  assert_heaplock_owned_by_current_thread();

  if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
    return true;
  }

  // Uncommit the bitmap slice:
  size_t slice = r->region_number() / _bitmap_regions_per_slice;
  size_t off   = _bitmap_bytes_per_slice * slice;
  size_t len   = _bitmap_bytes_per_slice;
  if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
    return false;
  }
  return true;
}

void ShenandoahHeap::safepoint_synchronize_begin() {
  if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void ShenandoahHeap::safepoint_synchronize_end() {
  if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

void ShenandoahHeap::vmop_entry_init_mark() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitMark op;
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahHeap::vmop_entry_final_mark() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op;
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

void ShenandoahHeap::vmop_entry_final_evac() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);

  VM_ShenandoahFinalEvac op;
  VMThread::execute(&op); // jump to entry_final_evac under safepoint
}

void ShenandoahHeap::vmop_entry_init_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op;
  VMThread::execute(&op);
}

void ShenandoahHeap::vmop_entry_final_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op;
  VMThread::execute(&op);
}

void ShenandoahHeap::vmop_entry_init_traversal() {
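  // As in the other vmop entries: the *_gross phases bracket VMThread::execute(),
  // so they also account for the time needed to reach the safepoint; the net
  // init_traversal_gc phase is taken in entry_init_traversal() once inside the
  // VM operation.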
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitTraversalGC op;
  VMThread::execute(&op);
}

void ShenandoahHeap::vmop_entry_final_traversal() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalTraversalGC op;
  VMThread::execute(&op);
}

void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause);
  VMThread::execute(&op);
}

void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);

  VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
  VMThread::execute(&degenerated_gc);
}

void ShenandoahHeap::entry_init_mark() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
  const char* msg = init_mark_event_message();
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahHeap::entry_final_mark() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
  const char* msg = final_mark_event_message();
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahHeap::entry_final_evac() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
  static const char* msg = "Pause Final Evac";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  op_final_evac();
}

void ShenandoahHeap::entry_init_updaterefs() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);

  static const char* msg = "Pause Init Update Refs";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required

  op_init_updaterefs();
}

void ShenandoahHeap::entry_final_updaterefs() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
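
  // Net counterpart of the final_update_refs_gross phase taken in
  // vmop_entry_final_updaterefs(): this measures only the work done
  // inside the safepoint.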

  static const char* msg = "Pause Final Update Refs";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahHeap::entry_init_traversal() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);

  static const char* msg = "Pause Init Traversal";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
                              "init traversal");

  op_init_traversal();
}

void ShenandoahHeap::entry_final_traversal() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);

  static const char* msg = "Pause Final Traversal";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
                              "final traversal");

  op_final_traversal();
}

void ShenandoahHeap::entry_full(GCCause::Cause cause) {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);

  static const char* msg = "Pause Full";
  GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahHeap::entry_degenerated(int point) {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);

  ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
  const char* msg = degen_event_message(dpoint);
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  set_degenerated_gc_in_progress(true);
  op_degenerated(dpoint);
  set_degenerated_gc_in_progress(false);
}

void ShenandoahHeap::entry_mark() {
  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  const char* msg = conc_mark_event_message();
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  try_inject_alloc_failure();
  op_mark();
}

void ShenandoahHeap::entry_evac() {
  ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

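  // try_inject_alloc_failure() is a stress hook: under ShenandoahAllocFailureALot
  // it occasionally simulates an allocation failure right before the phase runs,
  // exercising the cancellation and degeneration paths (see its definition below).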
  try_inject_alloc_failure();
  op_conc_evac();
}

void ShenandoahHeap::entry_updaterefs() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);

  static const char* msg = "Concurrent update references";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahHeap::entry_cleanup() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);

  static const char* msg = "Concurrent cleanup";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup();
}

void ShenandoahHeap::entry_reset() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);

  static const char* msg = "Concurrent reset";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  try_inject_alloc_failure();
  op_reset();
}

void ShenandoahHeap::entry_preclean() {
  if (ShenandoahPreclean && process_references()) {
    static const char* msg = "Concurrent precleaning";
    GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
    EventMark em("%s", msg);

    ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);

    ShenandoahWorkerScope scope(workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
                                "concurrent preclean",
                                /* check_workers = */ false);

    try_inject_alloc_failure();
    op_preclean();
  }
}

void ShenandoahHeap::entry_traversal() {
  static const char* msg = "Concurrent traversal";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
                              "concurrent traversal");

  try_inject_alloc_failure();
  op_traversal();
}

void ShenandoahHeap::entry_uncommit(double shrink_before) {
  static const char *msg = "Concurrent uncommit";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);

  op_uncommit(shrink_before);
}

void ShenandoahHeap::try_inject_alloc_failure() {
  if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
    _inject_alloc_failure.set();
    os::naked_short_sleep(1);
    if (cancelled_gc()) {
      log_info(gc)("Allocation failure was successfully injected");
    }
  }
}

bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}

void ShenandoahHeap::initialize_serviceability() {
  _memory_pool = new ShenandoahMemoryPool(this);
  _cycle_memory_manager.add_pool(_memory_pool);
  _stw_memory_manager.add_pool(_memory_pool);
}
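
// Both managers report against the same single pool that covers the whole heap:
// _cycle_memory_manager presumably accounts the concurrent cycles, while
// _stw_memory_manager accounts the stop-the-world (degenerated and full)
// collections exposed through java.lang.management.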
GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(&_cycle_memory_manager);
  memory_managers.append(&_stw_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_memory_pool);
  return memory_pools;
}

void ShenandoahHeap::enter_evacuation() {
  _oom_evac_handler.enter_evacuation();
}

void ShenandoahHeap::leave_evacuation() {
  _oom_evac_handler.leave_evacuation();
}

ShenandoahRegionIterator::ShenandoahRegionIterator() :
  _heap(ShenandoahHeap::heap()),
  _index(0) {}

ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
  _heap(heap),
  _index(0) {}

void ShenandoahRegionIterator::reset() {
  _index = 0;
}

bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}

char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}

void ShenandoahHeap::deduplicate_string(oop str) {
  assert(java_lang_String::is_instance(str), "invariant");

  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::deduplicate(str);
  }
}

const char* ShenandoahHeap::init_mark_event_message() const {
  bool update_refs = has_forwarded_objects();
  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (update_refs && proc_refs && unload_cls) {
    return "Pause Init Mark (update refs) (process weakrefs) (unload classes)";
  } else if (update_refs && proc_refs) {
    return "Pause Init Mark (update refs) (process weakrefs)";
  } else if (update_refs && unload_cls) {
    return "Pause Init Mark (update refs) (unload classes)";
  } else if (proc_refs && unload_cls) {
    return "Pause Init Mark (process weakrefs) (unload classes)";
  } else if (update_refs) {
    return "Pause Init Mark (update refs)";
  } else if (proc_refs) {
    return "Pause Init Mark (process weakrefs)";
  } else if (unload_cls) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahHeap::final_mark_event_message() const {
  bool update_refs = has_forwarded_objects();
  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (update_refs && proc_refs && unload_cls) {
    return "Pause Final Mark (update refs) (process weakrefs) (unload classes)";
  } else if (update_refs && proc_refs) {
    return "Pause Final Mark (update refs) (process weakrefs)";
  } else if (update_refs && unload_cls) {
    return "Pause Final Mark (update refs) (unload classes)";
  } else if (proc_refs && unload_cls) {
    return "Pause Final Mark (process weakrefs) (unload classes)";
  } else if (update_refs) {
    return "Pause Final Mark (update refs)";
  } else if (proc_refs) {
    return "Pause Final Mark (process weakrefs)";
  } else if (unload_cls) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahHeap::conc_mark_event_message() const {
  bool update_refs = has_forwarded_objects();
  bool proc_refs = process_references();
  bool unload_cls = unload_classes();
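
  // The matrix below enumerates all eight flag combinations instead of formatting
  // a message at runtime, presumably so that each message stays a constant string
  // literal and no allocation or formatting happens on the GC path.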

  if (update_refs && proc_refs && unload_cls) {
    return "Concurrent marking (update refs) (process weakrefs) (unload classes)";
  } else if (update_refs && proc_refs) {
    return "Concurrent marking (update refs) (process weakrefs)";
  } else if (update_refs && unload_cls) {
    return "Concurrent marking (update refs) (unload classes)";
  } else if (proc_refs && unload_cls) {
    return "Concurrent marking (process weakrefs) (unload classes)";
  } else if (update_refs) {
    return "Concurrent marking (update refs)";
  } else if (proc_refs) {
    return "Concurrent marking (process weakrefs)";
  } else if (unload_cls) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}

const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      return "Pause Degenerated GC (<UNSET>)";
    case _degenerated_traversal:
      return "Pause Degenerated GC (Traversal)";
    case _degenerated_outside_cycle:
      return "Pause Degenerated GC (Outside of Cycle)";
    case _degenerated_mark:
      return "Pause Degenerated GC (Mark)";
    case _degenerated_evac:
      return "Pause Degenerated GC (Evacuation)";
    case _degenerated_updaterefs:
      return "Pause Degenerated GC (Update Refs)";
    default:
      ShouldNotReachHere();
      return "ERROR";
  }
}

jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
#ifdef ASSERT
  assert(worker_id < _max_workers, "sanity");
  for (uint i = 0; i < num_regions(); i++) {
    assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
  }
#endif
  return _liveness_cache[worker_id];
}

void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
  assert(worker_id < _max_workers, "sanity");
  jushort* ld = _liveness_cache[worker_id];
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    jushort live = ld[i];
    if (live > 0) {
      r->increase_live_data_gc_words(live);
      ld[i] = 0;
    }
  }
}

size_t ShenandoahHeap::obj_size(oop obj) const {
  return CollectedHeap::obj_size(obj) + ShenandoahBrooksPointer::word_size();
}

ptrdiff_t ShenandoahHeap::cell_header_size() const {
  return ShenandoahBrooksPointer::byte_size();
}

BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
  return ShenandoahHeap::heap()->has_forwarded_objects() ? reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl)
                                                         : reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
}
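
// ShenandoahIsAliveSelector picks between the two closures above at the call site:
// once forwarded objects may exist (from evacuation onward), liveness must be asked
// of the to-space copy, so the forwarded-aware closure resolves the object through
// the Brooks pointer before consulting the mark bitmap; otherwise the plain closure
// suffices.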