1 /* 2 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "memory/allocation.hpp" 27 #include "memory/universe.hpp" 28 29 #include "gc/shared/gcArguments.hpp" 30 #include "gc/shared/gcTimer.hpp" 31 #include "gc/shared/gcTraceTime.inline.hpp" 32 #include "gc/shared/locationPrinter.inline.hpp" 33 #include "gc/shared/memAllocator.hpp" 34 #include "gc/shared/oopStorageSet.hpp" 35 #include "gc/shared/plab.hpp" 36 37 #include "gc/shenandoah/shenandoahBarrierSet.hpp" 38 #include "gc/shenandoah/shenandoahClosures.inline.hpp" 39 #include "gc/shenandoah/shenandoahCollectionSet.hpp" 40 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 41 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp" 42 #include "gc/shenandoah/shenandoahConcurrentRoots.hpp" 43 #include "gc/shenandoah/shenandoahControlThread.hpp" 44 #include "gc/shenandoah/shenandoahFreeSet.hpp" 45 #include "gc/shenandoah/shenandoahPhaseTimings.hpp" 46 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 47 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" 48 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" 49 #include "gc/shenandoah/shenandoahInitLogger.hpp" 50 #include "gc/shenandoah/shenandoahMarkCompact.hpp" 51 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" 52 #include "gc/shenandoah/shenandoahMemoryPool.hpp" 53 #include "gc/shenandoah/shenandoahMetrics.hpp" 54 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" 55 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" 56 #include "gc/shenandoah/shenandoahPacer.inline.hpp" 57 #include "gc/shenandoah/shenandoahPadding.hpp" 58 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp" 59 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" 60 #include "gc/shenandoah/shenandoahStringDedup.hpp" 61 #include "gc/shenandoah/shenandoahTaskqueue.hpp" 62 #include "gc/shenandoah/shenandoahUtils.hpp" 63 #include "gc/shenandoah/shenandoahVerifier.hpp" 64 #include "gc/shenandoah/shenandoahCodeRoots.hpp" 65 #include "gc/shenandoah/shenandoahVMOperations.hpp" 66 #include "gc/shenandoah/shenandoahWorkGroup.hpp" 67 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" 68 #include "gc/shenandoah/mode/shenandoahIUMode.hpp" 69 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp" 70 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp" 71 #if INCLUDE_JFR 72 #include "gc/shenandoah/shenandoahJfrSupport.hpp" 73 #endif 74 75 #include "memory/metaspace.hpp" 76 #include "oops/compressedOops.inline.hpp" 77 #include "runtime/atomic.hpp" 78 #include "runtime/globals.hpp" 79 #include "runtime/interfaceSupport.inline.hpp" 80 #include "runtime/orderAccess.hpp" 81 #include "runtime/safepointMechanism.hpp" 82 #include "runtime/vmThread.hpp" 83 #include "services/mallocTracker.hpp" 84 #include "utilities/powerOfTwo.hpp" 85 86 #ifdef ASSERT 87 template <class T> 88 void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) { 89 T o = RawAccess<>::oop_load(p); 90 if (! CompressedOops::is_null(o)) { 91 oop obj = CompressedOops::decode_not_null(o); 92 shenandoah_assert_not_forwarded(p, obj); 93 } 94 } 95 96 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); } 97 void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_work(p); } 98 #endif 99 100 class ShenandoahPretouchHeapTask : public AbstractGangTask { 101 private: 102 ShenandoahRegionIterator _regions; 103 const size_t _page_size; 104 public: 105 ShenandoahPretouchHeapTask(size_t page_size) : 106 AbstractGangTask("Shenandoah Pretouch Heap"), 107 _page_size(page_size) {} 108 109 virtual void work(uint worker_id) { 110 ShenandoahHeapRegion* r = _regions.next(); 111 while (r != NULL) { 112 if (r->is_committed()) { 113 os::pretouch_memory(r->bottom(), r->end(), _page_size); 114 } 115 r = _regions.next(); 116 } 117 } 118 }; 119 120 class ShenandoahPretouchBitmapTask : public AbstractGangTask { 121 private: 122 ShenandoahRegionIterator _regions; 123 char* _bitmap_base; 124 const size_t _bitmap_size; 125 const size_t _page_size; 126 public: 127 ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) : 128 AbstractGangTask("Shenandoah Pretouch Bitmap"), 129 _bitmap_base(bitmap_base), 130 _bitmap_size(bitmap_size), 131 _page_size(page_size) {} 132 133 virtual void work(uint worker_id) { 134 ShenandoahHeapRegion* r = _regions.next(); 135 while (r != NULL) { 136 size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); 137 size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); 138 assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size); 139 140 if (r->is_committed()) { 141 os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size); 142 } 143 144 r = _regions.next(); 145 } 146 } 147 }; 148 149 jint ShenandoahHeap::initialize() { 150 // 151 // Figure out heap sizing 152 // 153 154 size_t init_byte_size = InitialHeapSize; 155 size_t min_byte_size = MinHeapSize; 156 size_t max_byte_size = MaxHeapSize; 157 size_t heap_alignment = HeapAlignment; 158 159 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes(); 160 161 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap"); 162 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap"); 163 164 _num_regions = ShenandoahHeapRegion::region_count(); 165 166 // Now we know the number of regions, initialize the heuristics. 167 initialize_heuristics(); 168 169 size_t num_committed_regions = init_byte_size / reg_size_bytes; 170 num_committed_regions = MIN2(num_committed_regions, _num_regions); 171 assert(num_committed_regions <= _num_regions, "sanity"); 172 _initial_size = num_committed_regions * reg_size_bytes; 173 174 size_t num_min_regions = min_byte_size / reg_size_bytes; 175 num_min_regions = MIN2(num_min_regions, _num_regions); 176 assert(num_min_regions <= _num_regions, "sanity"); 177 _minimum_size = num_min_regions * reg_size_bytes; 178 179 _committed = _initial_size; 180 181 size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size(); 182 size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size(); 183 size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size(); 184 185 // 186 // Reserve and commit memory for heap 187 // 188 189 ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment); 190 initialize_reserved_region(heap_rs); 191 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize); 192 _heap_region_special = heap_rs.special(); 193 194 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0, 195 "Misaligned heap: " PTR_FORMAT, p2i(base())); 196 197 #if SHENANDOAH_OPTIMIZED_OBJTASK 198 // The optimized ObjArrayChunkedTask takes some bits away from the full object bits. 199 // Fail if we ever attempt to address more than we can. 200 if ((uintptr_t)heap_rs.end() >= ObjArrayChunkedTask::max_addressable()) { 201 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n" 202 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n" 203 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).", 204 p2i(heap_rs.base()), p2i(heap_rs.end()), ObjArrayChunkedTask::max_addressable()); 205 vm_exit_during_initialization("Fatal Error", buf); 206 } 207 #endif 208 209 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size); 210 if (!_heap_region_special) { 211 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false, 212 "Cannot commit heap memory"); 213 } 214 215 // 216 // Reserve and commit memory for bitmap(s) 217 // 218 219 _bitmap_size = MarkBitMap::compute_size(heap_rs.size()); 220 _bitmap_size = align_up(_bitmap_size, bitmap_page_size); 221 222 size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor(); 223 224 guarantee(bitmap_bytes_per_region != 0, 225 "Bitmap bytes per region should not be zero"); 226 guarantee(is_power_of_2(bitmap_bytes_per_region), 227 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region); 228 229 if (bitmap_page_size > bitmap_bytes_per_region) { 230 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region; 231 _bitmap_bytes_per_slice = bitmap_page_size; 232 } else { 233 _bitmap_regions_per_slice = 1; 234 _bitmap_bytes_per_slice = bitmap_bytes_per_region; 235 } 236 237 guarantee(_bitmap_regions_per_slice >= 1, 238 "Should have at least one region per slice: " SIZE_FORMAT, 239 _bitmap_regions_per_slice); 240 241 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0, 242 "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT, 243 _bitmap_bytes_per_slice, bitmap_page_size); 244 245 ReservedSpace bitmap(_bitmap_size, bitmap_page_size); 246 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC); 247 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize); 248 _bitmap_region_special = bitmap.special(); 249 250 size_t bitmap_init_commit = _bitmap_bytes_per_slice * 251 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice; 252 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit); 253 if (!_bitmap_region_special) { 254 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false, 255 "Cannot commit bitmap memory"); 256 } 257 258 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions); 259 260 if (ShenandoahVerify) { 261 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size); 262 if (!verify_bitmap.special()) { 263 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false, 264 "Cannot commit verification bitmap memory"); 265 } 266 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC); 267 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize); 268 _verification_bit_map.initialize(_heap_region, verify_bitmap_region); 269 _verifier = new ShenandoahVerifier(this, &_verification_bit_map); 270 } 271 272 // Reserve aux bitmap for use in object_iterate(). We don't commit it here. 273 ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size); 274 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC); 275 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize); 276 _aux_bitmap_region_special = aux_bitmap.special(); 277 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region); 278 279 // 280 // Create regions and region sets 281 // 282 size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE); 283 size_t region_storage_size = align_up(region_align * _num_regions, region_page_size); 284 region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity()); 285 286 ReservedSpace region_storage(region_storage_size, region_page_size); 287 MemTracker::record_virtual_memory_type(region_storage.base(), mtGC); 288 if (!region_storage.special()) { 289 os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false, 290 "Cannot commit region memory"); 291 } 292 293 // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks. 294 // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there. 295 // If not successful, bite a bullet and allocate at whatever address. 296 { 297 size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity()); 298 size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align); 299 300 uintptr_t min = round_up_power_of_2(cset_align); 301 uintptr_t max = (1u << 30u); 302 303 for (uintptr_t addr = min; addr <= max; addr <<= 1u) { 304 char* req_addr = (char*)addr; 305 assert(is_aligned(req_addr, cset_align), "Should be aligned"); 306 ReservedSpace cset_rs(cset_size, cset_align, false, req_addr); 307 if (cset_rs.is_reserved()) { 308 assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr); 309 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base()); 310 break; 311 } 312 } 313 314 if (_collection_set == NULL) { 315 ReservedSpace cset_rs(cset_size, cset_align, false); 316 _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base()); 317 } 318 } 319 320 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC); 321 _free_set = new ShenandoahFreeSet(this, _num_regions); 322 323 { 324 ShenandoahHeapLocker locker(lock()); 325 326 for (size_t i = 0; i < _num_regions; i++) { 327 HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i; 328 bool is_committed = i < num_committed_regions; 329 void* loc = region_storage.base() + i * region_align; 330 331 ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed); 332 assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity"); 333 334 _marking_context->initialize_top_at_mark_start(r); 335 _regions[i] = r; 336 assert(!collection_set()->is_in(i), "New region should not be in collection set"); 337 } 338 339 // Initialize to complete 340 _marking_context->mark_complete(); 341 342 _free_set->rebuild(); 343 } 344 345 if (AlwaysPreTouch) { 346 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads, 347 // before initialize() below zeroes it with initializing thread. For any given region, 348 // we touch the region and the corresponding bitmaps from the same thread. 349 ShenandoahPushWorkerScope scope(workers(), _max_workers, false); 350 351 _pretouch_heap_page_size = heap_page_size; 352 _pretouch_bitmap_page_size = bitmap_page_size; 353 354 #ifdef LINUX 355 // UseTransparentHugePages would madvise that backing memory can be coalesced into huge 356 // pages. But, the kernel needs to know that every small page is used, in order to coalesce 357 // them into huge one. Therefore, we need to pretouch with smaller pages. 358 if (UseTransparentHugePages) { 359 _pretouch_heap_page_size = (size_t)os::vm_page_size(); 360 _pretouch_bitmap_page_size = (size_t)os::vm_page_size(); 361 } 362 #endif 363 364 // OS memory managers may want to coalesce back-to-back pages. Make their jobs 365 // simpler by pre-touching continuous spaces (heap and bitmap) separately. 366 367 ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size); 368 _workers->run_task(&bcl); 369 370 ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size); 371 _workers->run_task(&hcl); 372 } 373 374 // 375 // Initialize the rest of GC subsystems 376 // 377 378 _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC); 379 for (uint worker = 0; worker < _max_workers; worker++) { 380 _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC); 381 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData)); 382 } 383 384 // There should probably be Shenandoah-specific options for these, 385 // just as there are G1-specific options. 386 { 387 ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set(); 388 satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold 389 satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent 390 } 391 392 _monitoring_support = new ShenandoahMonitoringSupport(this); 393 _phase_timings = new ShenandoahPhaseTimings(max_workers()); 394 ShenandoahStringDedup::initialize(); 395 ShenandoahCodeRoots::initialize(); 396 397 if (ShenandoahPacing) { 398 _pacer = new ShenandoahPacer(this); 399 _pacer->setup_for_idle(); 400 } else { 401 _pacer = NULL; 402 } 403 404 _control_thread = new ShenandoahControlThread(); 405 406 _ref_proc_mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1); 407 _ref_proc_mt_discovery = _max_workers > 1; 408 409 ShenandoahInitLogger::print(); 410 411 return JNI_OK; 412 } 413 414 void ShenandoahHeap::initialize_heuristics() { 415 if (ShenandoahGCMode != NULL) { 416 if (strcmp(ShenandoahGCMode, "satb") == 0) { 417 _gc_mode = new ShenandoahSATBMode(); 418 } else if (strcmp(ShenandoahGCMode, "iu") == 0) { 419 _gc_mode = new ShenandoahIUMode(); 420 } else if (strcmp(ShenandoahGCMode, "passive") == 0) { 421 _gc_mode = new ShenandoahPassiveMode(); 422 } else { 423 vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option"); 424 } 425 } else { 426 ShouldNotReachHere(); 427 } 428 _gc_mode->initialize_flags(); 429 if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) { 430 vm_exit_during_initialization( 431 err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.", 432 _gc_mode->name())); 433 } 434 if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) { 435 vm_exit_during_initialization( 436 err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", 437 _gc_mode->name())); 438 } 439 440 _heuristics = _gc_mode->initialize_heuristics(); 441 442 if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) { 443 vm_exit_during_initialization( 444 err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.", 445 _heuristics->name())); 446 } 447 if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) { 448 vm_exit_during_initialization( 449 err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", 450 _heuristics->name())); 451 } 452 } 453 454 #ifdef _MSC_VER 455 #pragma warning( push ) 456 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list 457 #endif 458 459 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : 460 CollectedHeap(), 461 _initial_size(0), 462 _used(0), 463 _committed(0), 464 _bytes_allocated_since_gc_start(0), 465 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)), 466 _workers(NULL), 467 _safepoint_workers(NULL), 468 _heap_region_special(false), 469 _num_regions(0), 470 _regions(NULL), 471 _update_refs_iterator(this), 472 _control_thread(NULL), 473 _shenandoah_policy(policy), 474 _heuristics(NULL), 475 _free_set(NULL), 476 _scm(new ShenandoahConcurrentMark()), 477 _full_gc(new ShenandoahMarkCompact()), 478 _pacer(NULL), 479 _verifier(NULL), 480 _phase_timings(NULL), 481 _monitoring_support(NULL), 482 _memory_pool(NULL), 483 _stw_memory_manager("Shenandoah Pauses", "end of GC pause"), 484 _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"), 485 _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), 486 _soft_ref_policy(), 487 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes), 488 _ref_processor(NULL), 489 _marking_context(NULL), 490 _bitmap_size(0), 491 _bitmap_regions_per_slice(0), 492 _bitmap_bytes_per_slice(0), 493 _bitmap_region_special(false), 494 _aux_bitmap_region_special(false), 495 _liveness_cache(NULL), 496 _collection_set(NULL) 497 { 498 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this)); 499 500 _max_workers = MAX2(_max_workers, 1U); 501 _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers, 502 /* are_GC_task_threads */ true, 503 /* are_ConcurrentGC_threads */ true); 504 if (_workers == NULL) { 505 vm_exit_during_initialization("Failed necessary allocation."); 506 } else { 507 _workers->initialize_workers(); 508 } 509 510 if (ParallelGCThreads > 1) { 511 _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread", 512 ParallelGCThreads, 513 /* are_GC_task_threads */ false, 514 /* are_ConcurrentGC_threads */ false); 515 _safepoint_workers->initialize_workers(); 516 } 517 } 518 519 #ifdef _MSC_VER 520 #pragma warning( pop ) 521 #endif 522 523 class ShenandoahResetBitmapTask : public AbstractGangTask { 524 private: 525 ShenandoahRegionIterator _regions; 526 527 public: 528 ShenandoahResetBitmapTask() : 529 AbstractGangTask("Parallel Reset Bitmap Task") {} 530 531 void work(uint worker_id) { 532 ShenandoahHeapRegion* region = _regions.next(); 533 ShenandoahHeap* heap = ShenandoahHeap::heap(); 534 ShenandoahMarkingContext* const ctx = heap->marking_context(); 535 while (region != NULL) { 536 if (heap->is_bitmap_slice_committed(region)) { 537 ctx->clear_bitmap(region); 538 } 539 region = _regions.next(); 540 } 541 } 542 }; 543 544 void ShenandoahHeap::reset_mark_bitmap() { 545 assert_gc_workers(_workers->active_workers()); 546 mark_incomplete_marking_context(); 547 548 ShenandoahResetBitmapTask task; 549 _workers->run_task(&task); 550 } 551 552 void ShenandoahHeap::print_on(outputStream* st) const { 553 st->print_cr("Shenandoah Heap"); 554 st->print_cr(" " SIZE_FORMAT "%s total, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used", 555 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()), 556 byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()), 557 byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); 558 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions", 559 num_regions(), 560 byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()), 561 proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes())); 562 563 st->print("Status: "); 564 if (has_forwarded_objects()) st->print("has forwarded objects, "); 565 if (is_concurrent_mark_in_progress()) st->print("marking, "); 566 if (is_evacuation_in_progress()) st->print("evacuating, "); 567 if (is_update_refs_in_progress()) st->print("updating refs, "); 568 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, "); 569 if (is_full_gc_in_progress()) st->print("full gc, "); 570 if (is_full_gc_move_in_progress()) st->print("full gc move, "); 571 if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, "); 572 if (is_concurrent_strong_root_in_progress() && 573 !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, "); 574 575 if (cancelled_gc()) { 576 st->print("cancelled"); 577 } else { 578 st->print("not cancelled"); 579 } 580 st->cr(); 581 582 st->print_cr("Reserved region:"); 583 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ", 584 p2i(reserved_region().start()), 585 p2i(reserved_region().end())); 586 587 ShenandoahCollectionSet* cset = collection_set(); 588 st->print_cr("Collection set:"); 589 if (cset != NULL) { 590 st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address())); 591 st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address())); 592 } else { 593 st->print_cr(" (NULL)"); 594 } 595 596 st->cr(); 597 MetaspaceUtils::print_on(st); 598 599 if (Verbose) { 600 print_heap_regions_on(st); 601 } 602 } 603 604 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure { 605 public: 606 void do_thread(Thread* thread) { 607 assert(thread != NULL, "Sanity"); 608 assert(thread->is_Worker_thread(), "Only worker thread expected"); 609 ShenandoahThreadLocalData::initialize_gclab(thread); 610 } 611 }; 612 613 void ShenandoahHeap::post_initialize() { 614 CollectedHeap::post_initialize(); 615 MutexLocker ml(Threads_lock); 616 617 ShenandoahInitWorkerGCLABClosure init_gclabs; 618 _workers->threads_do(&init_gclabs); 619 620 // gclab can not be initialized early during VM startup, as it can not determinate its max_size. 621 // Now, we will let WorkGang to initialize gclab when new worker is created. 622 _workers->set_initialize_gclab(); 623 624 _scm->initialize(_max_workers); 625 _full_gc->initialize(_gc_timer); 626 627 ref_processing_init(); 628 629 _heuristics->initialize(); 630 631 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers()); 632 } 633 634 size_t ShenandoahHeap::used() const { 635 return Atomic::load_acquire(&_used); 636 } 637 638 size_t ShenandoahHeap::committed() const { 639 OrderAccess::acquire(); 640 return _committed; 641 } 642 643 void ShenandoahHeap::increase_committed(size_t bytes) { 644 shenandoah_assert_heaplocked_or_safepoint(); 645 _committed += bytes; 646 } 647 648 void ShenandoahHeap::decrease_committed(size_t bytes) { 649 shenandoah_assert_heaplocked_or_safepoint(); 650 _committed -= bytes; 651 } 652 653 void ShenandoahHeap::increase_used(size_t bytes) { 654 Atomic::add(&_used, bytes); 655 } 656 657 void ShenandoahHeap::set_used(size_t bytes) { 658 Atomic::release_store_fence(&_used, bytes); 659 } 660 661 void ShenandoahHeap::decrease_used(size_t bytes) { 662 assert(used() >= bytes, "never decrease heap size by more than we've left"); 663 Atomic::sub(&_used, bytes); 664 } 665 666 void ShenandoahHeap::increase_allocated(size_t bytes) { 667 Atomic::add(&_bytes_allocated_since_gc_start, bytes); 668 } 669 670 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) { 671 size_t bytes = words * HeapWordSize; 672 if (!waste) { 673 increase_used(bytes); 674 } 675 increase_allocated(bytes); 676 if (ShenandoahPacing) { 677 control_thread()->pacing_notify_alloc(words); 678 if (waste) { 679 pacer()->claim_for_alloc(words, true); 680 } 681 } 682 } 683 684 size_t ShenandoahHeap::capacity() const { 685 return committed(); 686 } 687 688 size_t ShenandoahHeap::max_capacity() const { 689 return _num_regions * ShenandoahHeapRegion::region_size_bytes(); 690 } 691 692 size_t ShenandoahHeap::min_capacity() const { 693 return _minimum_size; 694 } 695 696 size_t ShenandoahHeap::initial_capacity() const { 697 return _initial_size; 698 } 699 700 bool ShenandoahHeap::is_in(const void* p) const { 701 HeapWord* heap_base = (HeapWord*) base(); 702 HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions(); 703 return p >= heap_base && p < last_region_end; 704 } 705 706 void ShenandoahHeap::op_uncommit(double shrink_before) { 707 assert (ShenandoahUncommit, "should be enabled"); 708 709 // Application allocates from the beginning of the heap, and GC allocates at 710 // the end of it. It is more efficient to uncommit from the end, so that applications 711 // could enjoy the near committed regions. GC allocations are much less frequent, 712 // and therefore can accept the committing costs. 713 714 size_t count = 0; 715 for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow 716 ShenandoahHeapRegion* r = get_region(i - 1); 717 if (r->is_empty_committed() && (r->empty_time() < shrink_before)) { 718 ShenandoahHeapLocker locker(lock()); 719 if (r->is_empty_committed()) { 720 // Do not uncommit below minimal capacity 721 if (committed() < min_capacity() + ShenandoahHeapRegion::region_size_bytes()) { 722 break; 723 } 724 725 r->make_uncommitted(); 726 count++; 727 } 728 } 729 SpinPause(); // allow allocators to take the lock 730 } 731 732 if (count > 0) { 733 control_thread()->notify_heap_changed(); 734 } 735 } 736 737 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) { 738 // New object should fit the GCLAB size 739 size_t min_size = MAX2(size, PLAB::min_size()); 740 741 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively. 742 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2; 743 new_size = MIN2(new_size, PLAB::max_size()); 744 new_size = MAX2(new_size, PLAB::min_size()); 745 746 // Record new heuristic value even if we take any shortcut. This captures 747 // the case when moderately-sized objects always take a shortcut. At some point, 748 // heuristics should catch up with them. 749 ShenandoahThreadLocalData::set_gclab_size(thread, new_size); 750 751 if (new_size < size) { 752 // New size still does not fit the object. Fall back to shared allocation. 753 // This avoids retiring perfectly good GCLABs, when we encounter a large object. 754 return NULL; 755 } 756 757 // Retire current GCLAB, and allocate a new one. 758 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread); 759 gclab->retire(); 760 761 size_t actual_size = 0; 762 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size); 763 if (gclab_buf == NULL) { 764 return NULL; 765 } 766 767 assert (size <= actual_size, "allocation should fit"); 768 769 if (ZeroTLAB) { 770 // ..and clear it. 771 Copy::zero_to_words(gclab_buf, actual_size); 772 } else { 773 // ...and zap just allocated object. 774 #ifdef ASSERT 775 // Skip mangling the space corresponding to the object header to 776 // ensure that the returned space is not considered parsable by 777 // any concurrent GC thread. 778 size_t hdr_size = oopDesc::header_size(); 779 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal); 780 #endif // ASSERT 781 } 782 gclab->set_buf(gclab_buf, actual_size); 783 return gclab->allocate(size); 784 } 785 786 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size, 787 size_t requested_size, 788 size_t* actual_size) { 789 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size); 790 HeapWord* res = allocate_memory(req); 791 if (res != NULL) { 792 *actual_size = req.actual_size(); 793 } else { 794 *actual_size = 0; 795 } 796 return res; 797 } 798 799 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size, 800 size_t word_size, 801 size_t* actual_size) { 802 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size); 803 HeapWord* res = allocate_memory(req); 804 if (res != NULL) { 805 *actual_size = req.actual_size(); 806 } else { 807 *actual_size = 0; 808 } 809 return res; 810 } 811 812 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) { 813 intptr_t pacer_epoch = 0; 814 bool in_new_region = false; 815 HeapWord* result = NULL; 816 817 if (req.is_mutator_alloc()) { 818 if (ShenandoahPacing) { 819 pacer()->pace_for_alloc(req.size()); 820 pacer_epoch = pacer()->epoch(); 821 } 822 823 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) { 824 result = allocate_memory_under_lock(req, in_new_region); 825 } 826 827 // Allocation failed, block until control thread reacted, then retry allocation. 828 // 829 // It might happen that one of the threads requesting allocation would unblock 830 // way later after GC happened, only to fail the second allocation, because 831 // other threads have already depleted the free storage. In this case, a better 832 // strategy is to try again, as long as GC makes progress. 833 // 834 // Then, we need to make sure the allocation was retried after at least one 835 // Full GC, which means we want to try more than ShenandoahFullGCThreshold times. 836 837 size_t tries = 0; 838 839 while (result == NULL && _progress_last_gc.is_set()) { 840 tries++; 841 control_thread()->handle_alloc_failure(req); 842 result = allocate_memory_under_lock(req, in_new_region); 843 } 844 845 while (result == NULL && tries <= ShenandoahFullGCThreshold) { 846 tries++; 847 control_thread()->handle_alloc_failure(req); 848 result = allocate_memory_under_lock(req, in_new_region); 849 } 850 851 } else { 852 assert(req.is_gc_alloc(), "Can only accept GC allocs here"); 853 result = allocate_memory_under_lock(req, in_new_region); 854 // Do not call handle_alloc_failure() here, because we cannot block. 855 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac(). 856 } 857 858 if (in_new_region) { 859 control_thread()->notify_heap_changed(); 860 } 861 862 if (result != NULL) { 863 size_t requested = req.size(); 864 size_t actual = req.actual_size(); 865 866 assert (req.is_lab_alloc() || (requested == actual), 867 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT, 868 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual); 869 870 if (req.is_mutator_alloc()) { 871 notify_mutator_alloc_words(actual, false); 872 873 // If we requested more than we were granted, give the rest back to pacer. 874 // This only matters if we are in the same pacing epoch: do not try to unpace 875 // over the budget for the other phase. 876 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) { 877 pacer()->unpace_for_alloc(pacer_epoch, requested - actual); 878 } 879 } else { 880 increase_used(actual*HeapWordSize); 881 } 882 } 883 884 return result; 885 } 886 887 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) { 888 ShenandoahHeapLocker locker(lock()); 889 return _free_set->allocate(req, in_new_region); 890 } 891 892 HeapWord* ShenandoahHeap::mem_allocate(size_t size, 893 bool* gc_overhead_limit_was_exceeded) { 894 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size); 895 return allocate_memory(req); 896 } 897 898 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, 899 size_t size, 900 Metaspace::MetadataType mdtype) { 901 MetaWord* result; 902 903 // Inform metaspace OOM to GC heuristics if class unloading is possible. 904 if (heuristics()->can_unload_classes()) { 905 ShenandoahHeuristics* h = heuristics(); 906 h->record_metaspace_oom(); 907 } 908 909 // Expand and retry allocation 910 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); 911 if (result != NULL) { 912 return result; 913 } 914 915 // Start full GC 916 collect(GCCause::_metadata_GC_clear_soft_refs); 917 918 // Retry allocation 919 result = loader_data->metaspace_non_null()->allocate(size, mdtype); 920 if (result != NULL) { 921 return result; 922 } 923 924 // Expand and retry allocation 925 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); 926 if (result != NULL) { 927 return result; 928 } 929 930 // Out of memory 931 return NULL; 932 } 933 934 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure { 935 private: 936 ShenandoahHeap* const _heap; 937 Thread* const _thread; 938 public: 939 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) : 940 _heap(heap), _thread(Thread::current()) {} 941 942 void do_object(oop p) { 943 shenandoah_assert_marked(NULL, p); 944 if (!p->is_forwarded()) { 945 _heap->evacuate_object(p, _thread); 946 } 947 } 948 }; 949 950 class ShenandoahEvacuationTask : public AbstractGangTask { 951 private: 952 ShenandoahHeap* const _sh; 953 ShenandoahCollectionSet* const _cs; 954 bool _concurrent; 955 public: 956 ShenandoahEvacuationTask(ShenandoahHeap* sh, 957 ShenandoahCollectionSet* cs, 958 bool concurrent) : 959 AbstractGangTask("Parallel Evacuation Task"), 960 _sh(sh), 961 _cs(cs), 962 _concurrent(concurrent) 963 {} 964 965 void work(uint worker_id) { 966 if (_concurrent) { 967 ShenandoahConcurrentWorkerSession worker_session(worker_id); 968 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers); 969 ShenandoahEvacOOMScope oom_evac_scope; 970 do_work(); 971 } else { 972 ShenandoahParallelWorkerSession worker_session(worker_id); 973 ShenandoahEvacOOMScope oom_evac_scope; 974 do_work(); 975 } 976 } 977 978 private: 979 void do_work() { 980 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh); 981 ShenandoahHeapRegion* r; 982 while ((r =_cs->claim_next()) != NULL) { 983 assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index()); 984 _sh->marked_object_iterate(r, &cl); 985 986 if (ShenandoahPacing) { 987 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize); 988 } 989 990 if (_sh->check_cancelled_gc_and_yield(_concurrent)) { 991 break; 992 } 993 } 994 } 995 }; 996 997 void ShenandoahHeap::trash_cset_regions() { 998 ShenandoahHeapLocker locker(lock()); 999 1000 ShenandoahCollectionSet* set = collection_set(); 1001 ShenandoahHeapRegion* r; 1002 set->clear_current_index(); 1003 while ((r = set->next()) != NULL) { 1004 r->make_trash(); 1005 } 1006 collection_set()->clear(); 1007 } 1008 1009 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const { 1010 st->print_cr("Heap Regions:"); 1011 st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned"); 1012 st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data"); 1013 st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark"); 1014 st->print_cr("SN=alloc sequence number"); 1015 1016 for (size_t i = 0; i < num_regions(); i++) { 1017 get_region(i)->print_on(st); 1018 } 1019 } 1020 1021 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) { 1022 assert(start->is_humongous_start(), "reclaim regions starting with the first one"); 1023 1024 oop humongous_obj = oop(start->bottom()); 1025 size_t size = humongous_obj->size(); 1026 size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); 1027 size_t index = start->index() + required_regions - 1; 1028 1029 assert(!start->has_live(), "liveness must be zero"); 1030 1031 for(size_t i = 0; i < required_regions; i++) { 1032 // Reclaim from tail. Otherwise, assertion fails when printing region to trace log, 1033 // as it expects that every region belongs to a humongous region starting with a humongous start region. 1034 ShenandoahHeapRegion* region = get_region(index --); 1035 1036 assert(region->is_humongous(), "expect correct humongous start or continuation"); 1037 assert(!region->is_cset(), "Humongous region should not be in collection set"); 1038 1039 region->make_trash_immediate(); 1040 } 1041 } 1042 1043 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure { 1044 public: 1045 ShenandoahCheckCleanGCLABClosure() {} 1046 void do_thread(Thread* thread) { 1047 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread); 1048 assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name()); 1049 assert(gclab->words_remaining() == 0, "GCLAB should not need retirement"); 1050 } 1051 }; 1052 1053 class ShenandoahRetireGCLABClosure : public ThreadClosure { 1054 private: 1055 bool const _resize; 1056 public: 1057 ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {} 1058 void do_thread(Thread* thread) { 1059 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread); 1060 assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name()); 1061 gclab->retire(); 1062 if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) { 1063 ShenandoahThreadLocalData::set_gclab_size(thread, 0); 1064 } 1065 } 1066 }; 1067 1068 void ShenandoahHeap::labs_make_parsable() { 1069 assert(UseTLAB, "Only call with UseTLAB"); 1070 1071 ShenandoahRetireGCLABClosure cl(false); 1072 1073 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { 1074 ThreadLocalAllocBuffer& tlab = t->tlab(); 1075 tlab.make_parsable(); 1076 cl.do_thread(t); 1077 } 1078 1079 workers()->threads_do(&cl); 1080 } 1081 1082 void ShenandoahHeap::tlabs_retire(bool resize) { 1083 assert(UseTLAB, "Only call with UseTLAB"); 1084 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled"); 1085 1086 ThreadLocalAllocStats stats; 1087 1088 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { 1089 ThreadLocalAllocBuffer& tlab = t->tlab(); 1090 tlab.retire(&stats); 1091 if (resize) { 1092 tlab.resize(); 1093 } 1094 } 1095 1096 stats.publish(); 1097 1098 #ifdef ASSERT 1099 ShenandoahCheckCleanGCLABClosure cl; 1100 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { 1101 cl.do_thread(t); 1102 } 1103 workers()->threads_do(&cl); 1104 #endif 1105 } 1106 1107 void ShenandoahHeap::gclabs_retire(bool resize) { 1108 assert(UseTLAB, "Only call with UseTLAB"); 1109 assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled"); 1110 1111 ShenandoahRetireGCLABClosure cl(resize); 1112 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { 1113 cl.do_thread(t); 1114 } 1115 workers()->threads_do(&cl); 1116 } 1117 1118 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask { 1119 private: 1120 ShenandoahRootEvacuator* _rp; 1121 1122 public: 1123 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) : 1124 AbstractGangTask("Shenandoah evacuate and update roots"), 1125 _rp(rp) {} 1126 1127 void work(uint worker_id) { 1128 ShenandoahParallelWorkerSession worker_session(worker_id); 1129 ShenandoahEvacOOMScope oom_evac_scope; 1130 ShenandoahEvacuateUpdateRootsClosure<> cl; 1131 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations); 1132 _rp->roots_do(worker_id, &cl); 1133 } 1134 }; 1135 1136 void ShenandoahHeap::evacuate_and_update_roots() { 1137 #if COMPILER2_OR_JVMCI 1138 DerivedPointerTable::clear(); 1139 #endif 1140 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped"); 1141 { 1142 // Include concurrent roots if current cycle can not process those roots concurrently 1143 ShenandoahRootEvacuator rp(workers()->active_workers(), 1144 ShenandoahPhaseTimings::init_evac, 1145 !ShenandoahConcurrentRoots::should_do_concurrent_roots(), 1146 !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()); 1147 ShenandoahEvacuateUpdateRootsTask roots_task(&rp); 1148 workers()->run_task(&roots_task); 1149 } 1150 1151 #if COMPILER2_OR_JVMCI 1152 DerivedPointerTable::update_pointers(); 1153 #endif 1154 } 1155 1156 // Returns size in bytes 1157 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const { 1158 if (ShenandoahElasticTLAB) { 1159 // With Elastic TLABs, return the max allowed size, and let the allocation path 1160 // figure out the safe size for current allocation. 1161 return ShenandoahHeapRegion::max_tlab_size_bytes(); 1162 } else { 1163 return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes()); 1164 } 1165 } 1166 1167 size_t ShenandoahHeap::max_tlab_size() const { 1168 // Returns size in words 1169 return ShenandoahHeapRegion::max_tlab_size_words(); 1170 } 1171 1172 void ShenandoahHeap::collect(GCCause::Cause cause) { 1173 control_thread()->request_gc(cause); 1174 } 1175 1176 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) { 1177 //assert(false, "Shouldn't need to do full collections"); 1178 } 1179 1180 HeapWord* ShenandoahHeap::block_start(const void* addr) const { 1181 ShenandoahHeapRegion* r = heap_region_containing(addr); 1182 if (r != NULL) { 1183 return r->block_start(addr); 1184 } 1185 return NULL; 1186 } 1187 1188 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const { 1189 ShenandoahHeapRegion* r = heap_region_containing(addr); 1190 return r->block_is_obj(addr); 1191 } 1192 1193 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const { 1194 return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr); 1195 } 1196 1197 jlong ShenandoahHeap::millis_since_last_gc() { 1198 double v = heuristics()->time_since_last_gc() * 1000; 1199 assert(0 <= v && v <= max_jlong, "value should fit: %f", v); 1200 return (jlong)v; 1201 } 1202 1203 void ShenandoahHeap::prepare_for_verify() { 1204 if (SafepointSynchronize::is_at_safepoint() && UseTLAB) { 1205 labs_make_parsable(); 1206 } 1207 } 1208 1209 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const { 1210 workers()->threads_do(tcl); 1211 if (_safepoint_workers != NULL) { 1212 _safepoint_workers->threads_do(tcl); 1213 } 1214 if (ShenandoahStringDedup::is_enabled()) { 1215 ShenandoahStringDedup::threads_do(tcl); 1216 } 1217 } 1218 1219 void ShenandoahHeap::print_tracing_info() const { 1220 LogTarget(Info, gc, stats) lt; 1221 if (lt.is_enabled()) { 1222 ResourceMark rm; 1223 LogStream ls(lt); 1224 1225 phase_timings()->print_global_on(&ls); 1226 1227 ls.cr(); 1228 ls.cr(); 1229 1230 shenandoah_policy()->print_gc_stats(&ls); 1231 1232 ls.cr(); 1233 ls.cr(); 1234 1235 if (ShenandoahPacing) { 1236 pacer()->print_on(&ls); 1237 } 1238 1239 ls.cr(); 1240 ls.cr(); 1241 } 1242 } 1243 1244 void ShenandoahHeap::verify(VerifyOption vo) { 1245 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) { 1246 if (ShenandoahVerify) { 1247 verifier()->verify_generic(vo); 1248 } else { 1249 // TODO: Consider allocating verification bitmaps on demand, 1250 // and turn this on unconditionally. 1251 } 1252 } 1253 } 1254 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const { 1255 return _free_set->capacity(); 1256 } 1257 1258 class ObjectIterateScanRootClosure : public BasicOopIterateClosure { 1259 private: 1260 MarkBitMap* _bitmap; 1261 Stack<oop,mtGC>* _oop_stack; 1262 ShenandoahHeap* const _heap; 1263 ShenandoahMarkingContext* const _marking_context; 1264 1265 template <class T> 1266 void do_oop_work(T* p) { 1267 T o = RawAccess<>::oop_load(p); 1268 if (!CompressedOops::is_null(o)) { 1269 oop obj = CompressedOops::decode_not_null(o); 1270 if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) { 1271 // There may be dead oops in weak roots in concurrent root phase, do not touch them. 1272 return; 1273 } 1274 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); 1275 1276 assert(oopDesc::is_oop(obj), "must be a valid oop"); 1277 if (!_bitmap->is_marked(obj)) { 1278 _bitmap->mark(obj); 1279 _oop_stack->push(obj); 1280 } 1281 } 1282 } 1283 public: 1284 ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) : 1285 _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()), 1286 _marking_context(_heap->marking_context()) {} 1287 void do_oop(oop* p) { do_oop_work(p); } 1288 void do_oop(narrowOop* p) { do_oop_work(p); } 1289 }; 1290 1291 /* 1292 * This is public API, used in preparation of object_iterate(). 1293 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't 1294 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can 1295 * control, we call SH::tlabs_retire, SH::gclabs_retire. 1296 */ 1297 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) { 1298 // No-op. 1299 } 1300 1301 /* 1302 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping. 1303 * 1304 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear 1305 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g. 1306 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear 1307 * scanning therefore depends on having a valid marking bitmap to support it. However, we only 1308 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid 1309 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just 1310 * wiped the bitmap in preparation for next marking). 1311 * 1312 * For all those reasons, we implement object iteration as a single marking traversal, reporting 1313 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap 1314 * is allowed to report dead objects, but is not required to do so. 1315 */ 1316 void ShenandoahHeap::object_iterate(ObjectClosure* cl) { 1317 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints"); 1318 if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) { 1319 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration"); 1320 return; 1321 } 1322 1323 // Reset bitmap 1324 _aux_bit_map.clear(); 1325 1326 Stack<oop,mtGC> oop_stack; 1327 1328 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack); 1329 1330 { 1331 // First, we process GC roots according to current GC cycle. 1332 // This populates the work stack with initial objects. 1333 // It is important to relinquish the associated locks before diving 1334 // into heap dumper. 1335 ShenandoahHeapIterationRootScanner rp; 1336 rp.roots_do(&oops); 1337 } 1338 1339 // Work through the oop stack to traverse heap. 1340 while (! oop_stack.is_empty()) { 1341 oop obj = oop_stack.pop(); 1342 assert(oopDesc::is_oop(obj), "must be a valid oop"); 1343 cl->do_object(obj); 1344 obj->oop_iterate(&oops); 1345 } 1346 1347 assert(oop_stack.is_empty(), "should be empty"); 1348 1349 if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) { 1350 log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration"); 1351 } 1352 } 1353 1354 void ShenandoahHeap::run_task(AbstractGangTask* task) { 1355 workers()->run_task(task, workers()->active_workers()); 1356 } 1357 1358 // Keep alive an object that was loaded with AS_NO_KEEPALIVE. 1359 void ShenandoahHeap::keep_alive(oop obj) { 1360 if (is_concurrent_mark_in_progress()) { 1361 ShenandoahBarrierSet::barrier_set()->enqueue(obj); 1362 } 1363 } 1364 1365 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const { 1366 for (size_t i = 0; i < num_regions(); i++) { 1367 ShenandoahHeapRegion* current = get_region(i); 1368 blk->heap_region_do(current); 1369 } 1370 } 1371 1372 class ShenandoahParallelHeapRegionTask : public AbstractGangTask { 1373 private: 1374 ShenandoahHeap* const _heap; 1375 ShenandoahHeapRegionClosure* const _blk; 1376 1377 shenandoah_padding(0); 1378 volatile size_t _index; 1379 shenandoah_padding(1); 1380 1381 public: 1382 ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) : 1383 AbstractGangTask("Parallel Region Task"), 1384 _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {} 1385 1386 void work(uint worker_id) { 1387 ShenandoahParallelWorkerSession worker_session(worker_id); 1388 size_t stride = ShenandoahParallelRegionStride; 1389 1390 size_t max = _heap->num_regions(); 1391 while (_index < max) { 1392 size_t cur = Atomic::fetch_and_add(&_index, stride); 1393 size_t start = cur; 1394 size_t end = MIN2(cur + stride, max); 1395 if (start >= max) break; 1396 1397 for (size_t i = cur; i < end; i++) { 1398 ShenandoahHeapRegion* current = _heap->get_region(i); 1399 _blk->heap_region_do(current); 1400 } 1401 } 1402 } 1403 }; 1404 1405 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const { 1406 assert(blk->is_thread_safe(), "Only thread-safe closures here"); 1407 if (num_regions() > ShenandoahParallelRegionStride) { 1408 ShenandoahParallelHeapRegionTask task(blk); 1409 workers()->run_task(&task); 1410 } else { 1411 heap_region_iterate(blk); 1412 } 1413 } 1414 1415 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { 1416 private: 1417 ShenandoahMarkingContext* const _ctx; 1418 public: 1419 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} 1420 1421 void heap_region_do(ShenandoahHeapRegion* r) { 1422 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index()); 1423 if (r->is_active()) { 1424 // Check if region needs updating its TAMS. We have updated it already during concurrent 1425 // reset, so it is very likely we don't need to do another write here. 1426 if (_ctx->top_at_mark_start(r) != r->top()) { 1427 _ctx->capture_top_at_mark_start(r); 1428 } 1429 } else { 1430 assert(_ctx->top_at_mark_start(r) == r->top(), 1431 "Region " SIZE_FORMAT " should already have correct TAMS", r->index()); 1432 } 1433 } 1434 1435 bool is_thread_safe() { return true; } 1436 }; 1437 1438 void ShenandoahHeap::op_init_mark() { 1439 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); 1440 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread"); 1441 1442 assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap"); 1443 assert(!marking_context()->is_complete(), "should not be complete"); 1444 assert(!has_forwarded_objects(), "No forwarded objects on this path"); 1445 1446 if (ShenandoahVerify) { 1447 verifier()->verify_before_concmark(); 1448 } 1449 1450 if (VerifyBeforeGC) { 1451 Universe::verify(); 1452 } 1453 1454 set_concurrent_mark_in_progress(true); 1455 1456 // We need to reset all TLABs because they might be below the TAMS, and we need to mark 1457 // the objects in them. Do not let mutators allocate any new objects in their current TLABs. 1458 // It is also a good place to resize the TLAB sizes for future allocations. 1459 if (UseTLAB) { 1460 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_manage_tlabs); 1461 tlabs_retire(ResizeTLAB); 1462 } 1463 1464 { 1465 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states); 1466 ShenandoahInitMarkUpdateRegionStateClosure cl; 1467 parallel_heap_region_iterate(&cl); 1468 } 1469 1470 // Make above changes visible to worker threads 1471 OrderAccess::fence(); 1472 1473 concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots); 1474 1475 if (ShenandoahPacing) { 1476 pacer()->setup_for_mark(); 1477 } 1478 1479 // Arm nmethods for concurrent marking. When a nmethod is about to be executed, 1480 // we need to make sure that all its metadata are marked. alternative is to remark 1481 // thread roots at final mark pause, but it can be potential latency killer. 1482 if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) { 1483 ShenandoahCodeRoots::arm_nmethods(); 1484 } 1485 } 1486 1487 void ShenandoahHeap::op_mark() { 1488 concurrent_mark()->mark_from_roots(); 1489 } 1490 1491 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { 1492 private: 1493 ShenandoahMarkingContext* const _ctx; 1494 ShenandoahHeapLock* const _lock; 1495 1496 public: 1497 ShenandoahFinalMarkUpdateRegionStateClosure() : 1498 _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {} 1499 1500 void heap_region_do(ShenandoahHeapRegion* r) { 1501 if (r->is_active()) { 1502 // All allocations past TAMS are implicitly live, adjust the region data. 1503 // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap. 1504 HeapWord *tams = _ctx->top_at_mark_start(r); 1505 HeapWord *top = r->top(); 1506 if (top > tams) { 1507 r->increase_live_data_alloc_words(pointer_delta(top, tams)); 1508 } 1509 1510 // We are about to select the collection set, make sure it knows about 1511 // current pinning status. Also, this allows trashing more regions that 1512 // now have their pinning status dropped. 1513 if (r->is_pinned()) { 1514 if (r->pin_count() == 0) { 1515 ShenandoahHeapLocker locker(_lock); 1516 r->make_unpinned(); 1517 } 1518 } else { 1519 if (r->pin_count() > 0) { 1520 ShenandoahHeapLocker locker(_lock); 1521 r->make_pinned(); 1522 } 1523 } 1524 1525 // Remember limit for updating refs. It's guaranteed that we get no 1526 // from-space-refs written from here on. 1527 r->set_update_watermark_at_safepoint(r->top()); 1528 } else { 1529 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index()); 1530 assert(_ctx->top_at_mark_start(r) == r->top(), 1531 "Region " SIZE_FORMAT " should have correct TAMS", r->index()); 1532 } 1533 } 1534 1535 bool is_thread_safe() { return true; } 1536 }; 1537 1538 void ShenandoahHeap::op_final_mark() { 1539 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); 1540 assert(!has_forwarded_objects(), "No forwarded objects on this path"); 1541 1542 // It is critical that we 1543 // evacuate roots right after finishing marking, so that we don't 1544 // get unmarked objects in the roots. 1545 1546 if (!cancelled_gc()) { 1547 concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false); 1548 1549 // Marking is completed, deactivate SATB barrier 1550 set_concurrent_mark_in_progress(false); 1551 mark_complete_marking_context(); 1552 1553 parallel_cleaning(false /* full gc*/); 1554 1555 if (ShenandoahVerify) { 1556 verifier()->verify_roots_no_forwarded(); 1557 } 1558 1559 { 1560 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states); 1561 ShenandoahFinalMarkUpdateRegionStateClosure cl; 1562 parallel_heap_region_iterate(&cl); 1563 1564 assert_pinned_region_status(); 1565 } 1566 1567 // Retire the TLABs, which will force threads to reacquire their TLABs after the pause. 1568 // This is needed for two reasons. Strong one: new allocations would be with new freeset, 1569 // which would be outside the collection set, so no cset writes would happen there. 1570 // Weaker one: new allocations would happen past update watermark, and so less work would 1571 // be needed for reference updates (would update the large filler instead). 1572 if (UseTLAB) { 1573 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_manage_labs); 1574 tlabs_retire(false); 1575 } 1576 1577 { 1578 ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset); 1579 ShenandoahHeapLocker locker(lock()); 1580 _collection_set->clear(); 1581 heuristics()->choose_collection_set(_collection_set); 1582 } 1583 1584 { 1585 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset); 1586 ShenandoahHeapLocker locker(lock()); 1587 _free_set->rebuild(); 1588 } 1589 1590 if (!is_degenerated_gc_in_progress()) { 1591 prepare_concurrent_roots(); 1592 prepare_concurrent_unloading(); 1593 } 1594 1595 // If collection set has candidates, start evacuation. 1596 // Otherwise, bypass the rest of the cycle. 1597 if (!collection_set()->is_empty()) { 1598 ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac); 1599 1600 if (ShenandoahVerify) { 1601 verifier()->verify_before_evacuation(); 1602 } 1603 1604 set_evacuation_in_progress(true); 1605 // From here on, we need to update references. 1606 set_has_forwarded_objects(true); 1607 1608 if (!is_degenerated_gc_in_progress()) { 1609 if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) { 1610 ShenandoahCodeRoots::arm_nmethods(); 1611 } 1612 evacuate_and_update_roots(); 1613 } 1614 1615 if (ShenandoahPacing) { 1616 pacer()->setup_for_evac(); 1617 } 1618 1619 if (ShenandoahVerify) { 1620 // If OOM while evacuating/updating of roots, there is no guarantee of their consistencies 1621 if (!cancelled_gc()) { 1622 ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::None; 1623 if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) { 1624 types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots); 1625 types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots); 1626 types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::StringDedupRoots); 1627 } 1628 1629 if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) { 1630 types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CodeRoots); 1631 } 1632 verifier()->verify_roots_no_forwarded_except(types); 1633 } 1634 verifier()->verify_during_evacuation(); 1635 } 1636 } else { 1637 if (ShenandoahVerify) { 1638 verifier()->verify_after_concmark(); 1639 } 1640 1641 if (VerifyAfterGC) { 1642 Universe::verify(); 1643 } 1644 } 1645 1646 } else { 1647 // If this cycle was updating references, we need to keep the has_forwarded_objects 1648 // flag on, for subsequent phases to deal with it. 1649 concurrent_mark()->cancel(); 1650 set_concurrent_mark_in_progress(false); 1651 1652 if (process_references()) { 1653 // Abandon reference processing right away: pre-cleaning must have failed. 1654 ReferenceProcessor *rp = ref_processor(); 1655 rp->disable_discovery(); 1656 rp->abandon_partial_discovery(); 1657 rp->verify_no_references_recorded(); 1658 } 1659 } 1660 } 1661 1662 void ShenandoahHeap::op_conc_evac() { 1663 ShenandoahEvacuationTask task(this, _collection_set, true); 1664 workers()->run_task(&task); 1665 } 1666 1667 void ShenandoahHeap::op_stw_evac() { 1668 ShenandoahEvacuationTask task(this, _collection_set, false); 1669 workers()->run_task(&task); 1670 } 1671 1672 void ShenandoahHeap::op_updaterefs() { 1673 update_heap_references(true); 1674 } 1675 1676 void ShenandoahHeap::op_cleanup_early() { 1677 free_set()->recycle_trash(); 1678 } 1679 1680 void ShenandoahHeap::op_cleanup_complete() { 1681 free_set()->recycle_trash(); 1682 } 1683 1684 class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask { 1685 private: 1686 ShenandoahVMRoots<true /*concurrent*/> _vm_roots; 1687 ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots; 1688 1689 public: 1690 ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) : 1691 AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots Task"), 1692 _vm_roots(phase), 1693 _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()) {} 1694 1695 void work(uint worker_id) { 1696 ShenandoahConcurrentWorkerSession worker_session(worker_id); 1697 ShenandoahEvacOOMScope oom; 1698 { 1699 // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration 1700 // may race against OopStorage::release() calls. 1701 ShenandoahEvacUpdateOopStorageRootsClosure cl; 1702 _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl, worker_id); 1703 } 1704 1705 { 1706 ShenandoahEvacuateUpdateRootsClosure<> cl; 1707 CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong); 1708 _cld_roots.cld_do(&clds, worker_id); 1709 } 1710 } 1711 }; 1712 1713 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure { 1714 private: 1715 ShenandoahHeap* const _heap; 1716 ShenandoahMarkingContext* const _mark_context; 1717 bool _evac_in_progress; 1718 Thread* const _thread; 1719 size_t _dead_counter; 1720 1721 public: 1722 ShenandoahEvacUpdateCleanupOopStorageRootsClosure(); 1723 void do_oop(oop* p); 1724 void do_oop(narrowOop* p); 1725 1726 size_t dead_counter() const; 1727 void reset_dead_counter(); 1728 }; 1729 1730 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() : 1731 _heap(ShenandoahHeap::heap()), 1732 _mark_context(ShenandoahHeap::heap()->marking_context()), 1733 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()), 1734 _thread(Thread::current()), 1735 _dead_counter(0) { 1736 } 1737 1738 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) { 1739 const oop obj = RawAccess<>::oop_load(p); 1740 if (!CompressedOops::is_null(obj)) { 1741 if (!_mark_context->is_marked(obj)) { 1742 shenandoah_assert_correct(p, obj); 1743 oop old = Atomic::cmpxchg(p, obj, oop(NULL)); 1744 if (obj == old) { 1745 _dead_counter ++; 1746 } 1747 } else if (_evac_in_progress && _heap->in_collection_set(obj)) { 1748 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); 1749 if (resolved == obj) { 1750 resolved = _heap->evacuate_object(obj, _thread); 1751 } 1752 Atomic::cmpxchg(p, obj, resolved); 1753 assert(_heap->cancelled_gc() || 1754 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved), 1755 "Sanity"); 1756 } 1757 } 1758 } 1759 1760 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) { 1761 ShouldNotReachHere(); 1762 } 1763 1764 size_t ShenandoahEvacUpdateCleanupOopStorageRootsClosure::dead_counter() const { 1765 return _dead_counter; 1766 } 1767 1768 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::reset_dead_counter() { 1769 _dead_counter = 0; 1770 } 1771 1772 class ShenandoahIsCLDAliveClosure : public CLDClosure { 1773 public: 1774 void do_cld(ClassLoaderData* cld) { 1775 cld->is_alive(); 1776 } 1777 }; 1778 1779 class ShenandoahIsNMethodAliveClosure: public NMethodClosure { 1780 public: 1781 void do_nmethod(nmethod* n) { 1782 n->is_unloading(); 1783 } 1784 }; 1785 1786 // This task not only evacuates/updates marked weak roots, but also "NULL" 1787 // dead weak roots. 1788 class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask { 1789 private: 1790 ShenandoahWeakRoot<true /*concurrent*/> _jni_roots; 1791 ShenandoahWeakRoot<true /*concurrent*/> _string_table_roots; 1792 ShenandoahWeakRoot<true /*concurrent*/> _resolved_method_table_roots; 1793 ShenandoahWeakRoot<true /*concurrent*/> _vm_roots; 1794 1795 // Roots related to concurrent class unloading 1796 ShenandoahClassLoaderDataRoots<true /* concurrent */, false /* single thread*/> 1797 _cld_roots; 1798 ShenandoahConcurrentNMethodIterator _nmethod_itr; 1799 ShenandoahConcurrentStringDedupRoots _dedup_roots; 1800 bool _concurrent_class_unloading; 1801 1802 public: 1803 ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) : 1804 AbstractGangTask("Shenandoah Concurrent Weak Root Task"), 1805 _jni_roots(OopStorageSet::jni_weak(), phase, ShenandoahPhaseTimings::JNIWeakRoots), 1806 _string_table_roots(OopStorageSet::string_table_weak(), phase, ShenandoahPhaseTimings::StringTableRoots), 1807 _resolved_method_table_roots(OopStorageSet::resolved_method_table_weak(), phase, ShenandoahPhaseTimings::ResolvedMethodTableRoots), 1808 _vm_roots(OopStorageSet::vm_weak(), phase, ShenandoahPhaseTimings::VMWeakRoots), 1809 _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()), 1810 _nmethod_itr(ShenandoahCodeRoots::table()), 1811 _dedup_roots(phase), 1812 _concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) { 1813 StringTable::reset_dead_counter(); 1814 ResolvedMethodTable::reset_dead_counter(); 1815 if (_concurrent_class_unloading) { 1816 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1817 _nmethod_itr.nmethods_do_begin(); 1818 } 1819 } 1820 1821 ~ShenandoahConcurrentWeakRootsEvacUpdateTask() { 1822 StringTable::finish_dead_counter(); 1823 ResolvedMethodTable::finish_dead_counter(); 1824 if (_concurrent_class_unloading) { 1825 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1826 _nmethod_itr.nmethods_do_end(); 1827 } 1828 } 1829 1830 void work(uint worker_id) { 1831 ShenandoahConcurrentWorkerSession worker_session(worker_id); 1832 { 1833 ShenandoahEvacOOMScope oom; 1834 // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration 1835 // may race against OopStorage::release() calls. 1836 ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl; 1837 _jni_roots.oops_do(&cl, worker_id); 1838 _vm_roots.oops_do(&cl, worker_id); 1839 1840 cl.reset_dead_counter(); 1841 _string_table_roots.oops_do(&cl, worker_id); 1842 StringTable::inc_dead_counter(cl.dead_counter()); 1843 1844 cl.reset_dead_counter(); 1845 _resolved_method_table_roots.oops_do(&cl, worker_id); 1846 ResolvedMethodTable::inc_dead_counter(cl.dead_counter()); 1847 1848 // String dedup weak roots 1849 ShenandoahForwardedIsAliveClosure is_alive; 1850 ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive; 1851 _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id); 1852 } 1853 1854 // If we are going to perform concurrent class unloading later on, we need to 1855 // cleanup the weak oops in CLD and determinate nmethod's unloading state, so that we 1856 // can cleanup immediate garbage sooner. 1857 if (_concurrent_class_unloading) { 1858 // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either NULL the 1859 // CLD's holder or evacuate it. 1860 ShenandoahIsCLDAliveClosure is_cld_alive; 1861 _cld_roots.cld_do(&is_cld_alive, worker_id); 1862 1863 // Applies ShenandoahIsNMethodAliveClosure to registered nmethods. 1864 // The closure calls nmethod->is_unloading(). The is_unloading 1865 // state is cached, therefore, during concurrent class unloading phase, 1866 // we will not touch the metadata of unloading nmethods 1867 ShenandoahIsNMethodAliveClosure is_nmethod_alive; 1868 _nmethod_itr.nmethods_do(&is_nmethod_alive); 1869 } 1870 } 1871 }; 1872 1873 void ShenandoahHeap::op_weak_roots() { 1874 if (is_concurrent_weak_root_in_progress()) { 1875 // Concurrent weak root processing 1876 { 1877 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work); 1878 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work); 1879 ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work); 1880 workers()->run_task(&task); 1881 if (!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) { 1882 set_concurrent_weak_root_in_progress(false); 1883 } 1884 } 1885 1886 // Perform handshake to flush out dead oops 1887 { 1888 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous); 1889 ShenandoahRendezvousClosure cl; 1890 Handshake::execute(&cl); 1891 } 1892 } 1893 } 1894 1895 void ShenandoahHeap::op_class_unloading() { 1896 assert (is_concurrent_weak_root_in_progress() && 1897 ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(), 1898 "Checked by caller"); 1899 _unloader.unload(); 1900 set_concurrent_weak_root_in_progress(false); 1901 } 1902 1903 void ShenandoahHeap::op_strong_roots() { 1904 assert(is_concurrent_strong_root_in_progress(), "Checked by caller"); 1905 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots); 1906 workers()->run_task(&task); 1907 set_concurrent_strong_root_in_progress(false); 1908 } 1909 1910 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { 1911 private: 1912 ShenandoahMarkingContext* const _ctx; 1913 public: 1914 ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} 1915 1916 void heap_region_do(ShenandoahHeapRegion* r) { 1917 if (r->is_active()) { 1918 // Reset live data and set TAMS optimistically. We would recheck these under the pause 1919 // anyway to capture any updates that happened since now. 1920 r->clear_live_data(); 1921 _ctx->capture_top_at_mark_start(r); 1922 } 1923 } 1924 1925 bool is_thread_safe() { return true; } 1926 }; 1927 1928 void ShenandoahHeap::op_reset() { 1929 if (ShenandoahPacing) { 1930 pacer()->setup_for_reset(); 1931 } 1932 reset_mark_bitmap(); 1933 1934 ShenandoahResetUpdateRegionStateClosure cl; 1935 parallel_heap_region_iterate(&cl); 1936 } 1937 1938 void ShenandoahHeap::op_preclean() { 1939 if (ShenandoahPacing) { 1940 pacer()->setup_for_preclean(); 1941 } 1942 concurrent_mark()->preclean_weak_refs(); 1943 } 1944 1945 void ShenandoahHeap::op_full(GCCause::Cause cause) { 1946 ShenandoahMetricsSnapshot metrics; 1947 metrics.snap_before(); 1948 1949 full_gc()->do_it(cause); 1950 1951 metrics.snap_after(); 1952 1953 if (metrics.is_good_progress()) { 1954 _progress_last_gc.set(); 1955 } else { 1956 // Nothing to do. Tell the allocation path that we have failed to make 1957 // progress, and it can finally fail. 1958 _progress_last_gc.unset(); 1959 } 1960 } 1961 1962 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) { 1963 // Degenerated GC is STW, but it can also fail. Current mechanics communicates 1964 // GC failure via cancelled_concgc() flag. So, if we detect the failure after 1965 // some phase, we have to upgrade the Degenerate GC to Full GC. 1966 1967 clear_cancelled_gc(); 1968 1969 ShenandoahMetricsSnapshot metrics; 1970 metrics.snap_before(); 1971 1972 switch (point) { 1973 // The cases below form the Duff's-like device: it describes the actual GC cycle, 1974 // but enters it at different points, depending on which concurrent phase had 1975 // degenerated. 1976 1977 case _degenerated_outside_cycle: 1978 // We have degenerated from outside the cycle, which means something is bad with 1979 // the heap, most probably heavy humongous fragmentation, or we are very low on free 1980 // space. It makes little sense to wait for Full GC to reclaim as much as it can, when 1981 // we can do the most aggressive degen cycle, which includes processing references and 1982 // class unloading, unless those features are explicitly disabled. 1983 // 1984 // Note that we can only do this for "outside-cycle" degens, otherwise we would risk 1985 // changing the cycle parameters mid-cycle during concurrent -> degenerated handover. 1986 set_process_references(heuristics()->can_process_references()); 1987 set_unload_classes(heuristics()->can_unload_classes()); 1988 1989 op_reset(); 1990 1991 op_init_mark(); 1992 if (cancelled_gc()) { 1993 op_degenerated_fail(); 1994 return; 1995 } 1996 1997 case _degenerated_mark: 1998 op_final_mark(); 1999 if (cancelled_gc()) { 2000 op_degenerated_fail(); 2001 return; 2002 } 2003 2004 if (!has_forwarded_objects() && ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) { 2005 // Disarm nmethods that armed for concurrent mark. On normal cycle, it would 2006 // be disarmed while conc-roots phase is running. 2007 // TODO: Call op_conc_roots() here instead 2008 ShenandoahCodeRoots::disarm_nmethods(); 2009 } 2010 2011 op_cleanup_early(); 2012 2013 case _degenerated_evac: 2014 // If heuristics thinks we should do the cycle, this flag would be set, 2015 // and we can do evacuation. Otherwise, it would be the shortcut cycle. 2016 if (is_evacuation_in_progress()) { 2017 2018 // Degeneration under oom-evac protocol might have left some objects in 2019 // collection set un-evacuated. Restart evacuation from the beginning to 2020 // capture all objects. For all the objects that are already evacuated, 2021 // it would be a simple check, which is supposed to be fast. This is also 2022 // safe to do even without degeneration, as CSet iterator is at beginning 2023 // in preparation for evacuation anyway. 2024 // 2025 // Before doing that, we need to make sure we never had any cset-pinned 2026 // regions. This may happen if allocation failure happened when evacuating 2027 // the about-to-be-pinned object, oom-evac protocol left the object in 2028 // the collection set, and then the pin reached the cset region. If we continue 2029 // the cycle here, we would trash the cset and alive objects in it. To avoid 2030 // it, we fail degeneration right away and slide into Full GC to recover. 2031 2032 { 2033 sync_pinned_region_status(); 2034 collection_set()->clear_current_index(); 2035 2036 ShenandoahHeapRegion* r; 2037 while ((r = collection_set()->next()) != NULL) { 2038 if (r->is_pinned()) { 2039 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc); 2040 op_degenerated_fail(); 2041 return; 2042 } 2043 } 2044 2045 collection_set()->clear_current_index(); 2046 } 2047 2048 op_stw_evac(); 2049 if (cancelled_gc()) { 2050 op_degenerated_fail(); 2051 return; 2052 } 2053 } 2054 2055 // If heuristics thinks we should do the cycle, this flag would be set, 2056 // and we need to do update-refs. Otherwise, it would be the shortcut cycle. 2057 if (has_forwarded_objects()) { 2058 op_init_updaterefs(); 2059 if (cancelled_gc()) { 2060 op_degenerated_fail(); 2061 return; 2062 } 2063 } 2064 2065 case _degenerated_updaterefs: 2066 if (has_forwarded_objects()) { 2067 op_final_updaterefs(); 2068 if (cancelled_gc()) { 2069 op_degenerated_fail(); 2070 return; 2071 } 2072 } 2073 2074 op_cleanup_complete(); 2075 break; 2076 2077 default: 2078 ShouldNotReachHere(); 2079 } 2080 2081 if (ShenandoahVerify) { 2082 verifier()->verify_after_degenerated(); 2083 } 2084 2085 if (VerifyAfterGC) { 2086 Universe::verify(); 2087 } 2088 2089 metrics.snap_after(); 2090 2091 // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles, 2092 // because that probably means the heap is overloaded and/or fragmented. 2093 if (!metrics.is_good_progress()) { 2094 _progress_last_gc.unset(); 2095 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc); 2096 op_degenerated_futile(); 2097 } else { 2098 _progress_last_gc.set(); 2099 } 2100 } 2101 2102 void ShenandoahHeap::op_degenerated_fail() { 2103 log_info(gc)("Cannot finish degeneration, upgrading to Full GC"); 2104 shenandoah_policy()->record_degenerated_upgrade_to_full(); 2105 op_full(GCCause::_shenandoah_upgrade_to_full_gc); 2106 } 2107 2108 void ShenandoahHeap::op_degenerated_futile() { 2109 shenandoah_policy()->record_degenerated_upgrade_to_full(); 2110 op_full(GCCause::_shenandoah_upgrade_to_full_gc); 2111 } 2112 2113 void ShenandoahHeap::force_satb_flush_all_threads() { 2114 if (!is_concurrent_mark_in_progress()) { 2115 // No need to flush SATBs 2116 return; 2117 } 2118 2119 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { 2120 ShenandoahThreadLocalData::set_force_satb_flush(t, true); 2121 } 2122 // The threads are not "acquiring" their thread-local data, but it does not 2123 // hurt to "release" the updates here anyway. 2124 OrderAccess::fence(); 2125 } 2126 2127 void ShenandoahHeap::set_gc_state_all_threads(char state) { 2128 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { 2129 ShenandoahThreadLocalData::set_gc_state(t, state); 2130 } 2131 } 2132 2133 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) { 2134 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint"); 2135 _gc_state.set_cond(mask, value); 2136 set_gc_state_all_threads(_gc_state.raw_value()); 2137 } 2138 2139 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) { 2140 if (has_forwarded_objects()) { 2141 set_gc_state_mask(MARKING | UPDATEREFS, in_progress); 2142 } else { 2143 set_gc_state_mask(MARKING, in_progress); 2144 } 2145 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress); 2146 } 2147 2148 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) { 2149 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint"); 2150 set_gc_state_mask(EVACUATION, in_progress); 2151 } 2152 2153 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) { 2154 assert(ShenandoahConcurrentRoots::can_do_concurrent_roots(), "Why set the flag?"); 2155 if (in_progress) { 2156 _concurrent_strong_root_in_progress.set(); 2157 } else { 2158 _concurrent_strong_root_in_progress.unset(); 2159 } 2160 } 2161 2162 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool in_progress) { 2163 assert(ShenandoahConcurrentRoots::can_do_concurrent_roots(), "Why set the flag?"); 2164 if (in_progress) { 2165 _concurrent_weak_root_in_progress.set(); 2166 } else { 2167 _concurrent_weak_root_in_progress.unset(); 2168 } 2169 } 2170 2171 void ShenandoahHeap::ref_processing_init() { 2172 assert(_max_workers > 0, "Sanity"); 2173 2174 _ref_processor = 2175 new ReferenceProcessor(&_subject_to_discovery, // is_subject_to_discovery 2176 _ref_proc_mt_processing, // MT processing 2177 _max_workers, // Degree of MT processing 2178 _ref_proc_mt_discovery, // MT discovery 2179 _max_workers, // Degree of MT discovery 2180 false, // Reference discovery is not atomic 2181 NULL, // No closure, should be installed before use 2182 true); // Scale worker threads 2183 2184 shenandoah_assert_rp_isalive_not_installed(); 2185 } 2186 2187 GCTracer* ShenandoahHeap::tracer() { 2188 return shenandoah_policy()->tracer(); 2189 } 2190 2191 size_t ShenandoahHeap::tlab_used(Thread* thread) const { 2192 return _free_set->used(); 2193 } 2194 2195 bool ShenandoahHeap::try_cancel_gc() { 2196 while (true) { 2197 jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE); 2198 if (prev == CANCELLABLE) return true; 2199 else if (prev == CANCELLED) return false; 2200 assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers"); 2201 assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED"); 2202 if (Thread::current()->is_Java_thread()) { 2203 // We need to provide a safepoint here, otherwise we might 2204 // spin forever if a SP is pending. 2205 ThreadBlockInVM sp(JavaThread::current()); 2206 SpinPause(); 2207 } 2208 } 2209 } 2210 2211 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) { 2212 if (try_cancel_gc()) { 2213 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause)); 2214 log_info(gc)("%s", msg.buffer()); 2215 Events::log(Thread::current(), "%s", msg.buffer()); 2216 } 2217 } 2218 2219 uint ShenandoahHeap::max_workers() { 2220 return _max_workers; 2221 } 2222 2223 void ShenandoahHeap::stop() { 2224 // The shutdown sequence should be able to terminate when GC is running. 2225 2226 // Step 0. Notify policy to disable event recording. 2227 _shenandoah_policy->record_shutdown(); 2228 2229 // Step 1. Notify control thread that we are in shutdown. 2230 // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown. 2231 // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below. 2232 control_thread()->prepare_for_graceful_shutdown(); 2233 2234 // Step 2. Notify GC workers that we are cancelling GC. 2235 cancel_gc(GCCause::_shenandoah_stop_vm); 2236 2237 // Step 3. Wait until GC worker exits normally. 2238 control_thread()->stop(); 2239 2240 // Step 4. Stop String Dedup thread if it is active 2241 if (ShenandoahStringDedup::is_enabled()) { 2242 ShenandoahStringDedup::stop(); 2243 } 2244 } 2245 2246 void ShenandoahHeap::stw_unload_classes(bool full_gc) { 2247 if (!unload_classes()) return; 2248 2249 // Unload classes and purge SystemDictionary. 2250 { 2251 ShenandoahGCPhase phase(full_gc ? 2252 ShenandoahPhaseTimings::full_gc_purge_class_unload : 2253 ShenandoahPhaseTimings::purge_class_unload); 2254 bool purged_class = SystemDictionary::do_unloading(gc_timer()); 2255 2256 ShenandoahIsAliveSelector is_alive; 2257 uint num_workers = _workers->active_workers(); 2258 ShenandoahClassUnloadingTask unlink_task(is_alive.is_alive_closure(), num_workers, purged_class); 2259 _workers->run_task(&unlink_task); 2260 } 2261 2262 { 2263 ShenandoahGCPhase phase(full_gc ? 2264 ShenandoahPhaseTimings::full_gc_purge_cldg : 2265 ShenandoahPhaseTimings::purge_cldg); 2266 ClassLoaderDataGraph::purge(); 2267 } 2268 // Resize and verify metaspace 2269 MetaspaceGC::compute_new_size(); 2270 MetaspaceUtils::verify_metrics(); 2271 } 2272 2273 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs), 2274 // so they should not have forwarded oops. 2275 // However, we do need to "null" dead oops in the roots, if can not be done 2276 // in concurrent cycles. 2277 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) { 2278 ShenandoahGCPhase root_phase(full_gc ? 2279 ShenandoahPhaseTimings::full_gc_purge : 2280 ShenandoahPhaseTimings::purge); 2281 uint num_workers = _workers->active_workers(); 2282 ShenandoahPhaseTimings::Phase timing_phase = full_gc ? 2283 ShenandoahPhaseTimings::full_gc_purge_weak_par : 2284 ShenandoahPhaseTimings::purge_weak_par; 2285 ShenandoahGCPhase phase(timing_phase); 2286 ShenandoahGCWorkerPhase worker_phase(timing_phase); 2287 2288 // Cleanup weak roots 2289 if (has_forwarded_objects()) { 2290 ShenandoahForwardedIsAliveClosure is_alive; 2291 ShenandoahUpdateRefsClosure keep_alive; 2292 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure> 2293 cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()); 2294 _workers->run_task(&cleaning_task); 2295 } else { 2296 ShenandoahIsAliveClosure is_alive; 2297 #ifdef ASSERT 2298 ShenandoahAssertNotForwardedClosure verify_cl; 2299 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure> 2300 cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()); 2301 #else 2302 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure> 2303 cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()); 2304 #endif 2305 _workers->run_task(&cleaning_task); 2306 } 2307 } 2308 2309 void ShenandoahHeap::parallel_cleaning(bool full_gc) { 2310 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); 2311 stw_process_weak_roots(full_gc); 2312 if (!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) { 2313 stw_unload_classes(full_gc); 2314 } 2315 } 2316 2317 void ShenandoahHeap::set_has_forwarded_objects(bool cond) { 2318 set_gc_state_mask(HAS_FORWARDED, cond); 2319 } 2320 2321 void ShenandoahHeap::set_process_references(bool pr) { 2322 _process_references.set_cond(pr); 2323 } 2324 2325 void ShenandoahHeap::set_unload_classes(bool uc) { 2326 _unload_classes.set_cond(uc); 2327 } 2328 2329 bool ShenandoahHeap::process_references() const { 2330 return _process_references.is_set(); 2331 } 2332 2333 bool ShenandoahHeap::unload_classes() const { 2334 return _unload_classes.is_set(); 2335 } 2336 2337 address ShenandoahHeap::in_cset_fast_test_addr() { 2338 ShenandoahHeap* heap = ShenandoahHeap::heap(); 2339 assert(heap->collection_set() != NULL, "Sanity"); 2340 return (address) heap->collection_set()->biased_map_address(); 2341 } 2342 2343 address ShenandoahHeap::cancelled_gc_addr() { 2344 return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of(); 2345 } 2346 2347 address ShenandoahHeap::gc_state_addr() { 2348 return (address) ShenandoahHeap::heap()->_gc_state.addr_of(); 2349 } 2350 2351 size_t ShenandoahHeap::bytes_allocated_since_gc_start() { 2352 return Atomic::load_acquire(&_bytes_allocated_since_gc_start); 2353 } 2354 2355 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() { 2356 Atomic::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0); 2357 } 2358 2359 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) { 2360 _degenerated_gc_in_progress.set_cond(in_progress); 2361 } 2362 2363 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) { 2364 _full_gc_in_progress.set_cond(in_progress); 2365 } 2366 2367 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) { 2368 assert (is_full_gc_in_progress(), "should be"); 2369 _full_gc_move_in_progress.set_cond(in_progress); 2370 } 2371 2372 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) { 2373 set_gc_state_mask(UPDATEREFS, in_progress); 2374 } 2375 2376 void ShenandoahHeap::register_nmethod(nmethod* nm) { 2377 ShenandoahCodeRoots::register_nmethod(nm); 2378 } 2379 2380 void ShenandoahHeap::unregister_nmethod(nmethod* nm) { 2381 ShenandoahCodeRoots::unregister_nmethod(nm); 2382 } 2383 2384 void ShenandoahHeap::flush_nmethod(nmethod* nm) { 2385 ShenandoahCodeRoots::flush_nmethod(nm); 2386 } 2387 2388 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) { 2389 heap_region_containing(o)->record_pin(); 2390 return o; 2391 } 2392 2393 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) { 2394 heap_region_containing(o)->record_unpin(); 2395 } 2396 2397 void ShenandoahHeap::sync_pinned_region_status() { 2398 ShenandoahHeapLocker locker(lock()); 2399 2400 for (size_t i = 0; i < num_regions(); i++) { 2401 ShenandoahHeapRegion *r = get_region(i); 2402 if (r->is_active()) { 2403 if (r->is_pinned()) { 2404 if (r->pin_count() == 0) { 2405 r->make_unpinned(); 2406 } 2407 } else { 2408 if (r->pin_count() > 0) { 2409 r->make_pinned(); 2410 } 2411 } 2412 } 2413 } 2414 2415 assert_pinned_region_status(); 2416 } 2417 2418 #ifdef ASSERT 2419 void ShenandoahHeap::assert_pinned_region_status() { 2420 for (size_t i = 0; i < num_regions(); i++) { 2421 ShenandoahHeapRegion* r = get_region(i); 2422 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0), 2423 "Region " SIZE_FORMAT " pinning status is inconsistent", i); 2424 } 2425 } 2426 #endif 2427 2428 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const { 2429 return _gc_timer; 2430 } 2431 2432 void ShenandoahHeap::prepare_concurrent_roots() { 2433 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); 2434 if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) { 2435 set_concurrent_strong_root_in_progress(!collection_set()->is_empty()); 2436 set_concurrent_weak_root_in_progress(true); 2437 } 2438 } 2439 2440 void ShenandoahHeap::prepare_concurrent_unloading() { 2441 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); 2442 if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) { 2443 _unloader.prepare(); 2444 } 2445 } 2446 2447 void ShenandoahHeap::finish_concurrent_unloading() { 2448 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); 2449 if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) { 2450 _unloader.finish(); 2451 } 2452 } 2453 2454 #ifdef ASSERT 2455 void ShenandoahHeap::assert_gc_workers(uint nworkers) { 2456 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity"); 2457 2458 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) { 2459 if (UseDynamicNumberOfGCThreads) { 2460 assert(nworkers <= ParallelGCThreads, "Cannot use more than it has"); 2461 } else { 2462 // Use ParallelGCThreads inside safepoints 2463 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints"); 2464 } 2465 } else { 2466 if (UseDynamicNumberOfGCThreads) { 2467 assert(nworkers <= ConcGCThreads, "Cannot use more than it has"); 2468 } else { 2469 // Use ConcGCThreads outside safepoints 2470 assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints"); 2471 } 2472 } 2473 } 2474 #endif 2475 2476 ShenandoahVerifier* ShenandoahHeap::verifier() { 2477 guarantee(ShenandoahVerify, "Should be enabled"); 2478 assert (_verifier != NULL, "sanity"); 2479 return _verifier; 2480 } 2481 2482 template<class T> 2483 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask { 2484 private: 2485 T cl; 2486 ShenandoahHeap* _heap; 2487 ShenandoahRegionIterator* _regions; 2488 bool _concurrent; 2489 public: 2490 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) : 2491 AbstractGangTask("Concurrent Update References Task"), 2492 cl(T()), 2493 _heap(ShenandoahHeap::heap()), 2494 _regions(regions), 2495 _concurrent(concurrent) { 2496 } 2497 2498 void work(uint worker_id) { 2499 if (_concurrent) { 2500 ShenandoahConcurrentWorkerSession worker_session(worker_id); 2501 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers); 2502 do_work(); 2503 } else { 2504 ShenandoahParallelWorkerSession worker_session(worker_id); 2505 do_work(); 2506 } 2507 } 2508 2509 private: 2510 void do_work() { 2511 ShenandoahHeapRegion* r = _regions->next(); 2512 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context(); 2513 while (r != NULL) { 2514 HeapWord* update_watermark = r->get_update_watermark(); 2515 assert (update_watermark >= r->bottom(), "sanity"); 2516 if (r->is_active() && !r->is_cset()) { 2517 _heap->marked_object_oop_iterate(r, &cl, update_watermark); 2518 } 2519 if (ShenandoahPacing) { 2520 _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom())); 2521 } 2522 if (_heap->check_cancelled_gc_and_yield(_concurrent)) { 2523 return; 2524 } 2525 r = _regions->next(); 2526 } 2527 } 2528 }; 2529 2530 void ShenandoahHeap::update_heap_references(bool concurrent) { 2531 ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent); 2532 workers()->run_task(&task); 2533 } 2534 2535 void ShenandoahHeap::op_init_updaterefs() { 2536 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); 2537 2538 set_evacuation_in_progress(false); 2539 2540 // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to 2541 // make them parsable for update code to work correctly. Plus, we can compute new sizes 2542 // for future GCLABs here. 2543 if (UseTLAB) { 2544 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_manage_gclabs); 2545 gclabs_retire(ResizeTLAB); 2546 } 2547 2548 if (ShenandoahVerify) { 2549 if (!is_degenerated_gc_in_progress()) { 2550 verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots); 2551 } 2552 verifier()->verify_before_updaterefs(); 2553 } 2554 2555 set_update_refs_in_progress(true); 2556 2557 _update_refs_iterator.reset(); 2558 2559 if (ShenandoahPacing) { 2560 pacer()->setup_for_updaterefs(); 2561 } 2562 } 2563 2564 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { 2565 private: 2566 ShenandoahHeapLock* const _lock; 2567 2568 public: 2569 ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {} 2570 2571 void heap_region_do(ShenandoahHeapRegion* r) { 2572 // Drop unnecessary "pinned" state from regions that does not have CP marks 2573 // anymore, as this would allow trashing them. 2574 2575 if (r->is_active()) { 2576 if (r->is_pinned()) { 2577 if (r->pin_count() == 0) { 2578 ShenandoahHeapLocker locker(_lock); 2579 r->make_unpinned(); 2580 } 2581 } else { 2582 if (r->pin_count() > 0) { 2583 ShenandoahHeapLocker locker(_lock); 2584 r->make_pinned(); 2585 } 2586 } 2587 } 2588 } 2589 2590 bool is_thread_safe() { return true; } 2591 }; 2592 2593 void ShenandoahHeap::op_final_updaterefs() { 2594 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); 2595 2596 finish_concurrent_unloading(); 2597 2598 // Check if there is left-over work, and finish it 2599 if (_update_refs_iterator.has_next()) { 2600 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work); 2601 2602 // Finish updating references where we left off. 2603 clear_cancelled_gc(); 2604 update_heap_references(false); 2605 } 2606 2607 // Clear cancelled GC, if set. On cancellation path, the block before would handle 2608 // everything. On degenerated paths, cancelled gc would not be set anyway. 2609 if (cancelled_gc()) { 2610 clear_cancelled_gc(); 2611 } 2612 assert(!cancelled_gc(), "Should have been done right before"); 2613 2614 if (ShenandoahVerify && !is_degenerated_gc_in_progress()) { 2615 verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots); 2616 } 2617 2618 if (is_degenerated_gc_in_progress()) { 2619 concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots); 2620 } else { 2621 concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots); 2622 } 2623 2624 // Has to be done before cset is clear 2625 if (ShenandoahVerify) { 2626 verifier()->verify_roots_in_to_space(); 2627 } 2628 2629 { 2630 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states); 2631 ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl; 2632 parallel_heap_region_iterate(&cl); 2633 2634 assert_pinned_region_status(); 2635 } 2636 2637 { 2638 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset); 2639 trash_cset_regions(); 2640 } 2641 2642 set_has_forwarded_objects(false); 2643 set_update_refs_in_progress(false); 2644 2645 if (ShenandoahVerify) { 2646 verifier()->verify_after_updaterefs(); 2647 } 2648 2649 if (VerifyAfterGC) { 2650 Universe::verify(); 2651 } 2652 2653 { 2654 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset); 2655 ShenandoahHeapLocker locker(lock()); 2656 _free_set->rebuild(); 2657 } 2658 } 2659 2660 void ShenandoahHeap::print_extended_on(outputStream *st) const { 2661 print_on(st); 2662 print_heap_regions_on(st); 2663 } 2664 2665 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) { 2666 size_t slice = r->index() / _bitmap_regions_per_slice; 2667 2668 size_t regions_from = _bitmap_regions_per_slice * slice; 2669 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1)); 2670 for (size_t g = regions_from; g < regions_to; g++) { 2671 assert (g / _bitmap_regions_per_slice == slice, "same slice"); 2672 if (skip_self && g == r->index()) continue; 2673 if (get_region(g)->is_committed()) { 2674 return true; 2675 } 2676 } 2677 return false; 2678 } 2679 2680 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) { 2681 shenandoah_assert_heaplocked(); 2682 2683 // Bitmaps in special regions do not need commits 2684 if (_bitmap_region_special) { 2685 return true; 2686 } 2687 2688 if (is_bitmap_slice_committed(r, true)) { 2689 // Some other region from the group is already committed, meaning the bitmap 2690 // slice is already committed, we exit right away. 2691 return true; 2692 } 2693 2694 // Commit the bitmap slice: 2695 size_t slice = r->index() / _bitmap_regions_per_slice; 2696 size_t off = _bitmap_bytes_per_slice * slice; 2697 size_t len = _bitmap_bytes_per_slice; 2698 char* start = (char*) _bitmap_region.start() + off; 2699 2700 if (!os::commit_memory(start, len, false)) { 2701 return false; 2702 } 2703 2704 if (AlwaysPreTouch) { 2705 os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size); 2706 } 2707 2708 return true; 2709 } 2710 2711 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) { 2712 shenandoah_assert_heaplocked(); 2713 2714 // Bitmaps in special regions do not need uncommits 2715 if (_bitmap_region_special) { 2716 return true; 2717 } 2718 2719 if (is_bitmap_slice_committed(r, true)) { 2720 // Some other region from the group is still committed, meaning the bitmap 2721 // slice is should stay committed, exit right away. 2722 return true; 2723 } 2724 2725 // Uncommit the bitmap slice: 2726 size_t slice = r->index() / _bitmap_regions_per_slice; 2727 size_t off = _bitmap_bytes_per_slice * slice; 2728 size_t len = _bitmap_bytes_per_slice; 2729 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) { 2730 return false; 2731 } 2732 return true; 2733 } 2734 2735 void ShenandoahHeap::safepoint_synchronize_begin() { 2736 if (ShenandoahSuspendibleWorkers || UseStringDeduplication) { 2737 SuspendibleThreadSet::synchronize(); 2738 } 2739 } 2740 2741 void ShenandoahHeap::safepoint_synchronize_end() { 2742 if (ShenandoahSuspendibleWorkers || UseStringDeduplication) { 2743 SuspendibleThreadSet::desynchronize(); 2744 } 2745 } 2746 2747 void ShenandoahHeap::vmop_entry_init_mark() { 2748 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); 2749 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross); 2750 2751 try_inject_alloc_failure(); 2752 VM_ShenandoahInitMark op; 2753 VMThread::execute(&op); // jump to entry_init_mark() under safepoint 2754 } 2755 2756 void ShenandoahHeap::vmop_entry_final_mark() { 2757 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); 2758 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross); 2759 2760 try_inject_alloc_failure(); 2761 VM_ShenandoahFinalMarkStartEvac op; 2762 VMThread::execute(&op); // jump to entry_final_mark under safepoint 2763 } 2764 2765 void ShenandoahHeap::vmop_entry_init_updaterefs() { 2766 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); 2767 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross); 2768 2769 try_inject_alloc_failure(); 2770 VM_ShenandoahInitUpdateRefs op; 2771 VMThread::execute(&op); 2772 } 2773 2774 void ShenandoahHeap::vmop_entry_final_updaterefs() { 2775 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); 2776 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross); 2777 2778 try_inject_alloc_failure(); 2779 VM_ShenandoahFinalUpdateRefs op; 2780 VMThread::execute(&op); 2781 } 2782 2783 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) { 2784 TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters()); 2785 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross); 2786 2787 try_inject_alloc_failure(); 2788 VM_ShenandoahFullGC op(cause); 2789 VMThread::execute(&op); 2790 } 2791 2792 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) { 2793 TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters()); 2794 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross); 2795 2796 VM_ShenandoahDegeneratedGC degenerated_gc((int)point); 2797 VMThread::execute(°enerated_gc); 2798 } 2799 2800 void ShenandoahHeap::entry_init_mark() { 2801 const char* msg = init_mark_event_message(); 2802 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark); 2803 EventMark em("%s", msg); 2804 2805 ShenandoahWorkerScope scope(workers(), 2806 ShenandoahWorkerPolicy::calc_workers_for_init_marking(), 2807 "init marking"); 2808 2809 op_init_mark(); 2810 } 2811 2812 void ShenandoahHeap::entry_final_mark() { 2813 const char* msg = final_mark_event_message(); 2814 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark); 2815 EventMark em("%s", msg); 2816 2817 ShenandoahWorkerScope scope(workers(), 2818 ShenandoahWorkerPolicy::calc_workers_for_final_marking(), 2819 "final marking"); 2820 2821 op_final_mark(); 2822 } 2823 2824 void ShenandoahHeap::entry_init_updaterefs() { 2825 static const char* msg = "Pause Init Update Refs"; 2826 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs); 2827 EventMark em("%s", msg); 2828 2829 // No workers used in this phase, no setup required 2830 2831 op_init_updaterefs(); 2832 } 2833 2834 void ShenandoahHeap::entry_final_updaterefs() { 2835 static const char* msg = "Pause Final Update Refs"; 2836 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs); 2837 EventMark em("%s", msg); 2838 2839 ShenandoahWorkerScope scope(workers(), 2840 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(), 2841 "final reference update"); 2842 2843 op_final_updaterefs(); 2844 } 2845 2846 void ShenandoahHeap::entry_full(GCCause::Cause cause) { 2847 static const char* msg = "Pause Full"; 2848 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */); 2849 EventMark em("%s", msg); 2850 2851 ShenandoahWorkerScope scope(workers(), 2852 ShenandoahWorkerPolicy::calc_workers_for_fullgc(), 2853 "full gc"); 2854 2855 op_full(cause); 2856 } 2857 2858 void ShenandoahHeap::entry_degenerated(int point) { 2859 ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point; 2860 const char* msg = degen_event_message(dpoint); 2861 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */); 2862 EventMark em("%s", msg); 2863 2864 ShenandoahWorkerScope scope(workers(), 2865 ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(), 2866 "stw degenerated gc"); 2867 2868 set_degenerated_gc_in_progress(true); 2869 op_degenerated(dpoint); 2870 set_degenerated_gc_in_progress(false); 2871 } 2872 2873 void ShenandoahHeap::entry_mark() { 2874 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); 2875 2876 const char* msg = conc_mark_event_message(); 2877 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark); 2878 EventMark em("%s", msg); 2879 2880 ShenandoahWorkerScope scope(workers(), 2881 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(), 2882 "concurrent marking"); 2883 2884 try_inject_alloc_failure(); 2885 op_mark(); 2886 } 2887 2888 void ShenandoahHeap::entry_evac() { 2889 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); 2890 2891 static const char* msg = "Concurrent evacuation"; 2892 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac); 2893 EventMark em("%s", msg); 2894 2895 ShenandoahWorkerScope scope(workers(), 2896 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(), 2897 "concurrent evacuation"); 2898 2899 try_inject_alloc_failure(); 2900 op_conc_evac(); 2901 } 2902 2903 void ShenandoahHeap::entry_updaterefs() { 2904 static const char* msg = "Concurrent update references"; 2905 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs); 2906 EventMark em("%s", msg); 2907 2908 ShenandoahWorkerScope scope(workers(), 2909 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(), 2910 "concurrent reference update"); 2911 2912 try_inject_alloc_failure(); 2913 op_updaterefs(); 2914 } 2915 2916 void ShenandoahHeap::entry_weak_roots() { 2917 static const char* msg = "Concurrent weak roots"; 2918 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots); 2919 EventMark em("%s", msg); 2920 2921 ShenandoahWorkerScope scope(workers(), 2922 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(), 2923 "concurrent weak root"); 2924 2925 try_inject_alloc_failure(); 2926 op_weak_roots(); 2927 } 2928 2929 void ShenandoahHeap::entry_class_unloading() { 2930 static const char* msg = "Concurrent class unloading"; 2931 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload); 2932 EventMark em("%s", msg); 2933 2934 ShenandoahWorkerScope scope(workers(), 2935 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(), 2936 "concurrent class unloading"); 2937 2938 try_inject_alloc_failure(); 2939 op_class_unloading(); 2940 } 2941 2942 void ShenandoahHeap::entry_strong_roots() { 2943 static const char* msg = "Concurrent strong roots"; 2944 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots); 2945 EventMark em("%s", msg); 2946 2947 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots); 2948 2949 ShenandoahWorkerScope scope(workers(), 2950 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(), 2951 "concurrent strong root"); 2952 2953 try_inject_alloc_failure(); 2954 op_strong_roots(); 2955 } 2956 2957 void ShenandoahHeap::entry_cleanup_early() { 2958 static const char* msg = "Concurrent cleanup"; 2959 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */); 2960 EventMark em("%s", msg); 2961 2962 // This phase does not use workers, no need for setup 2963 2964 try_inject_alloc_failure(); 2965 op_cleanup_early(); 2966 } 2967 2968 void ShenandoahHeap::entry_cleanup_complete() { 2969 static const char* msg = "Concurrent cleanup"; 2970 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */); 2971 EventMark em("%s", msg); 2972 2973 // This phase does not use workers, no need for setup 2974 2975 try_inject_alloc_failure(); 2976 op_cleanup_complete(); 2977 } 2978 2979 void ShenandoahHeap::entry_reset() { 2980 static const char* msg = "Concurrent reset"; 2981 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset); 2982 EventMark em("%s", msg); 2983 2984 ShenandoahWorkerScope scope(workers(), 2985 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(), 2986 "concurrent reset"); 2987 2988 try_inject_alloc_failure(); 2989 op_reset(); 2990 } 2991 2992 void ShenandoahHeap::entry_preclean() { 2993 if (ShenandoahPreclean && process_references()) { 2994 static const char* msg = "Concurrent precleaning"; 2995 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_preclean); 2996 EventMark em("%s", msg); 2997 2998 ShenandoahWorkerScope scope(workers(), 2999 ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(), 3000 "concurrent preclean", 3001 /* check_workers = */ false); 3002 3003 try_inject_alloc_failure(); 3004 op_preclean(); 3005 } 3006 } 3007 3008 void ShenandoahHeap::entry_uncommit(double shrink_before) { 3009 static const char *msg = "Concurrent uncommit"; 3010 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */); 3011 EventMark em("%s", msg); 3012 3013 op_uncommit(shrink_before); 3014 } 3015 3016 void ShenandoahHeap::try_inject_alloc_failure() { 3017 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) { 3018 _inject_alloc_failure.set(); 3019 os::naked_short_sleep(1); 3020 if (cancelled_gc()) { 3021 log_info(gc)("Allocation failure was successfully injected"); 3022 } 3023 } 3024 } 3025 3026 bool ShenandoahHeap::should_inject_alloc_failure() { 3027 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset(); 3028 } 3029 3030 void ShenandoahHeap::initialize_serviceability() { 3031 _memory_pool = new ShenandoahMemoryPool(this); 3032 _cycle_memory_manager.add_pool(_memory_pool); 3033 _stw_memory_manager.add_pool(_memory_pool); 3034 } 3035 3036 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() { 3037 GrowableArray<GCMemoryManager*> memory_managers(2); 3038 memory_managers.append(&_cycle_memory_manager); 3039 memory_managers.append(&_stw_memory_manager); 3040 return memory_managers; 3041 } 3042 3043 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() { 3044 GrowableArray<MemoryPool*> memory_pools(1); 3045 memory_pools.append(_memory_pool); 3046 return memory_pools; 3047 } 3048 3049 MemoryUsage ShenandoahHeap::memory_usage() { 3050 return _memory_pool->get_memory_usage(); 3051 } 3052 3053 ShenandoahRegionIterator::ShenandoahRegionIterator() : 3054 _heap(ShenandoahHeap::heap()), 3055 _index(0) {} 3056 3057 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) : 3058 _heap(heap), 3059 _index(0) {} 3060 3061 void ShenandoahRegionIterator::reset() { 3062 _index = 0; 3063 } 3064 3065 bool ShenandoahRegionIterator::has_next() const { 3066 return _index < _heap->num_regions(); 3067 } 3068 3069 char ShenandoahHeap::gc_state() const { 3070 return _gc_state.raw_value(); 3071 } 3072 3073 void ShenandoahHeap::deduplicate_string(oop str) { 3074 assert(java_lang_String::is_instance(str), "invariant"); 3075 3076 if (ShenandoahStringDedup::is_enabled()) { 3077 ShenandoahStringDedup::deduplicate(str); 3078 } 3079 } 3080 3081 const char* ShenandoahHeap::init_mark_event_message() const { 3082 assert(!has_forwarded_objects(), "Should not have forwarded objects here"); 3083 3084 bool proc_refs = process_references(); 3085 bool unload_cls = unload_classes(); 3086 3087 if (proc_refs && unload_cls) { 3088 return "Pause Init Mark (process weakrefs) (unload classes)"; 3089 } else if (proc_refs) { 3090 return "Pause Init Mark (process weakrefs)"; 3091 } else if (unload_cls) { 3092 return "Pause Init Mark (unload classes)"; 3093 } else { 3094 return "Pause Init Mark"; 3095 } 3096 } 3097 3098 const char* ShenandoahHeap::final_mark_event_message() const { 3099 assert(!has_forwarded_objects(), "Should not have forwarded objects here"); 3100 3101 bool proc_refs = process_references(); 3102 bool unload_cls = unload_classes(); 3103 3104 if (proc_refs && unload_cls) { 3105 return "Pause Final Mark (process weakrefs) (unload classes)"; 3106 } else if (proc_refs) { 3107 return "Pause Final Mark (process weakrefs)"; 3108 } else if (unload_cls) { 3109 return "Pause Final Mark (unload classes)"; 3110 } else { 3111 return "Pause Final Mark"; 3112 } 3113 } 3114 3115 const char* ShenandoahHeap::conc_mark_event_message() const { 3116 assert(!has_forwarded_objects(), "Should not have forwarded objects here"); 3117 3118 bool proc_refs = process_references(); 3119 bool unload_cls = unload_classes(); 3120 3121 if (proc_refs && unload_cls) { 3122 return "Concurrent marking (process weakrefs) (unload classes)"; 3123 } else if (proc_refs) { 3124 return "Concurrent marking (process weakrefs)"; 3125 } else if (unload_cls) { 3126 return "Concurrent marking (unload classes)"; 3127 } else { 3128 return "Concurrent marking"; 3129 } 3130 } 3131 3132 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const { 3133 switch (point) { 3134 case _degenerated_unset: 3135 return "Pause Degenerated GC (<UNSET>)"; 3136 case _degenerated_outside_cycle: 3137 return "Pause Degenerated GC (Outside of Cycle)"; 3138 case _degenerated_mark: 3139 return "Pause Degenerated GC (Mark)"; 3140 case _degenerated_evac: 3141 return "Pause Degenerated GC (Evacuation)"; 3142 case _degenerated_updaterefs: 3143 return "Pause Degenerated GC (Update Refs)"; 3144 default: 3145 ShouldNotReachHere(); 3146 return "ERROR"; 3147 } 3148 } 3149 3150 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) { 3151 #ifdef ASSERT 3152 assert(_liveness_cache != NULL, "sanity"); 3153 assert(worker_id < _max_workers, "sanity"); 3154 for (uint i = 0; i < num_regions(); i++) { 3155 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty"); 3156 } 3157 #endif 3158 return _liveness_cache[worker_id]; 3159 } 3160 3161 void ShenandoahHeap::flush_liveness_cache(uint worker_id) { 3162 assert(worker_id < _max_workers, "sanity"); 3163 assert(_liveness_cache != NULL, "sanity"); 3164 ShenandoahLiveData* ld = _liveness_cache[worker_id]; 3165 for (uint i = 0; i < num_regions(); i++) { 3166 ShenandoahLiveData live = ld[i]; 3167 if (live > 0) { 3168 ShenandoahHeapRegion* r = get_region(i); 3169 r->increase_live_data_gc_words(live); 3170 ld[i] = 0; 3171 } 3172 } 3173 }