1 /* 2 * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahAllocTracker.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

// Cache the heap singleton once at construction; this closure is used on
// reference-update paths where repeated ShenandoahHeap::heap() calls would add up.
ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
// Debug-only verification closure: asserts that every non-null oop it visits
// already points into to-space, i.e. resolving the object through the barrier
// (Brooks forwarding pointer) yields the object itself.
template <class T>
void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: "PTR_FORMAT" , rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

// Virtual dispatch entry points forward to the non-virtual template worker above.
void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

// Parallel pre-touch task: workers claim regions from the shared set and touch
// both the region's storage and the corresponding slices of the two mark
// bitmaps. Doing both from the same worker matters on NUMA, so that a region
// and its bitmap storage land on the same node (see initialize()).
class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;      // shared claim queue of regions to touch
  const size_t _bitmap_size;              // total bytes in each mark bitmap
  const size_t _page_size;                // page granularity used for touching
  char* _bitmap0_base;                    // base of mark bitmap 0
  char* _bitmap1_base;                    // base of mark bitmap 1
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
96 GCId::undefined()), 97 _bitmap0_base(bitmap0_base), 98 _bitmap1_base(bitmap1_base), 99 _regions(regions), 100 _bitmap_size(bitmap_size), 101 _page_size(page_size) { 102 _regions->clear_current_index(); 103 }; 104 105 virtual void work(uint worker_id) { 106 ShenandoahHeapRegion* r = _regions->claim_next(); 107 while (r != NULL) { 108 log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT, 109 r->region_number(), p2i(r->bottom()), p2i(r->end())); 110 os::pretouch_memory(r->bottom(), r->end(), _page_size); 111 112 size_t start = r->region_number() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); 113 size_t end = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); 114 assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size); 115 116 log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT, 117 r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end)); 118 os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size); 119 120 log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT, 121 r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end)); 122 os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size); 123 124 r = _regions->claim_next(); 125 } 126 } 127 }; 128 129 jint ShenandoahHeap::initialize() { 130 CollectedHeap::pre_initialize(); 131 132 BrooksPointer::initial_checks(); 133 134 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); 135 size_t max_byte_size = collector_policy()->max_heap_byte_size(); 136 size_t heap_alignment = collector_policy()->heap_alignment(); 137 138 if (ShenandoahAlwaysPreTouch) { 139 // Enabled pre-touch means the entire heap is committed right away. 
    init_byte_size = max_byte_size;
  }

  // Both max and initial sizes must be region-aligned.
  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 heap_alignment);
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);

  _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
  _committed = _initial_size;

  // Commit only the initial portion; the rest is committed on demand.
  log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
  if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
    vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
  }

  size_t reg_size_words = ShenandoahHeapRegion::region_size_words();

  _ordered_regions = new ShenandoahHeapRegionSet(_num_regions);
  _free_regions = new ShenandoahFreeSet(_ordered_regions, _num_regions);

  _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());

  // Top-at-mark-start arrays are biased by the heap base so they can be
  // indexed directly by (addr >> region shift) without subtracting the base.
  _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
                             ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());

  _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
                                 ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());

  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
                                                         (HeapWord*) pgc_rs.base() + reg_size_words * i,
                                                         reg_size_words,
                                                         i,
                                                         i < num_committed_regions);

      _complete_top_at_mark_starts_base[i] = r->bottom();
      _next_top_at_mark_starts_base[i] = r->bottom();

      // Add to ordered regions first.
      // We use the active size of ordered regions as the number of active regions in heap,
      // free set and collection set use the number to assert the correctness of incoming regions.
      _ordered_regions->add_region(r);
      _free_regions->add_region(r);
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }
  }

  assert(_ordered_regions->active_regions() == _num_regions, "Must match");
  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(base()));

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_trace(gc, region)("All Regions");
    _ordered_regions->print_on(&ls);
    log_trace(gc, region)("Free Regions");
    _free_regions->print_on(&ls);
  }

  _recycled_regions = NEW_C_HEAP_ARRAY(size_t, _num_regions, mtGC);

  // The call below uses stuff (the SATB* things) that are in G1, but probably
  // belong into a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /*G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t bitmap_bytes_per_region = _bitmap_size / _num_regions;
  _bitmap_words_per_region = bitmap_bytes_per_region / HeapWordSize;

  // Per-region bitmap slices must be page-granular powers of two so they can
  // be committed/uncommitted independently with the region (see commit_bitmaps).
  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
  guarantee((bitmap_bytes_per_region % os::vm_page_size()) == 0,
            "Bitmap bytes per region should be page-granular: bpr = " SIZE_FORMAT ", page size = %d",
            bitmap_bytes_per_region, os::vm_page_size());
  guarantee(is_power_of_2(_bitmap_words_per_region),
            "Bitmap words per region Should be power of two: " SIZE_FORMAT, _bitmap_words_per_region);

  size_t bitmap_page_size = UseLargePages && (bitmap_bytes_per_region >= (size_t)os::large_page_size()) ?
                            (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  // Commit bitmap slices for the regions that are committed up front.
  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (r->is_committed()) {
        commit_bitmaps(r);
      }
    }
  }

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  // Bitmap 0 starts life as the "complete" bitmap, bitmap 1 as the "next" one;
  // their roles swap between marking cycles.
  _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
  _next_mark_bit_map = &_mark_bit_map1;

  if (UseShenandoahMatrix) {
    _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
  } else {
    _connection_matrix = NULL;
  }

  _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
                new ShenandoahPartialGC(this, _num_regions) :
                NULL;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _phase_timings = new ShenandoahPhaseTimings();

  if (ShenandoahAllocationTrace) {
    _alloc_tracker = new ShenandoahAllocTracker();
  }

  ShenandoahStringDedup::initialize();

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  ShenandoahCodeRoots::initialize();

  return JNI_OK;
}

// Constructor only sets up fields and worker gangs; heavy lifting (memory
// reservation, region construction) happens later in initialize().
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _update_refs_in_progress(false),
  _concurrent_partial_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _connection_matrix(NULL),
  _cancelled_concgc(0),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _verifier(NULL),
  _heap_lock(0),
  _used_at_last_gc(0),
  _alloc_seq_at_last_gc_start(0),
  _alloc_seq_at_last_gc_end(0),
  _safepoint_workers(NULL),
#ifdef ASSERT
  _heap_lock_owner(NULL),
  _heap_expansion_count(0),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL),
  _alloc_tracker(NULL)
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  // Always have at least one worker.
  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelSafepointCleanupThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelSafepointCleanupThreads,
                                                false, false);
    _safepoint_workers->initialize_workers();
  }
}

// Parallel task: clears the "next" mark bitmap for all committed regions, from
// each region's bottom up to its next top-at-mark-start.
class ShenandoahResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;  // shared claim queue

public:
  ShenandoahResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      if (region->is_committed()) {
        HeapWord* bottom = region->bottom();
        HeapWord* top = heap->next_top_at_mark_start(region->bottom());
        if (top > bottom) {
          heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
        assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  assert_gc_workers(workers->active_workers());

  ShenandoahResetNextBitmapTask task = ShenandoahResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

// Parallel task: clears the "complete" mark bitmap; mirror image of
// ShenandoahResetNextBitmapTask above.
class ShenandoahResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;  // shared claim queue

public:
  ShenandoahResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      if (region->is_committed()) {
        HeapWord* bottom = region->bottom();
        HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
        if (top > bottom) {
          heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
        assert(heap->is_complete_bitmap_clear_range(bottom, region->end()), "must be clear");
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  assert_gc_workers(workers->active_workers());

  ShenandoahResetCompleteBitmapTask task = ShenandoahResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

// Returns true iff the "next" mark bitmap has no set bits under any committed region.
bool ShenandoahHeap::is_next_bitmap_clear() {
  for (size_t idx = 0; idx < _num_regions; idx++) {
    ShenandoahHeapRegion* r = _ordered_regions->get(idx);
    if (r->is_committed() && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
      return false;
    }
  }
  return true;
}

// A range is clear when the first marked address at-or-after start is end.
bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

// Human-readable heap summary: sizes, current GC state, reserved range,
// optional connection matrix info, and (with -verbose) per-region details.
void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
               capacity() / K, committed() / K, used() / K);
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
               num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);

  st->print("Status: ");
  if (concurrent_mark_in_progress()) {
    st->print("marking ");
  } else if (is_evacuation_in_progress()) {
    st->print("evacuating ");
  } else if (is_update_refs_in_progress()) {
    st->print("updating refs ");
  } else {
    st->print("idle ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  if (UseShenandoahMatrix) {
    st->print_cr("Matrix:");

    ShenandoahConnectionMatrix* matrix = connection_matrix();
    if (matrix != NULL) {
      st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
      st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
      st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
    } else {
      st->print_cr(" No matrix.");
    }
  }

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

// Thread closure that (re)initializes the per-thread GCLAB.
class ShenandoahInitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

// Post-construction setup that needs a mostly-initialized VM: GCLABs for all
// existing threads, concurrent-mark structures, and reference processing.
void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // Threads_lock keeps the thread list stable while we walk it.
    MutexLocker ml(Threads_lock);

    ShenandoahInitGCLABClosure init_gclabs;
    Threads::java_threads_do(&init_gclabs);
    gc_threads_do(&init_gclabs);

    // gclab can not be initialized early during VM startup, as it can not determinate its max_size.
    // Now, we will let WorkGang to initialize gclab when new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();

  _shenandoah_policy->post_heap_initialize();
}

// Lock-free readers of the accumulating counters. The acquire fence orders the
// load against prior publication by writers holding the heap lock.
size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

// All mutators below require the heap lock or a safepoint; see the asserts.
void ShenandoahHeap::increase_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return num_regions() * ShenandoahHeapRegion::region_size_bytes();
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

// Address-range membership test against the contiguous region array.
bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

// Uncommits empty regions that have stayed empty longer than
// ShenandoahUncommitDelay (milliseconds; empty_time() is in seconds).
void ShenandoahHeap::handle_heap_shrinkage() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahHeapRegionSet* set = regions();

  size_t count = 0;
  double current = os::elapsedTime();
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = set->get(i);
    if (r->is_empty_committed() &&
        (current - r->empty_time()) * 1000 > ShenandoahUncommitDelay &&
        r->make_empty_uncommitted()) {
      count++;
    }
  }

  if (count > 0) {
    log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
                 count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
  }
}

// Slow path for GCLAB allocation: either keep the current GCLAB (when the
// remaining free space is worth retaining) and return NULL so the caller
// allocates in shared space, or retire it and install a freshly-sized one.
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

// Allocates a new mutator TLAB of word_size words, or NULL on failure.
HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_tlab);
}

// Allocates a new GC-worker LAB of word_size words, or NULL on failure.
HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_gclab);
}

// Common LAB allocation: delegates to allocate_memory and accounts the bytes
// toward the allocation budget observed by the concurrent-mark heuristics.
HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
  HeapWord* result = allocate_memory(word_size, type);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in collection set");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));

  }
  return result;
}

// Checked access to the heap singleton (debug builds assert kind and non-NULL).
ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Unitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

// Unchecked variant for use during early startup/teardown.
ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

// Central allocation path. For mutator allocations (TLAB/shared) a failed
// attempt triggers full GCs with bounded retries before giving up.
HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, AllocType type) {
  ShenandoahAllocTrace trace_alloc(word_size, type);

  bool in_new_region = false;
  HeapWord* result = allocate_memory_under_lock(word_size, type, in_new_region);

  if (type == _alloc_tlab || type == _alloc_shared) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking the way to detect progress from "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT " Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_under_lock(word_size, type, in_new_region);
    }
  }

  if (in_new_region) {
    // Update monitoring counters when we took a new region. This amortizes the
    // update costs on slow path.
    concurrent_thread()->trigger_counters_update();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

// Free-set allocation is only done under the heap lock.
HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, AllocType type, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_regions->allocate(word_size, type, in_new_region);
}

// CollectedHeap entry point for ordinary object allocation: allocates
// word_size plus room for the Brooks forwarding pointer, which precedes the
// object; the returned address is the object start, past the fwdptr.
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _alloc_shared);
  // NOTE(review): this offset is computed before the NULL check below;
  // consider moving it inside the if — pointer arithmetic on NULL is UB.
  HeapWord* result = filler + BrooksPointer::word_size();
  if (filler != NULL) {
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targetted region");
    return result;
  } else {
    return NULL;
  }
}

// Root-scanning closure used during evacuation: evacuates collection-set
// objects (if not already forwarded) AND updates the visited slot to point at
// the to-space copy.
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (!
oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        // Not yet forwarded (resolves to itself): evacuate it now.
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        // Point the root slot at the to-space copy.
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

// Like ShenandoahEvacuateUpdateRootsClosure, but only evacuates; it does NOT
// write back the forwarded reference (used where stores must not happen, e.g.
// concurrent code cache scan — see ShenandoahParallelEvacuationTask).
class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          _heap->evacuate_object(obj, _thread, evac);
        }
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

// Object closure applied to live objects of a collection-set region:
// evacuates each object that has not already been forwarded by another thread.
class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      bool evac;
      _heap->evacuate_object(p, _thread, evac);
    }
  }
};

// Gang task driving concurrent evacuation: one worker additionally claims the
// code cache (CAS on _claimed_codecache) when concurrent code root
// evacuation is enabled; all workers then drain the collection set.
class ShenandoahParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  volatile jbyte _claimed_codecache;  // 0 -> unclaimed, 1 -> claimed

  // First caller to flip 0 -> 1 wins the code cache work.
  bool claim_codecache() {
    jbyte old = Atomic::cmpxchg((jbyte)1, &_claimed_codecache, (jbyte)0);
    return old == 0;
  }
public:
  ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
                                   ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _cs(cs),
    _sh(sh),
    _claimed_codecache(0)
  {}

  void work(uint worker_id) {

    SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);

    // If concurrent code cache evac is enabled, evacuate it here.
    // Note we cannot update the roots here, because we risk non-atomic stores to the alive
    // nmethods. The update would be handled elsewhere.
888 if (ShenandoahConcurrentEvacCodeRoots && claim_codecache()) { 889 ShenandoahEvacuateRootsClosure cl; 890 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 891 CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations); 892 CodeCache::blobs_do(&blobs); 893 } 894 895 ShenandoahParallelEvacuateRegionObjectClosure cl(_sh); 896 ShenandoahHeapRegion* r; 897 while ((r =_cs->claim_next()) != NULL) { 898 log_develop_trace(gc, region)("Thread "INT32_FORMAT" claimed Heap Region "SIZE_FORMAT, 899 worker_id, 900 r->region_number()); 901 902 assert(r->has_live(), "all-garbage regions are reclaimed early"); 903 _sh->marked_object_iterate(r, &cl); 904 905 if (_sh->check_cancelled_concgc_and_yield()) { 906 log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, r->region_number()); 907 break; 908 } 909 } 910 } 911 }; 912 913 void ShenandoahHeap::trash_cset_regions() { 914 ShenandoahHeapLocker locker(lock()); 915 916 ShenandoahCollectionSet* set = collection_set(); 917 ShenandoahHeapRegion* r; 918 set->clear_current_index(); 919 while ((r = set->next()) != NULL) { 920 r->make_trash(); 921 } 922 collection_set()->clear(); 923 } 924 925 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const { 926 st->print_cr("Heap Regions:"); 927 st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned"); 928 st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data"); 929 st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)"); 930 st->print_cr("FTS=first use timestamp, LTS=last use timestamp"); 931 932 _ordered_regions->print_on(st); 933 } 934 935 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) { 936 assert(start->is_humongous_start(), "reclaim regions starting with the first one"); 937 938 oop humongous_obj = oop(start->bottom() + 
BrooksPointer::word_size()); 939 size_t size = humongous_obj->size() + BrooksPointer::word_size(); 940 size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); 941 size_t index = start->region_number() + required_regions - 1; 942 943 assert(!start->has_live(), "liveness must be zero"); 944 log_trace(gc, humongous)("Reclaiming "SIZE_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size); 945 946 for(size_t i = 0; i < required_regions; i++) { 947 // Reclaim from tail. Otherwise, assertion fails when printing region to trace log, 948 // as it expects that every region belongs to a humongous region starting with a humongous start region. 949 ShenandoahHeapRegion* region = _ordered_regions->get(index --); 950 951 LogTarget(Trace, gc, humongous) lt; 952 if (lt.is_enabled()) { 953 ResourceMark rm; 954 LogStream ls(lt); 955 region->print_on(&ls); 956 } 957 958 assert(region->is_humongous(), "expect correct humongous start or continuation"); 959 assert(!in_collection_set(region), "Humongous region should not be in collection set"); 960 961 region->make_trash(); 962 } 963 return required_regions; 964 } 965 966 #ifdef ASSERT 967 class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure { 968 bool heap_region_do(ShenandoahHeapRegion* r) { 969 assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now"); 970 return false; 971 } 972 }; 973 #endif 974 975 void ShenandoahHeap::prepare_for_concurrent_evacuation() { 976 assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. 
FIXME CHF!"); 977 978 log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id()); 979 980 if (!cancelled_concgc()) { 981 // Allocations might have happened before we STWed here, record peak: 982 shenandoahPolicy()->record_peak_occupancy(); 983 984 ensure_parsability(true); 985 986 if (ShenandoahVerify) { 987 verifier()->verify_after_concmark(); 988 } 989 990 trash_cset_regions(); 991 992 // NOTE: This needs to be done during a stop the world pause, because 993 // putting regions into the collection set concurrently with Java threads 994 // will create a race. In particular, acmp could fail because when we 995 // resolve the first operand, the containing region might not yet be in 996 // the collection set, and thus return the original oop. When the 2nd 997 // operand gets resolved, the region could be in the collection set 998 // and the oop gets evacuated. If both operands have originally been 999 // the same, we get false negatives. 
1000 1001 { 1002 ShenandoahHeapLocker locker(lock()); 1003 _collection_set->clear(); 1004 _free_regions->clear(); 1005 1006 #ifdef ASSERT 1007 ShenandoahCheckCollectionSetClosure ccsc; 1008 _ordered_regions->heap_region_iterate(&ccsc); 1009 #endif 1010 1011 _shenandoah_policy->choose_collection_set(_collection_set); 1012 1013 _shenandoah_policy->choose_free_set(_free_regions); 1014 } 1015 1016 _bytes_allocated_since_cm = 0; 1017 1018 Universe::update_heap_info_at_gc(); 1019 1020 if (ShenandoahVerify) { 1021 verifier()->verify_before_evacuation(); 1022 } 1023 } 1024 } 1025 1026 1027 class ShenandoahRetireTLABClosure : public ThreadClosure { 1028 private: 1029 bool _retire; 1030 1031 public: 1032 ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {} 1033 1034 void do_thread(Thread* thread) { 1035 assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name()); 1036 thread->gclab().make_parsable(_retire); 1037 } 1038 }; 1039 1040 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) { 1041 if (UseTLAB) { 1042 CollectedHeap::ensure_parsability(retire_tlabs); 1043 ShenandoahRetireTLABClosure cl(retire_tlabs); 1044 Threads::java_threads_do(&cl); 1045 gc_threads_do(&cl); 1046 } 1047 } 1048 1049 1050 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask { 1051 ShenandoahRootEvacuator* _rp; 1052 public: 1053 1054 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) : 1055 AbstractGangTask("Shenandoah evacuate and update roots"), 1056 _rp(rp) 1057 { 1058 // Nothing else to do. 
1059 } 1060 1061 void work(uint worker_id) { 1062 ShenandoahEvacuateUpdateRootsClosure cl; 1063 1064 if (ShenandoahConcurrentEvacCodeRoots) { 1065 _rp->process_evacuate_roots(&cl, NULL, worker_id); 1066 } else { 1067 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations); 1068 _rp->process_evacuate_roots(&cl, &blobsCl, worker_id); 1069 } 1070 } 1071 }; 1072 1073 class ShenandoahFixRootsTask : public AbstractGangTask { 1074 ShenandoahRootEvacuator* _rp; 1075 public: 1076 1077 ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) : 1078 AbstractGangTask("Shenandoah update roots"), 1079 _rp(rp) 1080 { 1081 // Nothing else to do. 1082 } 1083 1084 void work(uint worker_id) { 1085 ShenandoahUpdateRefsClosure cl; 1086 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations); 1087 1088 _rp->process_evacuate_roots(&cl, &blobsCl, worker_id); 1089 } 1090 }; 1091 1092 void ShenandoahHeap::evacuate_and_update_roots() { 1093 1094 #if defined(COMPILER2) || INCLUDE_JVMCI 1095 DerivedPointerTable::clear(); 1096 #endif 1097 assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped"); 1098 1099 { 1100 ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac); 1101 ShenandoahEvacuateUpdateRootsTask roots_task(&rp); 1102 workers()->run_task(&roots_task); 1103 } 1104 1105 #if defined(COMPILER2) || INCLUDE_JVMCI 1106 DerivedPointerTable::update_pointers(); 1107 #endif 1108 if (cancelled_concgc()) { 1109 fixup_roots(); 1110 } 1111 } 1112 1113 1114 void ShenandoahHeap::fixup_roots() { 1115 assert(cancelled_concgc(), "Only after concurrent cycle failed"); 1116 1117 // If initial evacuation has been cancelled, we need to update all references 1118 // after all workers have finished. Otherwise we might run into the following problem: 1119 // GC thread 1 cannot allocate anymore, thus evacuation fails, leaves from-space ptr of object X. 
1120 // GC thread 2 evacuates the same object X to to-space 1121 // which leaves a truly dangling from-space reference in the first root oop*. This must not happen. 1122 // clear() and update_pointers() must always be called in pairs, 1123 // cannot nest with above clear()/update_pointers(). 1124 #if defined(COMPILER2) || INCLUDE_JVMCI 1125 DerivedPointerTable::clear(); 1126 #endif 1127 ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac); 1128 ShenandoahFixRootsTask update_roots_task(&rp); 1129 workers()->run_task(&update_roots_task); 1130 #if defined(COMPILER2) || INCLUDE_JVMCI 1131 DerivedPointerTable::update_pointers(); 1132 #endif 1133 } 1134 1135 void ShenandoahHeap::do_evacuation() { 1136 ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac); 1137 1138 LogTarget(Trace, gc, region) lt_region; 1139 LogTarget(Trace, gc, cset) lt_cset; 1140 1141 if (lt_region.is_enabled()) { 1142 ResourceMark rm; 1143 LogStream ls(lt_region); 1144 ls.print_cr("All available regions:"); 1145 print_heap_regions_on(&ls); 1146 } 1147 1148 if (lt_cset.is_enabled()) { 1149 ResourceMark rm; 1150 LogStream ls(lt_cset); 1151 ls.print_cr("Collection set ("SIZE_FORMAT" regions):", _collection_set->count()); 1152 _collection_set->print_on(&ls); 1153 1154 ls.print_cr("Free set:"); 1155 _free_regions->print_on(&ls); 1156 } 1157 1158 ShenandoahParallelEvacuationTask task(this, _collection_set); 1159 workers()->run_task(&task); 1160 1161 if (lt_cset.is_enabled()) { 1162 ResourceMark rm; 1163 LogStream ls(lt_cset); 1164 ls.print_cr("After evacuation collection set ("SIZE_FORMAT" regions):", 1165 _collection_set->count()); 1166 _collection_set->print_on(&ls); 1167 1168 ls.print_cr("After evacuation free set:"); 1169 _free_regions->print_on(&ls); 1170 } 1171 1172 if (lt_region.is_enabled()) { 1173 ResourceMark rm; 1174 LogStream ls(lt_region); 1175 ls.print_cr("All regions after evacuation:"); 1176 print_heap_regions_on(&ls); 1177 } 1178 } 
1179 1180 void ShenandoahHeap::roots_iterate(OopClosure* cl) { 1181 assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped"); 1182 1183 CodeBlobToOopClosure blobsCl(cl, false); 1184 CLDToOopClosure cldCl(cl); 1185 1186 ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases); 1187 rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0); 1188 } 1189 1190 bool ShenandoahHeap::supports_tlab_allocation() const { 1191 return true; 1192 } 1193 1194 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const { 1195 return MIN2(_free_regions->unsafe_peek_free(), max_tlab_size()); 1196 } 1197 1198 size_t ShenandoahHeap::max_tlab_size() const { 1199 return ShenandoahHeapRegion::max_tlab_size_bytes(); 1200 } 1201 1202 class ShenandoahResizeGCLABClosure : public ThreadClosure { 1203 public: 1204 void do_thread(Thread* thread) { 1205 assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name()); 1206 thread->gclab().resize(); 1207 } 1208 }; 1209 1210 void ShenandoahHeap::resize_all_tlabs() { 1211 CollectedHeap::resize_all_tlabs(); 1212 1213 ShenandoahResizeGCLABClosure cl; 1214 Threads::java_threads_do(&cl); 1215 gc_threads_do(&cl); 1216 } 1217 1218 class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure { 1219 public: 1220 void do_thread(Thread* thread) { 1221 assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name()); 1222 thread->gclab().accumulate_statistics(); 1223 thread->gclab().initialize_statistics(); 1224 } 1225 }; 1226 1227 void ShenandoahHeap::accumulate_statistics_all_gclabs() { 1228 ShenandoahAccumulateStatisticsGCLABClosure cl; 1229 Threads::java_threads_do(&cl); 1230 gc_threads_do(&cl); 1231 } 1232 1233 bool ShenandoahHeap::can_elide_tlab_store_barriers() const { 1234 return true; 1235 } 1236 1237 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) { 1238 // Overridden to do nothing. 
1239 return new_obj; 1240 } 1241 1242 bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) { 1243 return true; 1244 } 1245 1246 bool ShenandoahHeap::card_mark_must_follow_store() const { 1247 return false; 1248 } 1249 1250 void ShenandoahHeap::collect(GCCause::Cause cause) { 1251 assert(cause != GCCause::_gc_locker, "no JNI critical callback"); 1252 if (GCCause::is_user_requested_gc(cause)) { 1253 if (!DisableExplicitGC) { 1254 if (ExplicitGCInvokesConcurrent) { 1255 _concurrent_gc_thread->do_conc_gc(); 1256 } else { 1257 _concurrent_gc_thread->do_full_gc(cause); 1258 } 1259 } 1260 } else if (cause == GCCause::_allocation_failure) { 1261 collector_policy()->set_should_clear_all_soft_refs(true); 1262 _concurrent_gc_thread->do_full_gc(cause); 1263 } 1264 } 1265 1266 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) { 1267 //assert(false, "Shouldn't need to do full collections"); 1268 } 1269 1270 AdaptiveSizePolicy* ShenandoahHeap::size_policy() { 1271 Unimplemented(); 1272 return NULL; 1273 1274 } 1275 1276 CollectorPolicy* ShenandoahHeap::collector_policy() const { 1277 return _shenandoah_policy; 1278 } 1279 1280 1281 HeapWord* ShenandoahHeap::block_start(const void* addr) const { 1282 Space* sp = heap_region_containing(addr); 1283 if (sp != NULL) { 1284 return sp->block_start(addr); 1285 } 1286 return NULL; 1287 } 1288 1289 size_t ShenandoahHeap::block_size(const HeapWord* addr) const { 1290 Space* sp = heap_region_containing(addr); 1291 assert(sp != NULL, "block_size of address outside of heap"); 1292 return sp->block_size(addr); 1293 } 1294 1295 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const { 1296 Space* sp = heap_region_containing(addr); 1297 return sp->block_is_obj(addr); 1298 } 1299 1300 jlong ShenandoahHeap::millis_since_last_gc() { 1301 return 0; 1302 } 1303 1304 void ShenandoahHeap::prepare_for_verify() { 1305 if (SafepointSynchronize::is_at_safepoint() || ! 
UseTLAB) { 1306 ensure_parsability(false); 1307 } 1308 } 1309 1310 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const { 1311 workers()->print_worker_threads_on(st); 1312 } 1313 1314 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const { 1315 workers()->threads_do(tcl); 1316 } 1317 1318 void ShenandoahHeap::print_tracing_info() const { 1319 LogTarget(Info, gc, stats) lt; 1320 if (lt.is_enabled()) { 1321 ResourceMark rm; 1322 LogStream ls(lt); 1323 1324 phase_timings()->print_on(&ls); 1325 1326 ls.cr(); 1327 ls.cr(); 1328 1329 shenandoahPolicy()->print_gc_stats(&ls); 1330 1331 ls.cr(); 1332 ls.cr(); 1333 1334 if (ShenandoahAllocationTrace) { 1335 assert(alloc_tracker() != NULL, "Must be"); 1336 alloc_tracker()->print_on(&ls); 1337 } else { 1338 ls.print_cr(" Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable."); 1339 } 1340 } 1341 } 1342 1343 void ShenandoahHeap::verify(VerifyOption vo) { 1344 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { 1345 if (ShenandoahVerify) { 1346 verifier()->verify_generic(vo); 1347 } else { 1348 // TODO: Consider allocating verification bitmaps on demand, 1349 // and turn this on unconditionally. 
1350 } 1351 } 1352 } 1353 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const { 1354 return _free_regions->capacity(); 1355 } 1356 1357 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure { 1358 ObjectClosure* _cl; 1359 public: 1360 ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} 1361 bool heap_region_do(ShenandoahHeapRegion* r) { 1362 ShenandoahHeap::heap()->marked_object_iterate(r, _cl); 1363 return false; 1364 } 1365 }; 1366 1367 void ShenandoahHeap::object_iterate(ObjectClosure* cl) { 1368 ShenandoahIterateObjectClosureRegionClosure blk(cl); 1369 heap_region_iterate(&blk, false, true); 1370 } 1371 1372 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure { 1373 private: 1374 ShenandoahHeap* _heap; 1375 1376 public: 1377 ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {} 1378 1379 private: 1380 template <class T> 1381 inline void do_oop_work(T* p) { 1382 T o = oopDesc::load_heap_oop(p); 1383 if (!oopDesc::is_null(o)) { 1384 oop obj = oopDesc::decode_heap_oop_not_null(o); 1385 oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj)); 1386 } 1387 } 1388 public: 1389 void do_oop(oop* p) { 1390 do_oop_work(p); 1391 } 1392 void do_oop(narrowOop* p) { 1393 do_oop_work(p); 1394 } 1395 }; 1396 1397 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure { 1398 private: 1399 ObjectClosure* _cl; 1400 public: 1401 ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {} 1402 1403 virtual void do_object(oop obj) { 1404 assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)), 1405 "avoid double-counting: only non-forwarded objects here"); 1406 1407 // Fix up the ptrs. 
1408 ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs; 1409 obj->oop_iterate(&adjust_ptrs); 1410 1411 // Can reply the object now: 1412 _cl->do_object(obj); 1413 } 1414 }; 1415 1416 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) { 1417 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints"); 1418 1419 // Safe iteration does objects only with correct references. 1420 // This is why we skip collection set regions that have stale copies of objects, 1421 // and fix up the pointers in the returned objects. 1422 1423 ShenandoahSafeObjectIterateAndUpdate safe_cl(cl); 1424 ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl); 1425 heap_region_iterate(&blk, 1426 /* skip_cset_regions = */ true, 1427 /* skip_humongous_continuations = */ true); 1428 1429 _need_update_refs = false; // already updated the references 1430 } 1431 1432 // Apply blk->heap_region_do() on all committed regions in address order, 1433 // terminating the iteration early if heap_region_do() returns true. 
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* current = _ordered_regions->get(i);
    if (skip_humongous_continuation && current->is_humongous_continuation()) {
      continue;
    }
    if (skip_cset_regions && in_collection_set(current)) {
      continue;
    }
    if (blk->heap_region_do(current)) {
      // Closure requested early termination.
      return;
    }
  }
}

// Resets per-region liveness and the next top-at-mark-start, in preparation
// for a new marking cycle.
class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* sh;
public:
  ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}

  bool heap_region_do(ShenandoahHeapRegion* r) {
    r->clear_live_data();
    sh->set_next_top_at_mark_start(r->bottom(), r->top());
    return false;
  }
};

// STW initialization of a concurrent mark cycle: verify, accumulate TLAB
// stats, reset liveness, and initialize mark roots.
void ShenandoahHeap::start_concurrent_marking() {
  if (ShenandoahVerify) {
    verifier()->verify_before_concmark();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
    accumulate_statistics_all_tlabs();
  }

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
    ensure_parsability(true);
  }

  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
    ShenandoahClearLivenessClosure clc(this);
    heap_region_iterate(&clc);
  }

  // Make above changes visible to worker threads
  OrderAccess::fence();

  concurrentMark()->init_mark_roots();

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
    resize_all_tlabs();
  }
}

// Flips the "next" marking state to become the "complete" one (and vice versa):
// mark bitmaps and both top-at-mark-start tables.
void ShenandoahHeap::swap_mark_bitmaps() {
  // Swap bitmaps.
  MarkBitMap* tmp1 = _complete_mark_bit_map;
  _complete_mark_bit_map = _next_mark_bit_map;
  _next_mark_bit_map = tmp1;

  // Swap top-at-mark-start pointers
  HeapWord** tmp2 = _complete_top_at_mark_starts;
  _complete_top_at_mark_starts = _next_top_at_mark_starts;
  _next_top_at_mark_starts = tmp2;

  HeapWord** tmp3 = _complete_top_at_mark_starts_base;
  _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
  _next_top_at_mark_starts_base = tmp3;
}


void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // If we needed to update refs, and concurrent marking has been cancelled,
    // we need to finish updating references.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
  set_concurrent_mark_in_progress(false);

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print_cr("Regions at stopping the concurrent mark:");
    print_heap_regions_on(&ls);
  }
}

// Also toggles SATB barrier activity for all Java threads.
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  _concurrent_mark_in_progress = in_progress ? 1 : 0;
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

// Partial GC needs both SATB barriers and the evacuation flag flipped.
void ShenandoahHeap::set_concurrent_partial_in_progress(bool in_progress) {
  _concurrent_partial_in_progress = in_progress;
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
  set_evacuation_in_progress_at_safepoint(in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
  // Note: it is important to first release the _evacuation_in_progress flag here,
  // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
  // in case a VM task is pending.
  set_evacuation_in_progress(in_progress);
  MutexLocker mu(Threads_lock);
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
  set_evacuation_in_progress(in_progress);
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  _evacuation_in_progress = in_progress ? 1 : 0;
  // Full fence: publish the flag before anyone acts on it.
  OrderAccess::fence();
}

// Called when a thread fails to allocate during evacuation: cancels the cycle,
// schedules a full GC, and makes a Java thread wait until evacuation is over.
void ShenandoahHeap::oom_during_evacuation() {
  log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
                        Thread::current()->osthread()->thread_id());

  // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
  collector_policy()->set_should_clear_all_soft_refs(true);
  concurrent_thread()->try_set_full_gc();
  cancel_concgc(_oom_evacuation);

  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    assert(! Threads_lock->owned_by_self()
           || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
    log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
    while (_evacuation_in_progress) { // wait.
      Thread::current()->_ParkEvent->park(1);
    }
  }

}

HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Initialize Brooks pointer for the next object
  HeapWord* result = obj + BrooksPointer::word_size();
  BrooksPointer::initialize(oop(result));
  return result;
}

uint ShenandoahHeap::oop_extra_words() {
  // Every object carries an extra word for the Brooks forwarding pointer.
  return BrooksPointer::word_size();
}

ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

// Liveness query that first resolves the object through its forwarding pointer.
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_next(obj);
}

ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

// Liveness query for objects already known to be in to-space.
bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  assert(!oopDesc::is_null(obj), "null");
  assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  return _heap->is_marked_next(obj);
}

// Picks the liveness closure depending on whether references still need updating.
BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
  return need_update_refs() ?
         (BoolObjectClosure*) &_forwarded_is_alive :
         (BoolObjectClosure*) &_is_alive;
}

void ShenandoahHeap::ref_processing_init() {
  MemRegion mr = reserved_region();

  _forwarded_is_alive.init(ShenandoahHeap::heap());
  _is_alive.init(ShenandoahHeap::heap());
  assert(_max_workers > 0, "Sanity");

  _ref_processor =
    new ReferenceProcessor(mr,                       // span
                           ParallelRefProcEnabled,   // MT processing
                           _max_workers,             // Degree of MT processing
                           true,                     // MT discovery
                           _max_workers,             // Degree of MT discovery
                           false,                    // Reference discovery is not atomic
                           &_forwarded_is_alive);    // Pessimistically assume "forwarded"
}


GCTracer* ShenandoahHeap::tracer() {
  return shenandoahPolicy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_regions->used();
}

void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
  // Only the first successful cancellation logs and reports.
  if (try_cancel_concgc()) {
    log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
    _shenandoah_policy->report_concgc_cancelled();
  }
}

void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
  if (try_cancel_concgc()) {
    log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
    _shenandoah_policy->report_concgc_cancelled();
  }
}

const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
  switch (cause) {
    case _oom_evacuation:
      return "Out of memory for evacuation";
    case _vm_stop:
      return "Stopping VM";
    default:
      return "Unknown";
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording.
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  _concurrent_gc_thread->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_concgc(_vm_stop);

  // Step 3. Wait until GC worker exits normally.
  _concurrent_gc_thread->stop();
}

// Class unloading and metadata/table cleanup, shared between full and
// concurrent GC; 'full_gc' only selects which timing phases get charged.
void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge :
          ShenandoahPhaseTimings::purge;

  ShenandoahPhaseTimings::Phase phase_unload =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_class_unload :
          ShenandoahPhaseTimings::purge_class_unload;

  ShenandoahPhaseTimings::Phase phase_cldg =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_cldg :
          ShenandoahPhaseTimings::purge_cldg;

  ShenandoahPhaseTimings::Phase phase_par =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par :
          ShenandoahPhaseTimings::purge_par;

  ShenandoahPhaseTimings::Phase phase_par_classes =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par_classes :
          ShenandoahPhaseTimings::purge_par_classes;

  ShenandoahPhaseTimings::Phase phase_par_codecache =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par_codecache :
          ShenandoahPhaseTimings::purge_par_codecache;

  ShenandoahPhaseTimings::Phase phase_par_rmt =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par_rmt :
          ShenandoahPhaseTimings::purge_par_rmt;

  ShenandoahPhaseTimings::Phase phase_par_symbstring =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
          ShenandoahPhaseTimings::purge_par_symbstring;

  ShenandoahPhaseTimings::Phase phase_par_sync =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par_sync :
          ShenandoahPhaseTimings::purge_par_sync;

  ShenandoahGCPhase root_phase(phase_root);

  BoolObjectClosure* is_alive = is_alive_closure();

  bool purged_class;

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahGCPhase phase(phase_unload);
    purged_class = SystemDictionary::do_unloading(is_alive,
                                                  full_gc ? ShenandoahMarkCompact::gc_timer() : gc_timer(),
                                                  true);
  }

  {
    ShenandoahGCPhase phase(phase_par);
    uint active = _workers->active_workers();
    ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
    _workers->run_task(&unlink_task);

    ShenandoahPhaseTimings* p = ShenandoahHeap::heap()->phase_timings();
    ParallelCleaningTimes times = unlink_task.times();

    // "times" report total time, phase_tables_cc reports wall time. Divide total times
    // by active workers to get average time per worker, that would add up to wall time.
    p->record_phase_time(phase_par_classes, times.klass_work_us() / active);
    p->record_phase_time(phase_par_codecache, times.codecache_work_us() / active);
    p->record_phase_time(phase_par_rmt, times.rmt_work_us() / active);
    p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
    p->record_phase_time(phase_par_sync, times.sync_us() / active);
  }

  {
    ShenandoahGCPhase phase(phase_cldg);
    ClassLoaderDataGraph::purge();
  }
}

void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
  _need_update_refs = need_update_refs;
}

//fixme this should be in heapregionset
// Returns the next non-humongous region after 'r', for sliding compaction.
ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
  size_t region_idx = r->region_number() + 1;
  ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
  guarantee(next->region_number() == region_idx, "region number must match");
  while (next->is_humongous()) {
    region_idx = next->region_number() + 1;
    next = _ordered_regions->get(region_idx);
    guarantee(next->region_number() == region_idx, "region number must match");
  }
  return next;
}

ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}

MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
  return _complete_mark_bit_map;
}

MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
  return _next_mark_bit_map;
}

void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
  _free_regions->add_region(r);
}

void ShenandoahHeap::clear_free_regions() {
  _free_regions->clear();
}

// Address of the biased cset membership map, for use by generated code.
address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != NULL, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

// Address of the cancellation flag, for code that polls it directly.
address ShenandoahHeap::cancelled_concgc_addr() {
  return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
}


size_t ShenandoahHeap::conservative_max_heap_alignment() {
  return ShenandoahMaxRegionSize;
}

// Bytes allocated since the last concurrent mark started.
size_t ShenandoahHeap::bytes_allocated_since_cm() {
  return _bytes_allocated_since_cm;
}

void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
  _bytes_allocated_since_cm = bytes;
}

// TAMS (top-at-mark-start) accessors. The region base address is shifted by
// the region-size shift to index the per-region arrays.
// NOTE(review): the heap base is not subtracted first, so the arrays are
// presumably biased by the heap base -- confirm against their initialization.
void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  _next_top_at_mark_starts[index] = addr;
}

HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  return _next_top_at_mark_starts[index];
}

void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  _complete_top_at_mark_starts[index] = addr;
}

HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  return _complete_top_at_mark_starts[index];
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress = in_progress;
}

bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress;
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  _update_refs_in_progress = in_progress;
}

bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _update_refs_in_progress;
}

// Track compiled-method roots via ShenandoahCodeRoots.
void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::add_nmethod(nm);
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::remove_nmethod(nm);
}

// Pinning marks the region containing o so it is not chosen for evacuation.
// Heap lock is taken to change the region state safely.
void ShenandoahHeap::pin_object(oop o) {
  ShenandoahHeapLocker locker(lock());
  heap_region_containing(o)->make_pinned();
}

void ShenandoahHeap::unpin_object(oop o) {
  ShenandoahHeapLocker locker(lock());
  heap_region_containing(o)->make_unpinned();
}

GCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

#ifdef ASSERT
// Sanity check the requested worker count against the relevant flag:
// ParallelGCThreads at safepoints, ConcGCThreads otherwise, unless dynamic
// GC thread counts are in effect (then only an upper bound is checked).
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
  assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");

  if (SafepointSynchronize::is_at_safepoint()) {
    if (UseDynamicNumberOfGCThreads ||
        (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
      assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
    } else {
      // Use ParallelGCThreads inside safepoints
      // (typo "ParalleGCThreads" is in the original message; string kept as-is)
      assert(nworkers == ParallelGCThreads, "Use ParalleGCThreads within safepoints");
    }
  } else {
    if (UseDynamicNumberOfGCThreads ||
        (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
      assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
    } else {
      // Use ConcGCThreads outside safepoints
      assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
    }
  }
}
#endif

// Region closure that sums garbage bytes over all regular regions.
class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
private:
  size_t _garbage;
public:
  ShenandoahCountGarbageClosure() : _garbage(0) {
  }

  bool heap_region_do(ShenandoahHeapRegion* r) {
    // Only regular regions are counted; others (humongous, trash, ...) are skipped.
    if (r->is_regular()) {
      _garbage += r->garbage();
    }
    return false; // keep iterating
  }

  size_t garbage() {
    return _garbage;
  }
};

// Total garbage in the heap, in bytes, over regular regions.
size_t ShenandoahHeap::garbage() {
  ShenandoahCountGarbageClosure cl;
  heap_region_iterate(&cl);
  return cl.garbage();
}

ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
  return _connection_matrix;
}

ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
  return _partial_gc;
}

// Verifier is only constructed when ShenandoahVerify is enabled.
ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert (_verifier != NULL, "sanity");
  return _verifier;
}

// Parallel task updating heap references. T is the update closure type
// (plain or matrix-updating). Workers claim regions from _regions.
template<class T>
class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
private:
  T cl;
  ShenandoahHeap* _heap;
  ShenandoahHeapRegionSet* _regions;
  bool _concurrent;
public:
  ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions, bool concurrent) :
    AbstractGangTask("Concurrent Update References Task"),
    cl(T()),
    _heap(ShenandoahHeap::heap()),
    _regions(regions),
    _concurrent(concurrent) {
  }

  void work(uint worker_id) {
    // Join the suspendible thread set only for concurrent operation, so the
    // worker can yield at safepoints.
    SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      if (_heap->in_collection_set(r)) {
        // Cset regions get no ref updates; instead, clear their slice of the
        // complete mark bitmap up to the complete TAMS.
        HeapWord* bottom = r->bottom();
        HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
        if (top > bottom) {
          _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
      } else {
        if (r->is_active()) {
          _heap->marked_object_oop_safe_iterate(r, &cl);
        }
      }
      if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
        return; // GC cancelled: bail out, remaining regions stay unclaimed
      }
      r = _regions->claim_next();
    }
  }
};

// Run the update-references task over update_regions, picking the closure
// type depending on whether the connection matrix is in use.
void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions, bool concurrent) {
  if (UseShenandoahMatrix) {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions, concurrent);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure>
      task(update_regions, concurrent);
    workers()->run_task(&task);
  }
}

void ShenandoahHeap::concurrent_update_heap_references() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
  ShenandoahHeapRegionSet* update_regions = regions();
  update_regions->clear_current_index();
  update_heap_references(update_regions, true);
}

// Safepoint operation run before the concurrent update-refs phase starts.
void ShenandoahHeap::prepare_update_refs() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (ShenandoahVerify) {
    verifier()->verify_before_updaterefs();
  }

  set_evacuation_in_progress_at_safepoint(false);
  set_update_refs_in_progress(true);
  ensure_parsability(true);
  if (UseShenandoahMatrix) {
    connection_matrix()->clear_all();
  }
  // Cap concurrent iteration at the current top of every region.
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = _ordered_regions->get(i);
    r->set_concurrent_iteration_safe_limit(r->top());
  }
}

// Safepoint operation run after the concurrent update-refs phase.
void ShenandoahHeap::finish_update_refs() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (cancelled_concgc()) {
    ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);

    // Finish updating references where we left off.
    clear_cancelled_concgc();
    ShenandoahHeapRegionSet* update_regions = regions();
    update_heap_references(update_regions, false);
  }

  assert(!
  cancelled_concgc(), "Should have been done right before");
  concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);

  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahGCPhase final_str_dedup_table(ShenandoahPhaseTimings::final_update_refs_dedup_table);
    ShenandoahStringDedup::parallel_update_refs();
  }

  // Allocations might have happened before we STWed here, record peak:
  shenandoahPolicy()->record_peak_occupancy();

  ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);

  trash_cset_regions();
  set_need_update_refs(false);

  if (ShenandoahVerify) {
    verifier()->verify_after_updaterefs();
  }

  {
    // Rebuild the free set
    ShenandoahHeapLocker locker(lock());
    _free_regions->clear();
    size_t end = _ordered_regions->active_regions();
    for (size_t i = 0; i < end; i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (r->is_alloc_allowed()) {
        assert (!in_collection_set(r), "collection set should be clear");
        _free_regions->add_region(r);
      }
    }
  }
  set_update_refs_in_progress(false);
}

void ShenandoahHeap::set_alloc_seq_gc_start() {
  // Take next number, the start seq number is inclusive
  _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::alloc_seq_num() + 1;
}

void ShenandoahHeap::set_alloc_seq_gc_end() {
  // Take current number, the end seq number is also inclusive
  _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::alloc_seq_num();
}


#ifdef ASSERT
// Debug-only heap lock assertions, delegating to the lock implementation.
void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
  _lock.assert_owned_by_current_thread();
}

void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
  _lock.assert_not_owned_by_current_thread();
}

void ShenandoahHeap::assert_heaplock_or_safepoint() {
  _lock.assert_owned_by_current_thread_or_safepoint();
}
#endif

// Recycle up to limit trash regions back into the free set.
// Caller must already hold the heap lock.
void ShenandoahHeap::recycle_trash_assist(size_t limit) {
  assert_heaplock_owned_by_current_thread();

  size_t count = 0;
  for (size_t i = 0; (i < num_regions()) && (count < limit); i++) {
    ShenandoahHeapRegion *r = _ordered_regions->get(i);
    if (r->is_trash()) {
      decrease_used(r->used());
      r->recycle();
      _free_regions->add_region(r);
      count++;
    }
  }
}

// Recycle all trash regions, taking/releasing the heap lock per region so
// allocating threads are not starved.
void ShenandoahHeap::recycle_trash() {
  // lock is not reentrable, check we don't have it
  assert_heaplock_not_owned_by_current_thread();

  size_t bytes_reclaimed = 0;

  if (UseShenandoahMatrix) {
    // The complication for matrix cleanup is that we want the batched update
    // to alleviate costs. We also cannot add regions to freeset until matrix
    // is clean, otherwise we race with the actual allocations.

    size_t count = 0;
    for (size_t i = 0; i < num_regions(); i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (r->is_trash()) {           // quick check without the lock...
        ShenandoahHeapLocker locker(lock());
        if (r->is_trash()) {         // ...re-checked under the lock
          bytes_reclaimed += r->used();
          decrease_used(r->used());
          r->recycle_no_matrix();
          _recycled_regions[count++] = r->region_number();
        }
      }
      SpinPause(); // allow allocators to barge the lock
    }

    // Clear all matrix state for the recycled regions in one batch.
    connection_matrix()->clear_batched(_recycled_regions, count);

    {
      // Only now is it safe to hand the regions to the free set.
      ShenandoahHeapLocker locker(lock());
      for (size_t i = 0; i < count; i++) {
        ShenandoahHeapRegion *r = _ordered_regions->get(_recycled_regions[i]);
        _free_regions->add_region(r);
      }
    }

  } else {
    for (size_t i = 0; i < num_regions(); i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (r->is_trash()) {           // quick check without the lock...
        ShenandoahHeapLocker locker(lock());
        if (r->is_trash()) {         // ...re-checked under the lock
          bytes_reclaimed += r->used();
          decrease_used(r->used());
          r->recycle();
          _free_regions->add_region(r);
        }
      }
      SpinPause(); // allow allocators to barge the lock
    }
  }

  _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
}

void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  print_heap_regions_on(st);
}

// Address of the concurrent-mark-in-progress flag, for code polling it directly.
address ShenandoahHeap::concurrent_mark_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
}

// Commit backing memory for region r's slice of both marking bitmaps.
// Returns false if either commit fails; the first slice may then remain
// committed (callers presumably treat any failure as fatal -- confirm).
bool ShenandoahHeap::commit_bitmaps(ShenandoahHeapRegion* r) {
  size_t len = _bitmap_words_per_region * HeapWordSize;
  size_t off = r->region_number() * _bitmap_words_per_region;
  if (!os::commit_memory((char*)(_bitmap0_region.start() + off), len, false)) {
    return false;
  }
  if (!os::commit_memory((char*)(_bitmap1_region.start() + off), len, false)) {
    return false;
  }
  return true;
}

// Uncommit backing memory for region r's slice of both marking bitmaps.
// Returns false if either uncommit fails.
bool ShenandoahHeap::uncommit_bitmaps(ShenandoahHeapRegion* r) {
  size_t len = _bitmap_words_per_region * HeapWordSize;
  size_t off = r->region_number() * _bitmap_words_per_region;
  if (!os::uncommit_memory((char*)(_bitmap0_region.start() + off), len)) {
    return false;
  }
  if (!os::uncommit_memory((char*)(_bitmap1_region.start() + off), len)) {
    return false;
  }
  return true;
}