/*
 * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahAllocTracker.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

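// Debug-only closure asserting the to-space invariant: every visited reference
// must resolve to itself through the Brooks pointer, i.e. it must already point
// at the to-space copy. Handy for verifying that an oop set holds no stale
// from-space references.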
#ifdef ASSERT
template <class T>
void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: "PTR_FORMAT" , rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _bitmap_base(bitmap_base),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

      size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap_base + start), p2i(_bitmap_base + end));
      os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  BrooksPointer::initial_checks();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  if (ShenandoahAlwaysPreTouch) {
    // Enabled pre-touch means the entire heap is committed right away.
    init_byte_size = max_byte_size;
  }

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 heap_alignment);
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);

  _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
  _committed = _initial_size;

  log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
  if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
    vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
  }

  size_t reg_size_words = ShenandoahHeapRegion::region_size_words();

  _ordered_regions = new ShenandoahHeapRegionSet(_num_regions);
  _free_regions = new ShenandoahFreeSet(_ordered_regions, _num_regions);

  _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());

  _top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _top_at_mark_starts = _top_at_mark_starts_base -
                        ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());

  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
                                                         (HeapWord*) pgc_rs.base() + reg_size_words * i,
                                                         reg_size_words,
                                                         i,
                                                         i < num_committed_regions);

      _top_at_mark_starts_base[i] = r->bottom();

      // Add to ordered regions first. The active size of the ordered region set is
      // used as the number of active regions in the heap; the free set and the
      // collection set use that number to assert the correctness of incoming regions.
      _ordered_regions->add_region(r);
      _free_regions->add_region(r);
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }
  }

  assert(_ordered_regions->active_regions() == _num_regions, "Must match");
  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(base()));

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_trace(gc, region)("All Regions");
    _ordered_regions->print_on(&ls);
    log_trace(gc, region)("Free Regions");
    _free_regions->print_on(&ls);
  }

  _recycled_regions = NEW_C_HEAP_ARRAY(size_t, _num_regions, mtGC);

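  // Shenandoah marks concurrently using snapshot-at-the-beginning (SATB): when a
  // mutator overwrites a reference during marking, a pre-write barrier enqueues
  // the old value on a per-thread queue, so no object that was live at mark start
  // can be missed. The queue machinery is shared with G1, hence the G1 names below.
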
  // The call below uses stuff (the SATB* things) that are in G1, but probably
  // belong into a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t bitmap_bytes_per_region = _bitmap_size / _num_regions;
  _bitmap_words_per_region = bitmap_bytes_per_region / HeapWordSize;

  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
  guarantee((bitmap_bytes_per_region % os::vm_page_size()) == 0,
            "Bitmap bytes per region should be page-granular: bpr = " SIZE_FORMAT ", page size = %d",
            bitmap_bytes_per_region, os::vm_page_size());
  guarantee(is_power_of_2(_bitmap_words_per_region),
            "Bitmap words per region should be power of two: " SIZE_FORMAT, _bitmap_words_per_region);
  guarantee(bitmap_bytes_per_region >= (size_t)os::vm_page_size(),
            "Bitmap slice per region (" SIZE_FORMAT ") should be larger than page size (%d)",
            bitmap_bytes_per_region, os::vm_page_size());

  size_t bitmap_page_size = UseLargePages && (bitmap_bytes_per_region >= (size_t)os::large_page_size()) ?
                            (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);

  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (r->is_committed()) {
        commit_bitmaps(r);
      }
    }
  }

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  __mark_bit_map.initialize(_heap_region, _bitmap_region);
  _mark_bit_map = &__mark_bit_map;

  if (UseShenandoahMatrix) {
    _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
  } else {
    _connection_matrix = NULL;
  }

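  // The connection matrix conservatively tracks which regions may contain
  // references into which other regions. It is what lets the partial GC
  // (set up below) collect a few regions without scanning the entire heap
  // for incoming references.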
  _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
                new ShenandoahPartialGC(this, _num_regions) :
                NULL;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _phase_timings = new ShenandoahPhaseTimings();

  if (ShenandoahAllocationTrace) {
    _alloc_tracker = new ShenandoahAllocTracker();
  }

  ShenandoahStringDedup::initialize();

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  ShenandoahCodeRoots::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _update_refs_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _top_at_mark_starts(NULL),
  _top_at_mark_starts_base(NULL),
  __mark_bit_map(),
  _mark_bit_map(NULL),
  _connection_matrix(NULL),
  _cancelled_concgc(0),
  _need_update_refs(false),
  _need_reset_bitmap(false),
  _bitmap_valid(true),
  _verifier(NULL),
  _heap_lock(0),
  _used_at_last_gc(0),
  _alloc_seq_at_last_gc_start(0),
  _alloc_seq_at_last_gc_end(0),
  _safepoint_workers(NULL),
#ifdef ASSERT
  _heap_lock_owner(NULL),
  _heap_expansion_count(0),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL),
  _alloc_tracker(NULL)
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                                    /* are_GC_task_threads */true,
                                    /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelSafepointCleanupThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelSafepointCleanupThreads,
                                                false, false);
    _safepoint_workers->initialize_workers();
  }
}

class ShenandoahResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ShenandoahResetBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      if (region->is_committed()) {
        HeapWord* bottom = region->bottom();
        HeapWord* top = heap->top_at_mark_start(region->bottom());
        if (top > bottom) {
          heap->mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
        assert(heap->is_bitmap_clear_range(bottom, region->end()), "must be clear");
      }
      region = _regions->claim_next();
    }
  }
};

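// Reset the mark bitmap in parallel. Only the slice below top-at-mark-start
// (TAMS) can contain set bits: marking never marks at or above TAMS, since
// objects allocated there during the cycle are implicitly live. A region whose
// TAMS is still at bottom therefore needs no clearing at all.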
void ShenandoahHeap::reset_mark_bitmap(WorkGang* workers) {
  assert_gc_workers(workers->active_workers());

  ShenandoahResetBitmapTask task = ShenandoahResetBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_bitmap_clear() {
  for (size_t idx = 0; idx < _num_regions; idx++) {
    ShenandoahHeapRegion* r = _ordered_regions->get(idx);
    if (r->is_committed() && !is_bitmap_clear_range(r->bottom(), r->end())) {
      return false;
    }
  }
  return true;
}

bool ShenandoahHeap::is_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
               capacity() / K, committed() / K, used() / K);
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
               num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);

  st->print("Status: ");
  if (concurrent_mark_in_progress()) {
    st->print("marking ");
  } else if (is_evacuation_in_progress()) {
    st->print("evacuating ");
  } else if (is_update_refs_in_progress()) {
    st->print("updating refs ");
  } else {
    st->print("idle ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  if (UseShenandoahMatrix) {
    st->print_cr("Matrix:");

    ShenandoahConnectionMatrix* matrix = connection_matrix();
    if (matrix != NULL) {
      st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
      st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
      st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
    } else {
      st->print_cr(" No matrix.");
    }
  }

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    MutexLocker ml(Threads_lock);

    ShenandoahInitGCLABClosure init_gclabs;
    Threads::java_threads_do(&init_gclabs);
    gc_threads_do(&init_gclabs);

    // gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
    // Instead, we let the WorkGang initialize each gclab when a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();

  _shenandoah_policy->post_heap_initialize();
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return num_regions() * ShenandoahHeapRegion::region_size_bytes();
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

void ShenandoahHeap::handle_heap_shrinkage() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahHeapRegionSet* set = regions();

  size_t count = 0;
  double current = os::elapsedTime();
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = set->get(i);
    if (r->is_empty_committed() &&
        (current - r->empty_time()) * 1000 > ShenandoahUncommitDelay &&
        r->make_empty_uncommitted()) {
      count++;
    }
  }

  if (count > 0) {
    log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
                 count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
  }
}

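// GCLAB refill slow path, mirroring the TLAB policy: if the current GCLAB still
// has more free space than its refill waste limit, keep it and let the caller
// allocate in shared space instead; otherwise retire it and install a fresh one.
// (E.g., failing a large copy into an almost-full GCLAB retires the LAB, while
// failing into a half-empty one does not.)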
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_tlab);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_gclab);
}

HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
  HeapWord* result = allocate_memory(word_size, type);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in collection set");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, AllocType type) {
  ShenandoahAllocTrace trace_alloc(word_size, type);

  bool in_new_region = false;
  HeapWord* result = allocate_memory_under_lock(word_size, type, in_new_region);

  if (type == _alloc_tlab || type == _alloc_shared) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking the way to detect progress from "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT " Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_under_lock(word_size, type, in_new_region);
    }
  }

  if (in_new_region) {
    // Update monitoring counters when we took a new region. This amortizes the
    // update costs on slow path.
    concurrent_thread()->trigger_counters_update();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, AllocType type, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_regions->allocate(word_size, type, in_new_region);
}

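// Shared (out-of-TLAB) allocation. Every Shenandoah object is preceded by a
// Brooks forwarding pointer, so we allocate BrooksPointer::word_size() extra
// words, return the address just past the fwdptr slot, and initialize the
// fwdptr to point back at the object itself.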
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _alloc_shared);
  HeapWord* result = filler + BrooksPointer::word_size();
  if (filler != NULL) {
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    return NULL;
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked(obj), "only evacuate marked objects %d %d",
               _heap->is_marked(obj), _heap->is_marked(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          _heap->evacuate_object(obj, _thread, evac);
        }
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

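// Evacuates all live objects in a single collection set region. An object that
// is still its own forwardee has not been copied yet; evacuate_object() copies
// it and installs the to-space address in the Brooks pointer with a CAS, so
// when several workers race on the same object, exactly one copy wins and the
// losers reuse the winner's copy.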
class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    assert(_heap->is_marked(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      bool evac;
      _heap->evacuate_object(p, _thread, evac);
    }
  }
};

class ShenandoahParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  volatile jbyte _claimed_codecache;

  bool claim_codecache() {
    jbyte old = Atomic::cmpxchg((jbyte)1, &_claimed_codecache, (jbyte)0);
    return old == 0;
  }
public:
  ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
                                   ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _cs(cs),
    _sh(sh),
    _claimed_codecache(0)
  {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);

    // If concurrent code cache evac is enabled, evacuate it here.
    // Note we cannot update the roots here, because we risk non-atomic stores to the alive
    // nmethods. The update would be handled elsewhere.
    if (ShenandoahConcurrentEvacCodeRoots && claim_codecache()) {
      ShenandoahEvacuateRootsClosure cl;
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
      CodeCache::blobs_do(&blobs);
    }

    ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r =_cs->claim_next()) != NULL) {
      log_develop_trace(gc, region)("Thread "INT32_FORMAT" claimed Heap Region "SIZE_FORMAT,
                                    worker_id,
                                    r->region_number());

      assert(r->has_live(), "all-garbage regions are reclaimed early");
      _sh->marked_object_iterate(r, &cl);

      if (_sh->check_cancelled_concgc_and_yield()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, r->region_number());
        break;
      }
    }
  }
};

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
  st->print_cr("FTS=first use timestamp, LTS=last use timestamp");

  _ordered_regions->print_on(st);
}

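// Reclaims the whole run of regions backing a humongous object, given its
// start region. The region count is recomputed from the object size, rounded
// up to whole regions: e.g. an object 2.5 regions long occupies a start region
// plus two continuations, all of which are trashed below.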
size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->region_number() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");
  log_trace(gc, humongous)("Reclaiming "SIZE_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);

  for(size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, the assertion in the trace-log region printout
    // fails: it expects every humongous continuation to belong to a run that still
    // starts with a humongous start region.
    ShenandoahHeapRegion* region = _ordered_regions->get(index --);

    LogTarget(Trace, gc, humongous) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      region->print_on(&ls);
    }

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!in_collection_set(region), "Humongous region should not be in collection set");

    region->make_trash();
  }
  return required_regions;
}

#ifdef ASSERT
class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool heap_region_do(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    // Allocations might have happened before we STWed here, record peak:
    shenandoahPolicy()->record_peak_occupancy();

    ensure_parsability(true);

    if (ShenandoahVerify) {
      verifier()->verify_after_concmark();
    }

    trash_cset_regions();

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLocker locker(lock());
      _collection_set->clear();
      _free_regions->clear();

#ifdef ASSERT
      ShenandoahCheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();

    if (ShenandoahVerify) {
      verifier()->verify_before_evacuation();
    }
  }
}

class ShenandoahRetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}

  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    ShenandoahRetireTLABClosure cl(retire_tlabs);
    Threads::java_threads_do(&cl);
    gc_threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;

    if (ShenandoahConcurrentEvacCodeRoots) {
      _rp->process_evacuate_roots(&cl, NULL, worker_id);
    } else {
      MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
      _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
    }
  }
};

class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
  if (cancelled_concgc()) {
    fixup_roots();
  }
}

void ShenandoahHeap::fixup_roots() {
  assert(cancelled_concgc(), "Only after concurrent cycle failed");

  // If initial evacuation has been cancelled, we need to update all references
  // after all workers have finished. Otherwise we might run into the following problem:
  // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
  // ptr of object X in some root oop*. GC thread 2 then evacuates the same object X
  // to to-space, which leaves that first root oop* truly dangling into from-space.
  // This must not happen.
  // Note: DerivedPointerTable::clear() and update_pointers() must always be called
  // in pairs; this pair cannot nest within the clear()/update_pointers() pair above.
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
  ShenandoahFixRootsTask update_roots_task(&rp);
  workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahHeap::do_evacuation() {
  ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);

  LogTarget(Trace, gc, region) lt_region;
  LogTarget(Trace, gc, cset) lt_cset;

  if (lt_region.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt_region);
    ls.print_cr("All available regions:");
    print_heap_regions_on(&ls);
  }

  if (lt_cset.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt_cset);
    ls.print_cr("Collection set ("SIZE_FORMAT" regions):", _collection_set->count());
    _collection_set->print_on(&ls);

    ls.print_cr("Free set:");
    _free_regions->print_on(&ls);
  }

  ShenandoahParallelEvacuationTask task(this, _collection_set);
  workers()->run_task(&task);

  if (lt_cset.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt_cset);
    ls.print_cr("After evacuation collection set ("SIZE_FORMAT" regions):",
                _collection_set->count());
    _collection_set->print_on(&ls);

    ls.print_cr("After evacuation free set:");
    _free_regions->print_on(&ls);
  }

  if (lt_region.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt_region);
    ls.print_cr("All regions after evacuation:");
    print_heap_regions_on(&ls);
  }
}

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);
  ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  return MIN2(_free_regions->unsafe_peek_free(), max_tlab_size());
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::max_tlab_size_bytes();
}

class ShenandoahResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ShenandoahResizeGCLABClosure cl;
  Threads::java_threads_do(&cl);
  gc_threads_do(&cl);
}

class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  ShenandoahAccumulateStatisticsGCLABClosure cl;
  Threads::java_threads_do(&cl);
  gc_threads_do(&cl);
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

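// External GC requests. User-requested ("explicit") GC runs either a concurrent
// cycle or a full STW collection, depending on ExplicitGCInvokesConcurrent;
// allocation failure always escalates to a full GC and clears all soft references.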
void ShenandoahHeap::collect(GCCause::Cause cause) {
  assert(cause != GCCause::_gc_locker, "no JNI critical callback");
  if (GCCause::is_user_requested_gc(cause)) {
    if (!DisableExplicitGC) {
      if (ExplicitGCInvokesConcurrent) {
        _concurrent_gc_thread->do_conc_gc();
      } else {
        _concurrent_gc_thread->do_full_gc(cause);
      }
    }
  } else if (cause == GCCause::_allocation_failure) {
    collector_policy()->set_should_clear_all_soft_refs(true);
    _concurrent_gc_thread->do_full_gc(cause);
  }
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ensure_parsability(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_on(&ls);

    ls.cr();
    ls.cr();

    shenandoahPolicy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();

    if (ShenandoahAllocationTrace) {
      assert(alloc_tracker() != NULL, "Must be");
      alloc_tracker()->print_on(&ls);
    } else {
      ls.print_cr(" Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
    }
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_regions->capacity();
}

class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
  ObjectClosure* _cl;
public:
  ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool heap_region_do(ShenandoahHeapRegion* r) {
    ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
    return false;
  }
};

void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  ShenandoahIterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk, false, true);
}

class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* _heap;

public:
  ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
    }
  }
public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
private:
  ObjectClosure* _cl;
public:
  ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}

  virtual void do_object(oop obj) {
    assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
            "avoid double-counting: only non-forwarded objects here");

    // Fix up the ptrs.
    ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
    obj->oop_iterate(&adjust_ptrs);

    // Can report the object now:
    _cl->do_object(obj);
  }
};

void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

  // Safe iteration does objects only with correct references.
  // This is why we skip collection set regions that have stale copies of objects,
  // and fix up the pointers in the returned objects.

  ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
  ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
  heap_region_iterate(&blk,
                      /* skip_cset_regions = */ true,
                      /* skip_humongous_continuations = */ true);

  _need_update_refs = false; // already updated the references
}

// Apply blk->heap_region_do() on all committed regions in address order,
// terminating the iteration early if heap_region_do() returns true.
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* current = _ordered_regions->get(i);
    if (skip_humongous_continuation && current->is_humongous_continuation()) {
      continue;
    }
    if (skip_cset_regions && in_collection_set(current)) {
      continue;
    }
    if (blk->heap_region_do(current)) {
      return;
    }
  }
}

class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* sh;
public:
  ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}

  bool heap_region_do(ShenandoahHeapRegion* r) {
    r->clear_live_data();
    sh->set_top_at_mark_start(r->bottom(), r->top());
    return false;
  }
};

void ShenandoahHeap::start_concurrent_marking() {
  if (ShenandoahVerify) {
    verifier()->verify_before_concmark();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
    accumulate_statistics_all_tlabs();
  }

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
    ensure_parsability(true);
  }

  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
    ShenandoahClearLivenessClosure clc(this);
    heap_region_iterate(&clc);
  }

  // Make above changes visible to worker threads
  OrderAccess::fence();

  concurrentMark()->init_mark_roots();

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
    resize_all_tlabs();
  }
}

void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // If we needed to update refs, and concurrent marking has been cancelled,
    // we need to finish updating references.
    set_need_update_refs(false);
  }
  set_concurrent_mark_in_progress(false);

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print_cr("Regions at stopping the concurrent mark:");
    print_heap_regions_on(&ls);
  }
}

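// Flipping mark-in-progress also (de)activates the SATB queues on all Java
// threads: mutators must start enqueueing overwritten references the moment
// marking begins, and may stop as soon as it ends.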
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  _concurrent_mark_in_progress = in_progress ? 1 : 0;
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
  // Note: it is important to first release the _evacuation_in_progress flag here,
  // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
  // in case a VM task is pending.
  set_evacuation_in_progress(in_progress);
  MutexLocker mu(Threads_lock);
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
  set_evacuation_in_progress(in_progress);
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  _evacuation_in_progress = in_progress ? 1 : 0;
  OrderAccess::fence();
}

void ShenandoahHeap::oom_during_evacuation() {
  log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
                        Thread::current()->osthread()->thread_id());

  // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
  collector_policy()->set_should_clear_all_soft_refs(true);
  concurrent_thread()->try_set_full_gc();
  cancel_concgc(_oom_evacuation);

  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    assert(! Threads_lock->owned_by_self()
           || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
    log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
    while (_evacuation_in_progress) { // wait.
      Thread::current()->_ParkEvent->park(1);
    }
  }
}

HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Initialize Brooks pointer for the next object
  HeapWord* result = obj + BrooksPointer::word_size();
  BrooksPointer::initialize(oop(result));
  return result;
}

uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}

ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked(obj);
}

ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  assert(!oopDesc::is_null(obj), "null");
  assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  return _heap->is_marked(obj);
}

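// Liveness predicate selection for reference processing and cleanup. While
// references still need updating after evacuation, roots may hold from-space
// copies, so the "forwarded" variant resolves through the Brooks pointer before
// checking the mark bit; once all references are updated, the oop can be
// tested directly.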
BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
  return need_update_refs() ?
         (BoolObjectClosure*) &_forwarded_is_alive :
         (BoolObjectClosure*) &_is_alive;
}

void ShenandoahHeap::ref_processing_init() {
  MemRegion mr = reserved_region();

  _forwarded_is_alive.init(ShenandoahHeap::heap());
  _is_alive.init(ShenandoahHeap::heap());
  assert(_max_workers > 0, "Sanity");

  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // MT processing
                           _max_workers,            // Degree of MT processing
                           true,                    // MT discovery
                           _max_workers,            // Degree of MT discovery
                           false,                   // Reference discovery is not atomic
                           &_forwarded_is_alive);   // Pessimistically assume "forwarded"
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoahPolicy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_regions->used();
}

void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
  if (try_cancel_concgc()) {
    log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
    _shenandoah_policy->report_concgc_cancelled();
  }
}

void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
  if (try_cancel_concgc()) {
    log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
    _shenandoah_policy->report_concgc_cancelled();
  }
}

const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
  switch (cause) {
    case _oom_evacuation:
      return "Out of memory for evacuation";
    case _vm_stop:
      return "Stopping VM";
    default:
      return "Unknown";
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording.
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  _concurrent_gc_thread->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_concgc(_vm_stop);

  // Step 3. Wait until GC worker exits normally.
  _concurrent_gc_thread->stop();
}

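// Class unloading and metadata cleanup is shared between the concurrent cycle
// and full GC. The work is identical; only the phase-timing buckets differ,
// so each phase id is selected up front based on full_gc.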
void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge :
          ShenandoahPhaseTimings::purge;

  ShenandoahPhaseTimings::Phase phase_unload =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_class_unload :
          ShenandoahPhaseTimings::purge_class_unload;

  ShenandoahPhaseTimings::Phase phase_cldg =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_cldg :
          ShenandoahPhaseTimings::purge_cldg;

  ShenandoahPhaseTimings::Phase phase_par =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par :
          ShenandoahPhaseTimings::purge_par;

  ShenandoahPhaseTimings::Phase phase_par_classes =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par_classes :
          ShenandoahPhaseTimings::purge_par_classes;

  ShenandoahPhaseTimings::Phase phase_par_codecache =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par_codecache :
          ShenandoahPhaseTimings::purge_par_codecache;

  ShenandoahPhaseTimings::Phase phase_par_rmt =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par_rmt :
          ShenandoahPhaseTimings::purge_par_rmt;

  ShenandoahPhaseTimings::Phase phase_par_symbstring =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
          ShenandoahPhaseTimings::purge_par_symbstring;

  ShenandoahPhaseTimings::Phase phase_par_sync =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_purge_par_sync :
          ShenandoahPhaseTimings::purge_par_sync;

  ShenandoahGCPhase root_phase(phase_root);

  BoolObjectClosure* is_alive = is_alive_closure();

  bool purged_class;

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahGCPhase phase(phase_unload);
    purged_class = SystemDictionary::do_unloading(is_alive,
                                                  full_gc ? ShenandoahMarkCompact::gc_timer() : gc_timer(),
                                                  true);
  }

  {
    ShenandoahGCPhase phase(phase_par);
    uint active = _workers->active_workers();
    ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
    _workers->run_task(&unlink_task);

    ShenandoahPhaseTimings* p = ShenandoahHeap::heap()->phase_timings();
    ParallelCleaningTimes times = unlink_task.times();

    // "times" reports total time summed over workers, while phase_tables_cc reports
    // wall time. Divide the totals by the number of active workers to get average
    // per-worker times, which add up to wall time.
    p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
    p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
    p->record_phase_time(phase_par_rmt,        times.rmt_work_us() / active);
    p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
    p->record_phase_time(phase_par_sync,       times.sync_us() / active);
  }

  {
    ShenandoahGCPhase phase(phase_cldg);
    ClassLoaderDataGraph::purge();
  }
}

void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
  _need_update_refs = need_update_refs;
}

// FIXME: this should live in ShenandoahHeapRegionSet.
ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
  size_t region_idx = r->region_number() + 1;
  ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
  guarantee(next->region_number() == region_idx, "region number must match");
  // Skip humongous regions.
  while (next->is_humongous()) {
    region_idx = next->region_number() + 1;
    next = _ordered_regions->get(region_idx);
    guarantee(next->region_number() == region_idx, "region number must match");
  }
  return next;
}

ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}

MarkBitMap* ShenandoahHeap::mark_bit_map() {
  return _mark_bit_map;
}

void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
  _free_regions->add_region(r);
}

void ShenandoahHeap::clear_free_regions() {
  _free_regions->clear();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != NULL, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}
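
// Address of the cancellation flag, exposed alongside in_cset_fast_test_addr()
// above, presumably so that generated code can poll these flags with a plain
// memory load instead of a runtime call. As a rough sketch (not the actual
// emitted code), an in-cset test against the biased map would be:
//   jbyte in_cset = *((jbyte*) in_cset_fast_test_addr()
//                     + ((uintptr_t) obj >> ShenandoahHeapRegion::region_size_bytes_shift()));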
address ShenandoahHeap::cancelled_concgc_addr() {
  return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
}

size_t ShenandoahHeap::conservative_max_heap_alignment() {
  return ShenandoahMaxRegionSize;
}

size_t ShenandoahHeap::bytes_allocated_since_cm() {
  return _bytes_allocated_since_cm;
}

void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
  _bytes_allocated_since_cm = bytes;
}

void ShenandoahHeap::set_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  // Index of the region that covers region_base.
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  _top_at_mark_starts[index] = addr;
}

HeapWord* ShenandoahHeap::top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  return _top_at_mark_starts[index];
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress = in_progress;
}

bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress;
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  _update_refs_in_progress = in_progress;
}

bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _update_refs_in_progress;
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::add_nmethod(nm);
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::remove_nmethod(nm);
}

void ShenandoahHeap::pin_object(oop o) {
  ShenandoahHeapLocker locker(lock());
  heap_region_containing(o)->make_pinned();
}

void ShenandoahHeap::unpin_object(oop o) {
  ShenandoahHeapLocker locker(lock());
  heap_region_containing(o)->make_unpinned();
}

GCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

#ifdef ASSERT
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
  assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");

  if (SafepointSynchronize::is_at_safepoint()) {
    if (UseDynamicNumberOfGCThreads ||
        (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
      assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
    } else {
      // Use ParallelGCThreads inside safepoints
      assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
    }
  } else {
    if (UseDynamicNumberOfGCThreads ||
        (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
      assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
    } else {
      // Use ConcGCThreads outside safepoints
      assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
    }
  }
}
#endif

class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
private:
  size_t _garbage;
public:
  ShenandoahCountGarbageClosure() : _garbage(0) {
  }

  bool heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_regular()) {
      _garbage += r->garbage();
    }
    return false;
  }

  size_t garbage() {
    return _garbage;
  }
};

size_t ShenandoahHeap::garbage() {
  ShenandoahCountGarbageClosure cl;
  heap_region_iterate(&cl);
  return cl.garbage();
}

ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
  return _connection_matrix;
}

ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
  return _partial_gc;
}

void ShenandoahHeap::do_partial_collection() {
  partial_gc()->do_partial_collection();
}

ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert(_verifier != NULL, "sanity");
  return _verifier;
}
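
// Parallel update-references task. Workers claim regions one at a time:
// - Collection set regions contain only from-space copies whose live contents
//   have already been evacuated, so nothing is updated there; their slice of
//   the mark bitmap (up to top-at-mark-start) is simply cleared.
// - All other active regions get the references inside their marked objects
//   updated through closure T, which is either the plain update closure or the
//   matrix-recording flavor (see update_heap_references() below).
// Workers re-check for cancellation after every region, and can yield to
// safepoints when running concurrently.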
template<class T>
class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
private:
  T _cl;
  ShenandoahHeap* _heap;
  ShenandoahHeapRegionSet* _regions;
  bool _concurrent;
public:
  ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions, bool concurrent) :
    AbstractGangTask("Concurrent Update References Task"),
    _cl(T()),
    _heap(ShenandoahHeap::heap()),
    _regions(regions),
    _concurrent(concurrent) {
  }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      if (_heap->in_collection_set(r)) {
        HeapWord* bottom = r->bottom();
        HeapWord* top = _heap->top_at_mark_start(r->bottom());
        if (top > bottom) {
          _heap->mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
      } else {
        if (r->is_active()) {
          _heap->marked_object_oop_safe_iterate(r, &_cl);
        }
      }
      if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
        return;
      }
      r = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions, bool concurrent) {
  if (UseShenandoahMatrix) {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions, concurrent);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(update_regions, concurrent);
    workers()->run_task(&task);
  }
}

void ShenandoahHeap::concurrent_update_heap_references() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
  ShenandoahHeapRegionSet* update_regions = regions();
  update_regions->clear_current_index();
  update_heap_references(update_regions, true);
}

void ShenandoahHeap::prepare_update_refs() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (ShenandoahVerify) {
    verifier()->verify_before_updaterefs();
  }

  set_evacuation_in_progress_at_safepoint(false);
  set_update_refs_in_progress(true);
  ensure_parsability(true);
  if (UseShenandoahMatrix) {
    connection_matrix()->clear_all();
  }
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = _ordered_regions->get(i);
    r->set_concurrent_iteration_safe_limit(r->top());
  }
}
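
// Completes the update-references phase at a safepoint. If the concurrent
// phase was cancelled, the remaining regions are first processed here,
// non-concurrently, so that the root updating and free set rebuilding below
// always start from a fully updated heap.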
void ShenandoahHeap::finish_update_refs() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (cancelled_concgc()) {
    ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);

    // Finish updating references where we left off.
    clear_cancelled_concgc();
    ShenandoahHeapRegionSet* update_regions = regions();
    update_heap_references(update_regions, false);
  }

  assert(! cancelled_concgc(), "Should have been done right before");
  concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);

  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahGCPhase final_str_dedup_table(ShenandoahPhaseTimings::final_update_refs_dedup_table);
    ShenandoahStringDedup::parallel_update_refs();
  }

  // Allocations might have happened before we reached this safepoint; record the peak:
  shenandoahPolicy()->record_peak_occupancy();

  ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);

  trash_cset_regions();
  set_need_update_refs(false);

  if (ShenandoahVerify) {
    verifier()->verify_after_updaterefs();
  }

  {
    // Rebuild the free set
    ShenandoahHeapLocker locker(lock());
    _free_regions->clear();
    size_t end = _ordered_regions->active_regions();
    for (size_t i = 0; i < end; i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (r->is_alloc_allowed()) {
        assert(!in_collection_set(r), "collection set should be clear");
        _free_regions->add_region(r);
      }
    }
  }
  set_update_refs_in_progress(false);
}

void ShenandoahHeap::set_alloc_seq_gc_start() {
  // Take the next number; the start seq number is inclusive.
  _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::alloc_seq_num() + 1;
}

void ShenandoahHeap::set_alloc_seq_gc_end() {
  // Take the current number; the end seq number is also inclusive.
  _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::alloc_seq_num();
}

#ifdef ASSERT
void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
  _lock.assert_owned_by_current_thread();
}

void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
  _lock.assert_not_owned_by_current_thread();
}

void ShenandoahHeap::assert_heaplock_or_safepoint() {
  _lock.assert_owned_by_current_thread_or_safepoint();
}
#endif

void ShenandoahHeap::recycle_trash_assist(size_t limit) {
  assert_heaplock_owned_by_current_thread();

  size_t count = 0;
  for (size_t i = 0; (i < num_regions()) && (count < limit); i++) {
    ShenandoahHeapRegion* r = _ordered_regions->get(i);
    if (r->is_trash()) {
      decrease_used(r->used());
      r->recycle();
      _free_regions->add_region(r);
      count++;
    }
  }
}
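
// Concurrent trash recycling. Note the double-checked pattern: is_trash() is
// first tested racily, without the heap lock, to skip uninteresting regions
// cheaply, and re-tested under the lock before the region is actually
// recycled. The SpinPause() after each region deliberately lets allocating
// threads barge in and take the lock, so cleanup does not stall allocations.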
void ShenandoahHeap::recycle_trash() {
  // The heap lock is not reentrant; check we don't already hold it.
  assert_heaplock_not_owned_by_current_thread();

  size_t bytes_reclaimed = 0;

  if (UseShenandoahMatrix) {
    // The complication with matrix cleanup is that we want to batch the matrix
    // updates to amortize their cost. We also cannot add regions to the free set
    // until the matrix is clean, otherwise we would race with the actual
    // allocations.

    size_t count = 0;
    for (size_t i = 0; i < num_regions(); i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (r->is_trash()) {
        ShenandoahHeapLocker locker(lock());
        if (r->is_trash()) {
          bytes_reclaimed += r->used();
          decrease_used(r->used());
          r->recycle_no_matrix();
          _recycled_regions[count++] = r->region_number();
        }
      }
      SpinPause(); // allow allocators to barge the lock
    }

    connection_matrix()->clear_batched(_recycled_regions, count);

    {
      ShenandoahHeapLocker locker(lock());
      for (size_t i = 0; i < count; i++) {
        ShenandoahHeapRegion* r = _ordered_regions->get(_recycled_regions[i]);
        _free_regions->add_region(r);
      }
    }
  } else {
    for (size_t i = 0; i < num_regions(); i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (r->is_trash()) {
        ShenandoahHeapLocker locker(lock());
        if (r->is_trash()) {
          bytes_reclaimed += r->used();
          decrease_used(r->used());
          r->recycle();
          _free_regions->add_region(r);
        }
      }
      SpinPause(); // allow allocators to barge the lock
    }
  }

  _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
}

void ShenandoahHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  print_heap_regions_on(st);
}

bool ShenandoahHeap::commit_bitmaps(ShenandoahHeapRegion* r) {
  // Each region covers a fixed-size slice of the mark bitmap.
  size_t len = _bitmap_words_per_region * HeapWordSize;
  size_t off = r->region_number() * _bitmap_words_per_region;
  if (!os::commit_memory((char*) (_bitmap_region.start() + off), len, false)) {
    return false;
  }
  return true;
}

bool ShenandoahHeap::uncommit_bitmaps(ShenandoahHeapRegion* r) {
  size_t len = _bitmap_words_per_region * HeapWordSize;
  size_t off = r->region_number() * _bitmap_words_per_region;
  if (!os::uncommit_memory((char*) (_bitmap_region.start() + off), len)) {
    return false;
  }
  return true;
}