/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};
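// The pretouch task below splits [start, end) into chunks that workers claim
// lock-free: Atomic::add_ptr on _cur_addr returns the advanced cursor, so
// subtracting the chunk size back off yields the start of the chunk this
// worker just claimed. A worker drops out of the loop once its claimed chunk
// starts past _end_addr.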
class ShenandoahPretouchTask : public AbstractGangTask {
private:
  char* volatile _cur_addr;
  char* const    _start_addr;
  char* const    _end_addr;
  size_t const   _page_size;
public:
  ShenandoahPretouchTask(char* start_address, char* end_address, size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _cur_addr(start_address),
    _start_addr(start_address),
    _end_addr(end_address),
    _page_size(page_size) {
  }

  virtual void work(uint worker_id) {
    size_t const actual_chunk_size = MAX2(PreTouchParallelChunkSize, _page_size);
    while (true) {
      char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
      if (touch_addr < _start_addr || touch_addr >= _end_addr) {
        break;
      }
      char* end_addr = touch_addr + MIN2(actual_chunk_size, pointer_delta(_end_addr, touch_addr, sizeof(char)));
      os::pretouch_memory(touch_addr, end_addr, _page_size);
    }
  }
};

void ShenandoahHeap::pretouch_storage(char* start, char* end, WorkGang* workers) {
  assert (ShenandoahAlwaysPreTouch, "Sanity");
  assert (!AlwaysPreTouch, "Should have been overridden");

  size_t size = (size_t)(end - start);
  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t num_chunks = MAX2((size_t)1, size / MAX2(PreTouchParallelChunkSize, page_size));
  uint num_workers = MIN2((uint)num_chunks, workers->active_workers());

  log_info(gc, heap)("Parallel pretouch with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT " bytes.",
                     num_workers, num_chunks, size);

  ShenandoahPretouchTask cl(start, end, page_size);
  workers->run_task(&cl, num_workers);
}
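// Heap initialization: reserve and commit the initial storage, carve it into
// regions, set up the SATB queue set, the two marking bitmaps, the in-cset
// fast-test and top-at-mark-start tables, and create the monitoring support
// and the concurrent GC thread.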
jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);
  if (ShenandoahAlwaysPreTouch) {
    pretouch_storage(_storage.low(), _storage.high(), _workers);
  }

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _sorted_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current = new ShenandoahHeapRegion();
    current->initialize_heap_region((HeapWord*) pgc_rs.base() +
                                    regionSizeWords * i, regionSizeWords, i);
    _free_regions->add_region(current);
    _ordered_regions->add_region(current);
    _sorted_regions->add_region(current);
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses stuff (the SATB* things) that are in G1, but probably
  // belong in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
  _mark_bit_map0.initialize(heap_region, bitmap_region0);
  _prev_mark_bit_map = &_mark_bit_map0;

  ReservedSpace bitmap1(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
  _mark_bit_map1.initialize(heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  // Initialize fast collection set test structure.
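  // Both tables below are biased: the array base is shifted back by
  // (heap base >> RegionSizeShift), so an entry can be indexed directly with
  // (addr >> RegionSizeShift) without first subtracting the heap base --
  // the same trick G1 uses for its in-cset fast test.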
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
    NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
    ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);
  clear_cset_fast_test();

  _top_at_mark_starts_base =
    NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _top_at_mark_starts = _top_at_mark_starts_base -
    ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    _top_at_mark_starts_base[i] = _ordered_regions->get(i)->bottom();
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_conc_workers((int) MAX2((uint) ConcGCThreads, 1U)),
  _max_parallel_workers((int) MAX2((uint) ParallelGCThreads, 1U)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _top_at_mark_starts(NULL),
  _top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _growing_heap(0),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;
  // This is odd. They are concurrent gc threads, but they are also task threads.
  // Framework doesn't allow both.
  _workers = new WorkGang("Parallel GC Threads", ParallelGCThreads,
                          /* are_GC_task_threads */ true,
                          /* are_ConcurrentGC_threads */ false);
  _conc_workers = new WorkGang("Concurrent GC Threads", ConcGCThreads,
                               /* are_GC_task_threads */ true,
                               /* are_ConcurrentGC_threads */ false);
  if ((_workers == NULL) || (_conc_workers == NULL)) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
    _conc_workers->initialize_workers();
  }
}
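// Bitmap reset is parallelized over regions: workers claim regions one at a
// time and clear only [bottom, recorded bitmap top) -- the range that can
// actually contain marks -- rather than sweeping the whole bitmap.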
class ResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = region->top_prev_mark_bitmap();
      region->set_top_prev_mark_bitmap(region->top_at_prev_mark_start());
      if (top > bottom) {
        heap->reset_mark_bitmap_range(bottom, top);
      }
      region = _regions->claim_next();
    }
  }
};

class ResetPrevBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetPrevBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Prev Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = region->top_prev_mark_bitmap();
      if (top > bottom) {
        heap->reset_prev_mark_bitmap_range(bottom, top);
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap(WorkGang* workers) {
  GCTraceTime(Info, gc, phases) time("Concurrent reset bitmaps", gc_timer(), GCCause::_no_gc);

  ResetBitmapTask task = ResetBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

void ShenandoahHeap::reset_prev_mark_bitmap(WorkGang* workers) {
  GCTraceTime(Info, gc, phases) time("Concurrent reset prev bitmaps", gc_timer(), GCCause::_no_gc);

  ResetPrevBitmapTask task = ResetPrevBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

void ShenandoahHeap::reset_mark_bitmap_range(HeapWord* from, HeapWord* to) {
  _next_mark_bit_map->clear_range(MemRegion(from, to));
}

void ShenandoahHeap::reset_prev_mark_bitmap_range(HeapWord* from, HeapWord* to) {
  _prev_mark_bit_map->clear_range(MemRegion(from, to));
}

bool ShenandoahHeap::is_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}
void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (_cancelled_concgc) {
    st->print("cancelled ");
  }
  st->print("\n");

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  {
    if (UseTLAB) {
      InitGCLABClosure init_gclabs;
      for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
      gc_threads_do(&init_gclabs);
    }
  }
  _scm->initialize();

  ref_processing_init();

  _max_workers = MAX2(_max_parallel_workers, _max_conc_workers);
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  _used = bytes;
  OrderAccess::release();
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  Atomic::add(-bytes, &_used);
}
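// Note on the accounting above: _used is a heap-wide byte counter updated
// with atomic adds from the allocation paths; used() pairs an acquire with
// the release in set_used() so unlocked readers see a consistent value. It
// is cross-checked against the per-region sums in verify_heap_size_consistency().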
size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p > _first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! heap_region_containing(result)->is_in_collection_set(), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = NULL;
  result = allocate_memory_work(word_size);

  if (result == NULL) {
    bool retry;
    do {
      // Try to grow the heap.
      retry = check_grow_heap();
      result = allocate_memory_work(word_size);
    } while (retry && result == NULL);
  }

  if (result == NULL && ! evacuating) { // Allocation failed, try full-GC, then retry allocation.
    log_develop_trace(gc)("Failed to allocate " SIZE_FORMAT " bytes, free regions: ", word_size * HeapWordSize);
    collect(GCCause::_allocation_failure);
    result = allocate_memory_work(word_size);
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ", word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}
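// The slow path above tries, in order: the shared free set, growing the heap
// (retrying, since another thread may be the one doing the growing), and --
// unless we are called from a write barrier during evacuation -- a full GC
// followed by one more allocation attempt.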
bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

bool ShenandoahHeap::check_grow_heap() {
  assert(_free_regions->max_regions() >= _free_regions->active_regions(), "don't get negative");

  size_t available = _max_regions - _num_regions;
  if (available == 0) {
    return false; // Don't retry.
  }

  jbyte growing = Atomic::cmpxchg(1, &_growing_heap, 0);
  if (growing == 0) {
    // Only one thread succeeds this, and this one gets
    // to grow the heap. All other threads can continue
    // to allocate from the reserve.
    grow_heap_by(MIN2(available, ShenandoahAllocReserveRegions));

    // Reset it back to 0, so that other threads can take it again.
    Atomic::store(0, &_growing_heap);
    return true;
  } else {
    // Let other threads work, then try again.
    os::naked_yield();
    return true;
  }
}
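// Only one thread grows the heap at a time: the winner of the cmpxchg on
// _growing_heap performs the growth while losers yield and retry their
// allocation, which may then succeed against the newly added regions.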
HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in free region set.
  // Coming out of full GC, it is possible that there is no
  // free region available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  size_t current_idx = _free_regions->current_index();
  ShenandoahHeapRegion* my_current_region = _free_regions->get(current_idx);

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");

#ifdef ASSERT
  if (my_current_region->is_in_collection_set()) {
    print_heap_regions();
  }
#endif
  assert(! my_current_region->is_in_collection_set(), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->par_allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    current_idx = _free_regions->par_claim_next(current_idx);
    my_current_region = _free_regions->get(current_idx);

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    // _free_regions->increase_used(remaining);
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! my_current_region->is_in_collection_set(), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->par_allocate(word_size);
  }

  my_current_region->increase_live_data(word_size * HeapWordSize);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->claim_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL) {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" in start region "SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}
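// Regular object allocation below prepends a Brooks forwarding pointer to
// every object: we allocate BrooksPointer::word_size() extra words, return
// the address just past the forwarding pointer, and initialize the pointer
// so the fresh object forwards to itself.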
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  HeapWord* result = filler + BrooksPointer::word_size();
  if (filler != NULL) {
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! heap_region_containing(result)->is_in_collection_set(), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT, size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT" of size %d\n", p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_prev(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_prev(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) p_prime));
      // assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_next_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->get_live_data() > 0, "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_prev_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _cs(cs),
    _sh(sh) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread "INT32_FORMAT" claimed Heap Region "SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->get_live_data() > 0, "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT "\n", from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    if (_heap->cancelled_concgc()) {
      // The aborted marking bitmap needs to be cleared at the end of cycle.
      // Setup the top-marker for this.
      r->set_top_prev_mark_bitmap(r->top_at_mark_start());

      return false;
    }

    r->swap_top_at_mark_start();

    if (r->is_in_collection_set()) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
      _heap->free_regions()->add_region(r);
    }

    return false;
  }
  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};
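// Recycling walks all regions once: it swaps the top-at-mark-start markers,
// returns collection set regions to the free set, and reports the reclaimed
// bytes to the policy. On a cancelled cycle it only records bitmap
// top-markers so the aborted marks can be cleared later.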
void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT")-> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")", _prefix, _index, p2i(p), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(o)), o->klass()->internal_name(), p2i(o->klass()));
      } else {
        // tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty: %s) -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty (%s))", _prefix, _index, p2i(p), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(o)->is_in_collection_set()));
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)", _prefix, _index, p2i(p), p2i((HeapWord*) o));
      }
    } else {
      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:", _prefix, p2i((HeapWord*) p), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(p)), p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}
class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_prev(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_prev(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)", p2i(p), BOOL_TO_STR(_heap->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o), BOOL_TO_STR(_heap->heap_region_containing(o)->is_in_collection_set()), p2i((HeapWord*) oopDesc::bs()->read_barrier(o)), BOOL_TO_STR(_heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->is_in_collection_set()));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->heap_region_containing(o)->is_in_collection_set(), "references must not point to dirty heap regions");
      assert(_heap->is_marked_prev(o), "live oops must be marked");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};
class IterateMarkedCurrentObjectsClosure: public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  ExtendedOopClosure* _cl;
public:
  IterateMarkedCurrentObjectsClosure(ExtendedOopClosure* cl) :
    _heap(ShenandoahHeap::heap()), _cl(cl) {};

  void do_object(oop p) {
    if (_heap->is_marked_current(p)) {
      p->oop_iterate(_cl);
    }
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  uint index = r->region_number();

  assert(r->get_live_data() == 0, "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->reset();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_current(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(!r->is_in_collection_set(), "Should have been cleared by now");
    return false;
  }
};
#endif
void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    _collection_set->clear();
    _free_regions->clear();

    ShenandoahReclaimHumongousRegionsClosure reclaim;
    heap_region_iterate(&reclaim);

    // _ordered_regions->print();
#ifdef ASSERT
    CheckCollectionSetClosure ccsc;
    _ordered_regions->heap_region_iterate(&ccsc);
#endif

    _shenandoah_policy->choose_collection_set(_collection_set);

    _shenandoah_policy->choose_free_set(_free_regions);

    /*
    tty->print("Sorted free regions\n");
    _free_regions->print();
    */

    if (_collection_set->count() == 0)
      cancel_concgc();

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);

    RetireTLABClosure cl(retire_tlabs);
    for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
      cl.do_thread(thread);
    }
    gc_threads_do(&cl);
  }
}
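// Root evacuation: for every root that points into the collection set, the
// closure below evacuates the object if it has not been forwarded yet and
// stores the to-space reference back into the root, so no root points to
// from-space once the evacuation pause is over.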
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_cset_fast_test((HeapWord*) obj)) {
        assert(_heap->is_marked_prev(obj), "only evacuate marked objects %d %d", _heap->is_marked_prev(obj), _heap->is_marked_prev(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s", p2i(p), p2i((HeapWord*) obj), BOOL_TO_STR(_heap->is_in(obj)), BOOL_TO_STR(_heap->in_cset_fast_test(obj)), BOOL_TO_STR(_heap->is_marked_current(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootProcessor* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootProcessor rp(this, _max_parallel_workers, ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}
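// Evacuation proper: workers claim regions from the collection set and copy
// all live (previously marked) objects out. Cancellation is polled between
// regions; a cancelled cycle falls back to scheduling a full GC.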
void ShenandoahHeap::parallel_evacuate() {
  if (! cancelled_concgc()) {
    log_develop_trace(gc)("starting parallel_evacuate");

    _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

    if (log_is_enabled(Trace, gc, region)) {
      ResourceMark rm;
      outputStream* out = Log(gc, region)::trace_stream();
      out->print("Printing all available regions");
      print_heap_regions(out);
    }

    if (log_is_enabled(Trace, gc, cset)) {
      ResourceMark rm;
      outputStream* out = Log(gc, cset)::trace_stream();
      out->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->count());
      _collection_set->print(out);

      out->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->count());
      _free_regions->print(out);
    }

    ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

    conc_workers()->run_task(&evacuationTask);

    if (log_is_enabled(Trace, gc, cset)) {
      ResourceMark rm;
      outputStream* out = Log(gc, cset)::trace_stream();
      out->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n",
                 _collection_set->count());
      _collection_set->print(out);

      out->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n",
                 _free_regions->count());
      _free_regions->print(out);
    }

    if (log_is_enabled(Trace, gc, region)) {
      ResourceMark rm;
      outputStream* out = Log(gc, region)::trace_stream();
      out->print_cr("all regions after evacuation:");
      print_heap_regions(out);
    }

    _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);

    if (cancelled_concgc()) {
      // tty->print("GOTCHA: by thread %d", Thread::current()->osthread()->thread_id());
      concurrent_thread()->schedule_full_gc();
      // tty->print("PostGotcha: by thread %d FullGC should be scheduled\n",
      //            Thread::current()->osthread()->thread_id());
    }
  }
}
class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  size_t idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get(idx);
  if (current == NULL) {
    return 0;
  } else if (current->free() > MinTLABSize) {
    return current->free();
  } else {
    return MinTLABSize;
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}

class ResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ResizeGCLABClosure cl;
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  AccumulateStatisticsGCLABClosure cl;
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}
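// External GC requests: explicit (user-requested) GCs and allocation
// failures are both funneled to the concurrent GC thread as full GCs;
// this collector has no separate young/old generations to choose between.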
void ShenandoahHeap::collect(GCCause::Cause cause) {
  assert(cause != GCCause::_gc_locker, "no JNI critical callback");
  if (GCCause::is_user_requested_gc(cause)) {
    if (! DisableExplicitGC) {
      cancel_concgc();
      _concurrent_gc_thread->do_full_gc(cause);
    }
  } else if (cause == GCCause::_allocation_failure) {
    cancel_concgc();
    collector_policy()->set_should_clear_all_soft_refs(true);
    _concurrent_gc_thread->do_full_gc(cause);
  }
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // assert(false, "Shouldn't need to do full collections");
}

AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ensure_parsability(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
  conc_workers()->print_worker_threads_on(st);
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  conc_workers()->threads_do(tcl);
}

void ShenandoahHeap::print_tracing_info() const {
  if (log_is_enabled(Info, gc, stats)) {
    ResourceMark rm;
    outputStream* out = Log(gc, stats)::info_stream();
    _shenandoah_policy->print_tracing_info(out);
  }
}

class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  VerifyOption _vo;
  bool _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  ShenandoahVerifyRootsClosure(VerifyOption vo) :
    _heap(ShenandoahHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      { // Just for debugging.
        tty->print_cr("Root location "PTR_FORMAT
                      " verified "PTR_FORMAT, p2i(p), p2i((void*) obj));
        // obj->print_on(tty);
      }
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahVerifyHeapClosure: public ObjectClosure {
private:
  ShenandoahVerifyRootsClosure _rootsCl;
public:
  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {};

  void do_object(oop p) {
    _rootsCl.do_oop(&p);
  }
};

class ShenandoahVerifyKlassClosure: public KlassClosure {
  OopClosure* _oop_closure;
public:
  ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
  void do_klass(Klass* k) {
    k->oops_do(_oop_closure);
  }
};

void ShenandoahHeap::verify(VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ShenandoahVerifyRootsClosure rootsCl(vo);

    assert(Thread::current()->is_VM_thread(),
           "Expected to be executed serially by the VM thread at this point");

    roots_iterate(&rootsCl);

    bool failures = rootsCl.failures();
    log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));

    ShenandoahVerifyHeapClosure heapCl(rootsCl);

    object_iterate(&heapCl);
    // TODO: Implement rest of it.
#ifdef ASSERT_DISABLED
    verify_live();
#endif
  } else {
    tty->print("(SKIPPING roots, heapRegions, remset) ");
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread* thr) const {
  return _free_regions->capacity();
}

class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
  ObjectClosure* _cl;
public:
  ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap::heap()->marked_prev_object_iterate(r, _cl);
    return false;
  }
};

void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  ShenandoahIterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk, false, true);
}

void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
  Unimplemented();
}
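// Note that object_iterate() above only visits objects that are marked in
// the previous marking bitmap, presumably because unmarked ranges of a
// region are not guaranteed to be parsable while the heap is being
// collected concurrently.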
// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current = _ordered_regions->get(i);
    if (skip_humongous_continuation && current->is_humongous_continuation()) {
      continue;
    }
    if (skip_dirty_regions && current->is_in_collection_set()) {
      continue;
    }
    if (blk->doHeapRegion(current)) {
      return;
    }
  }
}

class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
  ShenandoahHeap* sh;
public:
  ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->clear_live_data();
    r->init_top_at_mark_start();
    return false;
  }
};

void ShenandoahHeap::start_concurrent_marking() {
  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
  accumulate_statistics_all_tlabs();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
    ensure_parsability(true);
    shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
  }

  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

#ifdef ASSERT
  if (ShenandoahDumpHeapBeforeConcurrentMark) {
    ensure_parsability(false);
    print_all_refs("pre-mark");
  }
#endif

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
  ClearLivenessClosure clc(this);
  heap_region_iterate(&clc);
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);

  // print_all_refs("pre -mark");

  // oopDesc::_debug = true;

  // Make above changes visible to worker threads
  OrderAccess::fence();

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
  concurrentMark()->init_mark_roots();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);

  // print_all_refs("pre-mark2");
}
class VerifyLivenessClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyLivenessClosure() : _sh(ShenandoahHeap::heap()) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() ==
                (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions; is_dirty: %s, is_forwarded: %s",
                BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
                BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))));
      obj = oopDesc::bs()->read_barrier(obj);
      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
      if (! sh->is_marked_current(obj)) {
        sh->print_on(tty);
      }
      assert(sh->is_marked_current(obj), "referenced objects should be marked; obj: "PTR_FORMAT", marked: %s, is_in_heap: %s",
             p2i((HeapWord*) obj), BOOL_TO_STR(sh->is_marked_current(obj)), BOOL_TO_STR(sh->is_in(obj)));
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};

void ShenandoahHeap::verify_live() {

  VerifyLivenessClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);

}

class VerifyAfterEvacuationClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh(ShenandoahHeap::heap()) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() ==
                (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions; is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
                BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
                BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                obj->klass()->external_name(),
                BOOL_TO_STR(_sh->is_marked_current(obj)));
      obj = oopDesc::bs()->read_barrier(obj);
      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};
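// Invariant sketch (restating the checks above; `heap` stands for the
// ShenandoahHeap instance): an object is "forwarded" iff the read barrier
// resolves it to a different address, and such objects may only live in
// collection-set (from-space) regions:
//
//   oop fwd = oopDesc::bs()->read_barrier(obj);
//   bool is_forwarded = ! oopDesc::unsafe_equals(obj, fwd);
//   bool is_dirty     = heap->heap_region_containing(obj)->is_in_collection_set();
//   guarantee(is_dirty == is_forwarded, "forwarding implies from-space");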
class VerifyAfterUpdateRefsClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyAfterUpdateRefsClosure() : _sh(ShenandoahHeap::heap()) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee((! _sh->heap_region_containing(obj)->is_in_collection_set()),
                "no live reference must point to from-space, is_marked: %s",
                BOOL_TO_STR(_sh->is_marked_current(obj)));
      if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)) && _sh->is_in(p)) {
        tty->print_cr("top-limit: "PTR_FORMAT", p: "PTR_FORMAT,
                      p2i(_sh->heap_region_containing(p)->concurrent_iteration_safe_limit()), p2i(p));
      }
      guarantee(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "no live reference must point to forwarded object");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};

void ShenandoahHeap::verify_heap_after_evacuation() {

  verify_heap_size_consistency();

  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);

}

class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! r->is_in_collection_set(), "no region must be in collection set");
    assert(! ShenandoahHeap::heap()->in_cset_fast_test(r->bottom()), "no region must be in collection set");
    return false;
  }
};

void ShenandoahHeap::swap_mark_bitmaps() {
  CMBitMap* tmp = _prev_mark_bit_map;
  _prev_mark_bit_map = _next_mark_bit_map;
  _next_mark_bit_map = tmp;
}

void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking completed normally: the just-finished marking becomes the
    // "previous" marking. If marking was cancelled while a reference update
    // was pending, leave _need_update_refs set so the update can be finished
    // later.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
  set_concurrent_mark_in_progress(false);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    print_heap_regions(out);
  }
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  _concurrent_mark_in_progress = in_progress ? 1 : 0;
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
  _evacuation_in_progress = in_progress ? 1 : 0;
  OrderAccess::fence();
}

void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification.
  // assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
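// Usage sketch (hypothetical call site; `from` is illustrative): verify_copy()
// is meant to run right after an evacuation copy, with the from-space original
// and the to-space copy that its Brooks pointer now resolves to:
//
//   oop from = ...;                               // from-space original
//   oop to   = oopDesc::bs()->read_barrier(from); // resolves to the to-space copy
//   ShenandoahHeap::heap()->verify_copy(from, to);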
void ShenandoahHeap::oom_during_evacuation() {
  log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
                        Thread::current()->osthread()->thread_id());

  // We ran out of memory during evacuation. Cancel evacuation, and schedule a full GC.
  collector_policy()->set_should_clear_all_soft_refs(true);
  concurrent_thread()->schedule_full_gc();
  cancel_concgc();

  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
    while (_evacuation_in_progress) { // wait.
      Thread::current()->_ParkEvent->park(1);
    }
  }
}

HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Initialize Brooks pointer for the next object.
  HeapWord* result = obj + BrooksPointer::word_size();
  BrooksPointer::initialize(oop(result));
  return result;
}

uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}

void ShenandoahHeap::grow_heap_by(size_t num_regions) {
  size_t base = _num_regions;
  ensure_new_regions(num_regions);

  ShenandoahHeapRegion* regions[num_regions];
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion();
    size_t new_region_index = i + base;
    HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
    new_region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);

    if (log_is_enabled(Trace, gc, region)) {
      ResourceMark rm;
      outputStream* out = Log(gc, region)::trace_stream();
      out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
      new_region->print_on(out);
    }

    assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
    _ordered_regions->add_region(new_region);
    _sorted_regions->add_region(new_region);
    _in_cset_fast_test_base[new_region_index] = false; // Not in cset
    _top_at_mark_starts_base[new_region_index] = new_region->bottom();

    regions[i] = new_region;
  }
  _free_regions->par_add_regions(regions, 0, num_regions, num_regions);
}

void ShenandoahHeap::ensure_new_regions(size_t new_regions) {

  size_t num_regions = _num_regions;
  size_t new_num_regions = num_regions + new_regions;
  assert(new_num_regions <= _max_regions, "we checked this earlier");

  size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
  log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
  bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
  assert(success, "should always be able to expand by requested size");

  _num_regions = new_num_regions;

}

ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

void ShenandoahIsAliveClosure::init(ShenandoahHeap* heap) {
  _heap = heap;
}

bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_current(obj);
}
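// Usage sketch (illustrative): both is-alive closures answer "is this object
// live in the current marking?" for reference processing and unlinking. The
// plain variant above expects a to-space oop; the forwarded variant defined
// next first resolves the oop through its Brooks pointer:
//
//   ShenandoahIsAliveClosure is_alive;
//   is_alive.init(ShenandoahHeap::heap());
//   bool live = is_alive.do_object_b(obj);  // obj must already be in to-space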
ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
  _heap = heap;
}

bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  // Callers may pass a from-space reference; resolve it through the
  // Brooks pointer first.
  obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_current(obj);
}

void ShenandoahHeap::ref_processing_init() {
  MemRegion mr = reserved_region();

  isAlive.init(ShenandoahHeap::heap());
  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // mt processing
                           (int) ConcGCThreads,     // degree of mt processing
                           true,                    // mt discovery
                           (int) ConcGCThreads,     // degree of mt discovery
                           false,                   // reference discovery is not atomic
                           &isAlive);               // is-alive closure
}
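// Note (an assumption about ReferenceProcessor's contract, not stated in this
// file): discovery is registered as non-atomic because references are
// discovered while mutator threads run concurrently with marking, and the
// is-alive closure passed above lets the processor judge referent liveness
// during that concurrent discovery.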
#ifdef ASSERT
void ShenandoahHeap::set_from_region_protection(bool protect) {
  for (uint i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(i);
    if (region != NULL && region->is_in_collection_set()) {
      if (protect) {
        region->memProtectionOn();
      } else {
        region->memProtectionOff();
      }
    }
  }
}
#endif

size_t ShenandoahHeap::num_regions() {
  return _num_regions;
}

size_t ShenandoahHeap::max_regions() {
  return _max_regions;
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoahPolicy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_regions->used();
}

void ShenandoahHeap::cancel_concgc() {
  // Only report it once.
  if (!_cancelled_concgc) {
    log_info(gc)("Cancelling GC");
    _cancelled_concgc = true;
    OrderAccess::fence();
    _shenandoah_policy->report_concgc_cancelled();
  }
}

void ShenandoahHeap::clear_cancelled_concgc() {
  _cancelled_concgc = false;
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

uint ShenandoahHeap::max_parallel_workers() {
  return _max_parallel_workers;
}

uint ShenandoahHeap::max_conc_workers() {
  return _max_conc_workers;
}

void ShenandoahHeap::stop() {
  // Set the cancellation flag directly first, to let GC threads terminate
  // before we ask the concurrent thread to terminate; the latter would
  // otherwise block until all GC threads come to finish normally.
  _cancelled_concgc = true;
  _concurrent_gc_thread->stop();
  cancel_concgc();
}

void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
  StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
  workers()->run_task(&shenandoah_unlink_task);

  // if (G1StringDedup::is_enabled()) {
  //   G1StringDedup::unlink(is_alive);
  // }
}

void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
  _need_update_refs = need_update_refs;
}

// FIXME: This should live in the heap region set.
ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
  size_t region_idx = r->region_number() + 1;
  ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
  guarantee(next->region_number() == region_idx, "region number must match");
  // Skip over humongous regions.
  while (next->is_humongous()) {
    region_idx = next->region_number() + 1;
    next = _ordered_regions->get(region_idx);
    guarantee(next->region_number() == region_idx, "region number must match");
  }
  return next;
}

bool ShenandoahHeap::is_in_collection_set(const void* p) {
  return heap_region_containing(p)->is_in_collection_set();
}

ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}

bool ShenandoahHeap::is_obj_dead(const oop obj, const ShenandoahHeapRegion* r) const {
  return ! r->allocated_after_prev_mark_start((HeapWord*) obj) &&
         ! is_marked_prev(obj, r);
}

CMBitMap* ShenandoahHeap::prev_mark_bit_map() {
  return _prev_mark_bit_map;
}

CMBitMap* ShenandoahHeap::next_mark_bit_map() {
  return _next_mark_bit_map;
}

void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
  _free_regions->add_region(r);
}

void ShenandoahHeap::clear_free_regions() {
  _free_regions->clear();
}

void ShenandoahHeap::register_region_with_in_cset_fast_test(ShenandoahHeapRegion* r) {
  assert(_in_cset_fast_test_base != NULL, "sanity");
  assert(r->is_in_collection_set(), "invariant");
  size_t index = r->region_number();
  assert(index < _in_cset_fast_test_length, "invariant");
  assert(!_in_cset_fast_test_base[index], "invariant");
  _in_cset_fast_test_base[index] = true;
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
}

address ShenandoahHeap::cancelled_concgc_addr() {
  return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
}

void ShenandoahHeap::clear_cset_fast_test() {
  assert(_in_cset_fast_test_base != NULL, "sanity");
  memset(_in_cset_fast_test_base, false,
         _in_cset_fast_test_length * sizeof(bool));
}

size_t ShenandoahHeap::conservative_max_heap_alignment() {
  return HeapRegionBounds::max_size();
}

size_t ShenandoahHeap::bytes_allocated_since_cm() {
  return _bytes_allocated_since_cm;
}

void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
  _bytes_allocated_since_cm = bytes;
}

size_t ShenandoahHeap::max_allocated_gc() {
  return _max_allocated_gc;
}

void ShenandoahHeap::set_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
  _top_at_mark_starts[index] = addr;
}
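// Note on the indexing above (a sketch of the invariant implied by the two
// arrays used in this file): _top_at_mark_starts is biased so that a region's
// base address shifted right by RegionSizeShift indexes it directly, while
// _top_at_mark_starts_base (see grow_heap_by()) is indexed by plain region
// number. With region size 2^RegionSizeShift bytes, a region_base of
// heap_base + n * region_size yields index == (heap_base >> RegionSizeShift) + n.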
void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress = in_progress;
}

bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress;
}

bool ShenandoahHeap::needs_reference_pending_list_locker_thread() const {
  return true;
}

class NMethodOopInitializer : public OopClosure {
private:
  ShenandoahHeap* _heap;
public:
  NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj1 = oopDesc::decode_heap_oop_not_null(o);
      oop obj2 = oopDesc::bs()->write_barrier(obj1);
      if (! oopDesc::unsafe_equals(obj1, obj2)) {
        oopDesc::encode_store_heap_oop(p, obj2);
      }
    }
  }

public:
  void do_oop(oop* o) {
    do_oop_work(o);
  }
  void do_oop(narrowOop* o) {
    do_oop_work(o);
  }
};

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  // Resolve all oops embedded in the nmethod through the write barrier,
  // so that newly registered code does not refer into from-space.
  NMethodOopInitializer init;
  nm->oops_do(&init);
  nm->fix_oop_relocations();
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
}

void ShenandoahHeap::pin_object(oop o) {
  heap_region_containing(o)->pin();
}

void ShenandoahHeap::unpin_object(oop o) {
  heap_region_containing(o)->unpin();
}

GCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}
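// Usage sketch (hypothetical caller): pinning keeps the region containing an
// object from being chosen for evacuation, e.g. while native code holds a raw
// pointer into it:
//
//   ShenandoahHeap* heap = ShenandoahHeap::heap();
//   heap->pin_object(obj);      // region holding obj stays put
//   // ... direct access to obj's payload ...
//   heap->unpin_object(obj);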