1 /* 2 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
21 * 22 */ 23 24 #include "memory/allocation.hpp" 25 #include "gc/g1/heapRegionBounds.inline.hpp" 26 27 #include "gc/shared/gcTimer.hpp" 28 #include "gc/shared/gcTraceTime.inline.hpp" 29 #include "gc/shared/parallelCleaning.hpp" 30 31 #include "gc/shenandoah/brooksPointer.hpp" 32 #include "gc/shenandoah/shenandoahBarrierSet.hpp" 33 #include "gc/shenandoah/shenandoahCollectionSet.hpp" 34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 35 #include "gc/shenandoah/shenandoahConcurrentMark.hpp" 36 #include "gc/shenandoah/shenandoahConcurrentThread.hpp" 37 #include "gc/shenandoah/shenandoahFreeSet.hpp" 38 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 39 #include "gc/shenandoah/shenandoahHeapRegion.hpp" 40 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" 41 #include "gc/shenandoah/shenandoahHumongous.hpp" 42 #include "gc/shenandoah/shenandoahMarkCompact.hpp" 43 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" 44 #include "gc/shenandoah/shenandoahRootProcessor.hpp" 45 #include "gc/shenandoah/vm_operations_shenandoah.hpp" 46 47 #include "runtime/vmThread.hpp" 48 49 const char* ShenandoahHeap::name() const { 50 return "Shenandoah"; 51 } 52 53 void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) { 54 HeapWord* cur = NULL; 55 for (cur = start; cur < end; cur++) { 56 tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur))); 57 } 58 } 59 60 class PrintHeapRegionsClosure : public 61 ShenandoahHeapRegionClosure { 62 private: 63 outputStream* _st; 64 public: 65 PrintHeapRegionsClosure() : _st(tty) {} 66 PrintHeapRegionsClosure(outputStream* st) : _st(st) {} 67 68 bool doHeapRegion(ShenandoahHeapRegion* r) { 69 r->print_on(_st); 70 return false; 71 } 72 }; 73 74 jint ShenandoahHeap::initialize() { 75 CollectedHeap::pre_initialize(); 76 77 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); 78 size_t max_byte_size = collector_policy()->max_heap_byte_size(); 79 if (ShenandoahGCVerbose) 80 
tty->print_cr("init_byte_size = "SIZE_FORMAT","SIZE_FORMAT_HEX" max_byte_size = "INT64_FORMAT","SIZE_FORMAT_HEX, 81 init_byte_size, init_byte_size, max_byte_size, max_byte_size); 82 83 Universe::check_alignment(max_byte_size, 84 ShenandoahHeapRegion::RegionSizeBytes, 85 "shenandoah heap"); 86 Universe::check_alignment(init_byte_size, 87 ShenandoahHeapRegion::RegionSizeBytes, 88 "shenandoah heap"); 89 90 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, 91 Arguments::conservative_max_heap_alignment()); 92 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size())); 93 94 set_barrier_set(new ShenandoahBarrierSet(this)); 95 ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size); 96 _storage.initialize(pgc_rs, init_byte_size); 97 if (ShenandoahGCVerbose) { 98 tty->print_cr("Calling initialize on reserved space base = "PTR_FORMAT" end = "PTR_FORMAT, 99 p2i(pgc_rs.base()), p2i(pgc_rs.base() + pgc_rs.size())); 100 } 101 102 _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes; 103 _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes; 104 _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes; 105 size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize; 106 assert(init_byte_size == _initialSize, "tautology"); 107 _ordered_regions = new ShenandoahHeapRegionSet(_max_regions); 108 _sorted_regions = new ShenandoahHeapRegionSet(_max_regions); 109 _collection_set = new ShenandoahCollectionSet(_max_regions); 110 _free_regions = new ShenandoahFreeSet(_max_regions); 111 112 size_t i = 0; 113 for (i = 0; i < _num_regions; i++) { 114 115 ShenandoahHeapRegion* current = new ShenandoahHeapRegion(); 116 current->initialize_heap_region((HeapWord*) pgc_rs.base() + 117 regionSizeWords * i, regionSizeWords, i); 118 _free_regions->add_region(current); 119 _ordered_regions->add_region(current); 120 _sorted_regions->add_region(current); 121 } 122 assert(((size_t) 
_ordered_regions->active_regions()) == _num_regions, ""); 123 _first_region = _ordered_regions->get(0); 124 _first_region_bottom = _first_region->bottom(); 125 assert((((size_t) _first_region_bottom) & 126 (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0, 127 "misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom)); 128 129 _numAllocs = 0; 130 131 if (ShenandoahGCVerbose) { 132 tty->print("All Regions\n"); 133 print_heap_regions(); 134 tty->print("Free Regions\n"); 135 _free_regions->print(); 136 } 137 138 // The call below uses stuff (the SATB* things) that are in G1, but probably 139 // belong into a shared location. 140 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, 141 SATB_Q_FL_lock, 142 20 /*G1SATBProcessCompletedThreshold */, 143 Shared_SATB_Q_lock); 144 145 // Reserve space for prev and next bitmap. 146 size_t bitmap_size = CMBitMap::compute_size(heap_rs.size()); 147 MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize); 148 149 ReservedSpace bitmap0(ReservedSpace::allocation_align_size_up(bitmap_size)); 150 os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap"); 151 MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize); 152 _mark_bit_map0.initialize(heap_region, bitmap_region0); 153 _prev_mark_bit_map = &_mark_bit_map0; 154 155 ReservedSpace bitmap1(ReservedSpace::allocation_align_size_up(bitmap_size)); 156 os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap"); 157 MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize); 158 _mark_bit_map1.initialize(heap_region, bitmap_region1); 159 _next_mark_bit_map = &_mark_bit_map1; 160 161 // Initialize fast collection set test structure. 
162 _in_cset_fast_test_length = _max_regions; 163 _in_cset_fast_test_base = 164 NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC); 165 _in_cset_fast_test = _in_cset_fast_test_base - 166 ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift); 167 clear_cset_fast_test(); 168 169 _top_at_mark_starts_base = 170 NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC); 171 _top_at_mark_starts = _top_at_mark_starts_base - 172 ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift); 173 174 for (i = 0; i < _num_regions; i++) { 175 _in_cset_fast_test_base[i] = false; // Not in cset 176 _top_at_mark_starts_base[i] = _ordered_regions->get(i)->bottom(); 177 } 178 179 _monitoring_support = new ShenandoahMonitoringSupport(this); 180 181 _concurrent_gc_thread = new ShenandoahConcurrentThread(); 182 183 ShenandoahMarkCompact::initialize(); 184 185 return JNI_OK; 186 } 187 188 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : 189 CollectedHeap(), 190 _shenandoah_policy(policy), 191 _concurrent_mark_in_progress(false), 192 _evacuation_in_progress(false), 193 _full_gc_in_progress(false), 194 _free_regions(NULL), 195 _collection_set(NULL), 196 _bytes_allocated_since_cm(0), 197 _bytes_allocated_during_cm(0), 198 _max_allocated_gc(0), 199 _allocated_last_gc(0), 200 _used_start_gc(0), 201 _max_conc_workers((int) MAX2((uint) ConcGCThreads, 1U)), 202 _max_parallel_workers((int) MAX2((uint) ParallelGCThreads, 1U)), 203 _ref_processor(NULL), 204 _in_cset_fast_test(NULL), 205 _in_cset_fast_test_base(NULL), 206 _top_at_mark_starts(NULL), 207 _top_at_mark_starts_base(NULL), 208 _mark_bit_map0(), 209 _mark_bit_map1(), 210 _cancelled_concgc(false), 211 _need_update_refs(false), 212 _need_reset_bitmaps(false), 213 _growing_heap(0), 214 _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()) 215 216 { 217 if (ShenandoahLogConfig) { 218 tty->print_cr("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads); 219 tty->print_cr("Concurrent GC threads: 
"UINT32_FORMAT, ConcGCThreads); 220 tty->print_cr("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled)); 221 } 222 _scm = new ShenandoahConcurrentMark(); 223 _used = 0; 224 // This is odd. They are concurrent gc threads, but they are also task threads. 225 // Framework doesn't allow both. 226 _workers = new WorkGang("Parallel GC Threads", ParallelGCThreads, 227 /* are_GC_task_threads */true, 228 /* are_ConcurrentGC_threads */false); 229 _conc_workers = new WorkGang("Concurrent GC Threads", ConcGCThreads, 230 /* are_GC_task_threads */true, 231 /* are_ConcurrentGC_threads */false); 232 if ((_workers == NULL) || (_conc_workers == NULL)) { 233 vm_exit_during_initialization("Failed necessary allocation."); 234 } else { 235 _workers->initialize_workers(); 236 _conc_workers->initialize_workers(); 237 } 238 } 239 240 class ResetBitmapTask : public AbstractGangTask { 241 private: 242 ShenandoahHeapRegionSet* _regions; 243 244 public: 245 ResetBitmapTask(ShenandoahHeapRegionSet* regions) : 246 AbstractGangTask("Parallel Reset Bitmap Task"), 247 _regions(regions) { 248 _regions->clear_current_index(); 249 } 250 251 void work(uint worker_id) { 252 ShenandoahHeapRegion* region = _regions->claim_next(); 253 ShenandoahHeap* heap = ShenandoahHeap::heap(); 254 while (region != NULL) { 255 HeapWord* bottom = region->bottom(); 256 HeapWord* top = region->top_prev_mark_bitmap(); 257 region->set_top_prev_mark_bitmap(region->top_at_prev_mark_start()); 258 if (top > bottom) { 259 heap->reset_mark_bitmap_range(bottom, top); 260 } 261 region = _regions->claim_next(); 262 } 263 } 264 }; 265 266 void ShenandoahHeap::reset_mark_bitmap() { 267 GCTraceTime(Info, gc, phases) time("Concurrent reset bitmaps", gc_timer(), GCCause::_no_gc); 268 269 ResetBitmapTask task = ResetBitmapTask(_ordered_regions); 270 conc_workers()->run_task(&task); 271 } 272 273 void ShenandoahHeap::reset_mark_bitmap_range(HeapWord* from, HeapWord* to) { 274 
_next_mark_bit_map->clear_range(MemRegion(from, to)); 275 } 276 277 bool ShenandoahHeap::is_bitmap_clear() { 278 HeapWord* start = _ordered_regions->bottom(); 279 HeapWord* end = _ordered_regions->end(); 280 return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end; 281 } 282 283 void ShenandoahHeap::print_on(outputStream* st) const { 284 st->print("Shenandoah Heap"); 285 st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity()/ K, used() /K); 286 st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K); 287 if (_concurrent_mark_in_progress) { 288 st->print("marking "); 289 } 290 if (_evacuation_in_progress) { 291 st->print("evacuating "); 292 } 293 if (_cancelled_concgc) { 294 st->print("cancelled "); 295 } 296 st->print("\n"); 297 298 if (Verbose) { 299 print_heap_regions(st); 300 } 301 } 302 303 class InitGCLABClosure : public ThreadClosure { 304 public: 305 void do_thread(Thread* thread) { 306 thread->gclab().initialize(true); 307 } 308 }; 309 310 void ShenandoahHeap::post_initialize() { 311 312 { 313 if (UseTLAB) { 314 InitGCLABClosure init_gclabs; 315 for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) { 316 init_gclabs.do_thread(thread); 317 } 318 gc_threads_do(&init_gclabs); 319 } 320 } 321 _scm->initialize(); 322 323 ref_processing_init(); 324 325 _max_workers = MAX(_max_parallel_workers, _max_conc_workers); 326 } 327 328 class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure { 329 size_t sum; 330 public: 331 332 CalculateUsedRegionClosure() { 333 sum = 0; 334 } 335 336 bool doHeapRegion(ShenandoahHeapRegion* r) { 337 sum = sum + r->used(); 338 return false; 339 } 340 341 size_t getResult() { return sum;} 342 }; 343 344 size_t ShenandoahHeap::calculateUsed() { 345 CalculateUsedRegionClosure cl; 346 heap_region_iterate(&cl); 347 return cl.getResult(); 348 } 349 350 void ShenandoahHeap::verify_heap_size_consistency() { 351 352 assert(calculateUsed() 
== used(), 353 "heap used size must be consistent heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed()); 354 } 355 356 size_t ShenandoahHeap::used() const { 357 OrderAccess::acquire(); 358 return _used; 359 } 360 361 void ShenandoahHeap::increase_used(size_t bytes) { 362 Atomic::add(bytes, &_used); 363 } 364 365 void ShenandoahHeap::set_used(size_t bytes) { 366 _used = bytes; 367 OrderAccess::release(); 368 } 369 370 void ShenandoahHeap::decrease_used(size_t bytes) { 371 assert(_used >= bytes, "never decrease heap size by more than we've left"); 372 Atomic::add(-bytes, &_used); 373 } 374 375 size_t ShenandoahHeap::capacity() const { 376 return _num_regions * ShenandoahHeapRegion::RegionSizeBytes; 377 378 } 379 380 bool ShenandoahHeap::is_maximal_no_gc() const { 381 Unimplemented(); 382 return true; 383 } 384 385 size_t ShenandoahHeap::max_capacity() const { 386 return _max_regions * ShenandoahHeapRegion::RegionSizeBytes; 387 } 388 389 size_t ShenandoahHeap::min_capacity() const { 390 return _initialSize; 391 } 392 393 VirtualSpace* ShenandoahHeap::storage() const { 394 return (VirtualSpace*) &_storage; 395 } 396 397 bool ShenandoahHeap::is_in(const void* p) const { 398 HeapWord* first_region_bottom = _first_region->bottom(); 399 HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions; 400 return p > _first_region_bottom && p < last_region_end; 401 } 402 403 bool ShenandoahHeap::is_scavengable(const void* p) { 404 return true; 405 } 406 407 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) { 408 // Retain tlab and allocate object in shared space if 409 // the amount free in the tlab is too large to discard. 410 if (thread->gclab().free() > thread->gclab().refill_waste_limit()) { 411 thread->gclab().record_slow_allocation(size); 412 return NULL; 413 } 414 415 // Discard gclab and allocate a new one. 
416 // To minimize fragmentation, the last GCLAB may be smaller than the rest. 417 size_t new_gclab_size = thread->gclab().compute_size(size); 418 419 thread->gclab().clear_before_allocation(); 420 421 if (new_gclab_size == 0) { 422 return NULL; 423 } 424 425 // Allocate a new GCLAB... 426 HeapWord* obj = allocate_new_gclab(new_gclab_size); 427 if (obj == NULL) { 428 return NULL; 429 } 430 431 if (ZeroTLAB) { 432 // ..and clear it. 433 Copy::zero_to_words(obj, new_gclab_size); 434 } else { 435 // ...and zap just allocated object. 436 #ifdef ASSERT 437 // Skip mangling the space corresponding to the object header to 438 // ensure that the returned space is not considered parsable by 439 // any concurrent GC thread. 440 size_t hdr_size = oopDesc::header_size(); 441 Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal); 442 #endif // ASSERT 443 } 444 thread->gclab().fill(obj, obj + size, new_gclab_size); 445 return obj; 446 } 447 448 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) { 449 return allocate_new_tlab(word_size, false); 450 } 451 452 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) { 453 return allocate_new_tlab(word_size, true); 454 } 455 456 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) { 457 HeapWord* result = allocate_memory(word_size, evacuating); 458 459 if (result != NULL) { 460 assert(! 
heap_region_containing(result)->is_in_collection_set(), "Never allocate in dirty region"); 461 _bytes_allocated_since_cm += word_size * HeapWordSize; 462 463 #ifdef ASSERT 464 if (ShenandoahTraceTLabs) 465 tty->print_cr("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result)); 466 #endif 467 468 } 469 return result; 470 } 471 472 ShenandoahHeap* ShenandoahHeap::heap() { 473 CollectedHeap* heap = Universe::heap(); 474 assert(heap != NULL, "Unitialized access to ShenandoahHeap::heap()"); 475 assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap"); 476 return (ShenandoahHeap*) heap; 477 } 478 479 ShenandoahHeap* ShenandoahHeap::heap_no_check() { 480 CollectedHeap* heap = Universe::heap(); 481 return (ShenandoahHeap*) heap; 482 } 483 484 HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) { 485 HeapWord* result = NULL; 486 result = allocate_memory_work(word_size); 487 488 if (result == NULL) { 489 bool retry; 490 do { 491 // Try to grow the heap. 492 retry = check_grow_heap(); 493 result = allocate_memory_work(word_size); 494 } while (retry && result == NULL); 495 } 496 497 if (result == NULL && ! evacuating) { // Allocation failed, try full-GC, then retry allocation. 498 // tty->print_cr("failed to allocate "SIZE_FORMAT " bytes, free regions:", word_size * HeapWordSize); 499 // _free_regions->print(); 500 collect(GCCause::_allocation_failure); 501 result = allocate_memory_work(word_size); 502 } 503 504 // Only update monitoring counters when not calling from a write-barrier. 505 // Otherwise we might attempt to grab the Service_lock, which we must 506 // not do when coming from a write-barrier (because the thread might 507 // already hold the Compile_lock). 508 if (! 
evacuating) { 509 monitoring_support()->update_counters(); 510 } 511 512 return result; 513 } 514 515 bool ShenandoahHeap::call_from_write_barrier(bool evacuating) { 516 return evacuating && Thread::current()->is_Java_thread(); 517 } 518 519 bool ShenandoahHeap::check_grow_heap() { 520 521 assert(_free_regions->max_regions() >= _free_regions->active_regions(), "don't get negative"); 522 523 size_t available = _max_regions - _num_regions; 524 if (available == 0) { 525 return false; // Don't retry. 526 } 527 528 jbyte growing = Atomic::cmpxchg(1, &_growing_heap, 0); 529 if (growing == 0) { 530 // Only one thread succeeds this, and this one gets 531 // to grow the heap. All other threads can continue 532 // to allocate from the reserve. 533 grow_heap_by(MIN2(available, ShenandoahAllocReserveRegions)); 534 535 // Reset it back to 0, so that other threads can take it again. 536 Atomic::store(0, &_growing_heap); 537 return true; 538 } else { 539 // Let other threads work, then try again. 540 os::naked_yield(); 541 return true; 542 } 543 } 544 545 HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) { 546 547 if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) { 548 return allocate_large_memory(word_size); 549 } 550 551 jlong current_idx = _free_regions->current_index(); 552 assert(current_idx >= 0, "expect >= 0"); 553 ShenandoahHeapRegion* my_current_region = _free_regions->get(current_idx); 554 555 if (my_current_region == NULL) { 556 return NULL; // No more room to make a new region. OOM. 557 } 558 assert(my_current_region != NULL, "should have a region at this point"); 559 560 #ifdef ASSERT 561 if (my_current_region->is_in_collection_set()) { 562 print_heap_regions(); 563 } 564 #endif 565 assert(! my_current_region->is_in_collection_set(), "never get targetted regions in free-lists"); 566 assert(! 
my_current_region->is_humongous(), "never attempt to allocate from humongous object regions"); 567 568 HeapWord* result = my_current_region->par_allocate(word_size); 569 570 while (result == NULL && my_current_region != NULL) { 571 572 // 2nd attempt. Try next region. 573 size_t remaining = my_current_region->free(); 574 current_idx = _free_regions->par_claim_next(current_idx); 575 my_current_region = _free_regions->get(current_idx); 576 577 if (my_current_region == NULL) { 578 // tty->print("WTF: OOM error trying to allocate %ld words\n", word_size); 579 return NULL; // No more room to make a new region. OOM. 580 } 581 // _free_regions->increase_used(remaining); 582 assert(my_current_region != NULL, "should have a region at this point"); 583 assert(! my_current_region->is_in_collection_set(), "never get targetted regions in free-lists"); 584 assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions"); 585 result = my_current_region->par_allocate(word_size); 586 } 587 588 if (result != NULL) { 589 my_current_region->increase_live_data(word_size * HeapWordSize); 590 increase_used(word_size * HeapWordSize); 591 _free_regions->increase_used(word_size * HeapWordSize); 592 } 593 return result; 594 } 595 596 HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) { 597 598 uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize); 599 assert(required_regions <= _max_regions, "sanity check"); 600 ShenandoahHeapRegion* r = _free_regions->claim_contiguous(required_regions); 601 602 HeapWord* result = NULL; 603 604 if (r != NULL) { 605 result = r->bottom(); 606 607 if (ShenandoahTraceHumongous) { 608 tty->print_cr("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" in start region "SIZE_FORMAT, 609 (words * HeapWordSize) / K, p2i(result), r->region_number()); 610 } 611 } else { 612 if (ShenandoahTraceHumongous) { 613 tty->print_cr("allocating humongous object of size: 
"SIZE_FORMAT" KB at location "PTR_FORMAT" failed", 614 (words * HeapWordSize) / K, p2i(result)); 615 } 616 } 617 618 619 return result; 620 621 } 622 623 HeapWord* ShenandoahHeap::mem_allocate(size_t size, 624 bool* gc_overhead_limit_was_exceeded) { 625 626 #ifdef ASSERT 627 if (ShenandoahVerify && _numAllocs > 1000000) { 628 _numAllocs = 0; 629 } 630 _numAllocs++; 631 #endif 632 HeapWord* filler = allocate_memory(BrooksPointer::BROOKS_POINTER_OBJ_SIZE + size, false); 633 HeapWord* result = filler + BrooksPointer::BROOKS_POINTER_OBJ_SIZE; 634 if (filler != NULL) { 635 initialize_brooks_ptr(oop(result)); 636 _bytes_allocated_since_cm += size * HeapWordSize; 637 #ifdef ASSERT 638 if (ShenandoahTraceAllocations) { 639 if (*gc_overhead_limit_was_exceeded) 640 tty->print("gc_overhead_limit_was_exceeded"); 641 tty->print_cr("mem_allocate object of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ", 642 size, p2i(result), Thread::current()->osthread()->thread_id()); 643 } 644 #endif 645 646 assert(! heap_region_containing(result)->is_in_collection_set(), "never allocate in targetted region"); 647 return result; 648 } else { 649 /* 650 tty->print_cr("Out of memory. 
Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT, size, used(), _bytes_allocated_since_cm); 651 { 652 print_heap_regions(); 653 tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count()); 654 _free_regions->print(); 655 } 656 */ 657 return NULL; 658 } 659 } 660 661 class ParallelEvacuateRegionObjectClosure : public ObjectClosure { 662 private: 663 ShenandoahHeap* _heap; 664 Thread* _thread; 665 public: 666 ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) : 667 _heap(heap), _thread(Thread::current()) { 668 } 669 670 void do_object(oop p) { 671 672 #ifdef ASSERT 673 if (ShenandoahTraceEvacuations) { 674 tty->print_cr("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT" of size %d\n", p2i((HeapWord*) p), p->size()); 675 } 676 #endif 677 678 assert(_heap->is_marked_prev(p), "expect only marked objects"); 679 if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) { 680 _heap->evacuate_object(p, _thread); 681 } 682 } 683 }; 684 685 #ifdef ASSERT 686 class VerifyEvacuatedObjectClosure : public ObjectClosure { 687 688 public: 689 690 void do_object(oop p) { 691 if (ShenandoahHeap::heap()->is_marked_prev(p)) { 692 oop p_prime = oopDesc::bs()->read_barrier(p); 693 assert(! 
oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy"); 694 #ifdef ASSERT 695 if (p->klass() != p_prime->klass()) { 696 tty->print_cr("copy has different class than original:"); 697 p->klass()->print_on(tty); 698 p_prime->klass()->print_on(tty); 699 } 700 #endif 701 assert(p->klass() == p_prime->klass(), "Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) p_prime)); 702 // assert(p->mark() == p_prime->mark(), "Should have the same mark"); 703 assert(p->size() == p_prime->size(), "Should be the same size"); 704 assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once"); 705 } 706 } 707 }; 708 709 void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) { 710 if (ShenandoahGCVerbose) { 711 tty->print("Verifying From Region\n"); 712 from_region->print(); 713 } 714 715 VerifyEvacuatedObjectClosure verify_evacuation; 716 from_region->marked_object_iterate(&verify_evacuation); 717 } 718 #endif 719 720 void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) { 721 722 assert(from_region->getLiveData() > 0, "all-garbage regions are reclaimed earlier"); 723 724 ParallelEvacuateRegionObjectClosure evacuate_region(this); 725 726 #ifdef ASSERT 727 if (ShenandoahGCVerbose) { 728 tty->print_cr("parallel_evacuate_region starting from_region "SIZE_FORMAT": free_regions = "SIZE_FORMAT, 729 from_region->region_number(), _free_regions->count()); 730 } 731 #endif 732 733 marked_object_iterate(from_region, &evacuate_region); 734 735 #ifdef ASSERT 736 if (ShenandoahVerify && ! 
cancelled_concgc()) { 737 verify_evacuated_region(from_region); 738 } 739 if (ShenandoahGCVerbose) { 740 tty->print_cr("parallel_evacuate_region after from_region = "SIZE_FORMAT": free_regions = "SIZE_FORMAT, 741 from_region->region_number(), _free_regions->count()); 742 } 743 #endif 744 } 745 746 class ParallelEvacuationTask : public AbstractGangTask { 747 private: 748 ShenandoahHeap* _sh; 749 ShenandoahCollectionSet* _cs; 750 751 public: 752 ParallelEvacuationTask(ShenandoahHeap* sh, 753 ShenandoahCollectionSet* cs) : 754 AbstractGangTask("Parallel Evacuation Task"), 755 _cs(cs), 756 _sh(sh) {} 757 758 void work(uint worker_id) { 759 760 ShenandoahHeapRegion* from_hr = _cs->claim_next(); 761 762 while (from_hr != NULL) { 763 if (ShenandoahGCVerbose) { 764 tty->print_cr("Thread "INT32_FORMAT" claimed Heap Region "SIZE_FORMAT, 765 worker_id, 766 from_hr->region_number()); 767 from_hr->print(); 768 } 769 770 assert(from_hr->getLiveData() > 0, "all-garbage regions are reclaimed early"); 771 _sh->parallel_evacuate_region(from_hr); 772 773 if (_sh->cancelled_concgc()) { 774 // tty->print("We cancelled concgc while working on region %d\n", from_hr->region_number()); 775 // from_hr->print(); 776 break; 777 } 778 from_hr = _cs->claim_next(); 779 } 780 } 781 }; 782 783 class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure { 784 private: 785 ShenandoahHeap* _heap; 786 size_t _bytes_reclaimed; 787 public: 788 RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()) {} 789 790 bool doHeapRegion(ShenandoahHeapRegion* r) { 791 792 if (_heap->cancelled_concgc()) { 793 // The aborted marking bitmap needs to be cleared at the end of cycle. 794 // Setup the top-marker for this. 
795 r->set_top_prev_mark_bitmap(r->top_at_mark_start()); 796 797 return false; 798 } 799 800 r->swap_top_at_mark_start(); 801 802 if (r->is_in_collection_set()) { 803 // tty->print_cr("recycling region "INT32_FORMAT":", r->region_number()); 804 // r->print_on(tty); 805 // tty->print_cr(" "); 806 _heap->decrease_used(r->used()); 807 _bytes_reclaimed += r->used(); 808 r->recycle(); 809 _heap->free_regions()->add_region(r); 810 } 811 812 return false; 813 } 814 size_t bytes_reclaimed() { return _bytes_reclaimed;} 815 void clear_bytes_reclaimed() {_bytes_reclaimed = 0;} 816 }; 817 818 void ShenandoahHeap::recycle_dirty_regions() { 819 RecycleDirtyRegionsClosure cl; 820 cl.clear_bytes_reclaimed(); 821 822 heap_region_iterate(&cl); 823 824 _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed()); 825 if (! cancelled_concgc()) { 826 clear_cset_fast_test(); 827 } 828 } 829 830 ShenandoahFreeSet* ShenandoahHeap::free_regions() { 831 return _free_regions; 832 } 833 834 void ShenandoahHeap::print_heap_regions(outputStream* st) const { 835 PrintHeapRegionsClosure pc1(st); 836 heap_region_iterate(&pc1); 837 } 838 839 class PrintAllRefsOopClosure: public ExtendedOopClosure { 840 private: 841 int _index; 842 const char* _prefix; 843 844 public: 845 PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {} 846 847 private: 848 template <class T> 849 inline void do_oop_work(T* p) { 850 oop o = oopDesc::load_decode_heap_oop(p); 851 if (o != NULL) { 852 if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) { 853 tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT")-> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")", _prefix, _index, p2i(p), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(o)), o->klass()->internal_name(), p2i(o->klass())); 854 } else { 855 // tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty: %s) -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty (%s))", _prefix, _index, p2i(p), 
BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(o)->is_in_collection_set())); 856 tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)", _prefix, _index, p2i(p), p2i((HeapWord*) o)); 857 } 858 } else { 859 tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o)); 860 } 861 _index++; 862 } 863 864 public: 865 void do_oop(oop* p) { 866 do_oop_work(p); 867 } 868 869 void do_oop(narrowOop* p) { 870 do_oop_work(p); 871 } 872 873 }; 874 875 class PrintAllRefsObjectClosure : public ObjectClosure { 876 const char* _prefix; 877 878 public: 879 PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {} 880 881 void do_object(oop p) { 882 if (ShenandoahHeap::heap()->is_in(p)) { 883 tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:", _prefix, p2i((HeapWord*) p), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(p)), p->klass()->internal_name(), p2i(p->klass())); 884 PrintAllRefsOopClosure cl(_prefix); 885 p->oop_iterate(&cl); 886 } 887 } 888 }; 889 890 void ShenandoahHeap::print_all_refs(const char* prefix) { 891 tty->print_cr("printing all references in the heap"); 892 tty->print_cr("root references:"); 893 894 ensure_parsability(false); 895 896 PrintAllRefsOopClosure cl(prefix); 897 roots_iterate(&cl); 898 899 tty->print_cr("heap references:"); 900 PrintAllRefsObjectClosure cl2(prefix); 901 object_iterate(&cl2); 902 } 903 904 class VerifyAfterMarkingOopClosure: public ExtendedOopClosure { 905 private: 906 ShenandoahHeap* _heap; 907 908 public: 909 VerifyAfterMarkingOopClosure() : 910 _heap(ShenandoahHeap::heap()) { } 911 912 private: 913 template <class T> 914 inline void do_oop_work(T* p) { 915 oop o = oopDesc::load_decode_heap_oop(p); 916 if (o != NULL) { 917 if (! 
_heap->is_marked_prev(o)) { 918 _heap->print_heap_regions(); 919 _heap->print_all_refs("post-mark"); 920 tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s", 921 p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_prev(o))); 922 _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size()); 923 924 tty->print_cr("oop class: %s", o->klass()->internal_name()); 925 if (_heap->is_in(p)) { 926 oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p)); 927 tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer)); 928 referrer->print(); 929 _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size()); 930 } 931 tty->print_cr("heap region containing object:"); 932 _heap->heap_region_containing(o)->print(); 933 tty->print_cr("heap region containing referrer:"); 934 _heap->heap_region_containing(p)->print(); 935 tty->print_cr("heap region containing forwardee:"); 936 _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print(); 937 } 938 assert(o->is_oop(), "oop must be an oop"); 939 assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace"); 940 if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) { 941 tty->print_cr("oops has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)", p2i(p), BOOL_TO_STR(_heap->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o), BOOL_TO_STR(_heap->heap_region_containing(o)->is_in_collection_set()), p2i((HeapWord*) oopDesc::bs()->read_barrier(o)), BOOL_TO_STR(_heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->is_in_collection_set())); 942 tty->print_cr("oop class: %s", o->klass()->internal_name()); 943 } 944 assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded"); 945 assert(! 
_heap->heap_region_containing(o)->is_in_collection_set(), "references must not point to dirty heap regions");
      // NOTE(review): message says "marked current" but the predicate checks the
      // prev bitmap — message looks stale relative to the check; confirm upstream.
      assert(_heap->is_marked_prev(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }

};

// Applies the wrapped oop closure to the fields of every object marked in the
// *current* bitmap; unmarked objects are skipped.
class IterateMarkedCurrentObjectsClosure: public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  ExtendedOopClosure* _cl;   // closure applied to each marked object's fields
public:
  IterateMarkedCurrentObjectsClosure(ExtendedOopClosure* cl) :
    _heap(ShenandoahHeap::heap()), _cl(cl) {};

  void do_object(oop p) {
    if (_heap->is_marked_current(p)) {
      p->oop_iterate(_cl);
    }
  }

};

// Post-marking verification: checks heap-size bookkeeping, then verifies all roots
// and all marked objects with VerifyAfterMarkingOopClosure.
void ShenandoahHeap::verify_heap_after_marking() {

  verify_heap_size_consistency();

  if (ShenandoahGCVerbose) {
    tty->print("verifying heap after marking\n");
  }
  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);
}


// Reclaims a dead humongous object: resets the start region and all its
// continuation regions, and returns their bytes to the "used" accounting.
// `r` must be the humongous *start* region and must have zero live data.
void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

  // The object begins one Brooks-pointer slot past the region bottom.
  oop humongous_obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
  size_t size = humongous_obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  uint index = r->region_number();


  assert(r->getLiveData() == 0, "liveness must be zero");

  for(size_t i = 0; i < required_regions; i++) {

    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (ShenandoahTraceHumongous) {
      tty->print_cr("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);

      region->print();
    }

    region->reset();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

// Region closure: reclaims every humongous object whose start object is not
// marked in the current bitmap (i.e. died in this cycle).
class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
      if (! heap->is_marked_current(humongous_obj)) {

        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;   // keep iterating over all regions
  }
};

#ifdef ASSERT
// Debug-only: asserts that no region is still flagged as in the collection set.
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(!r->is_in_collection_set(), "Should have been cleared by now");
    return false;
  }
};
#endif

// Safepoint-time setup for concurrent evacuation: recycles dirty regions,
// reclaims dead humongous objects, and chooses the new collection and free sets.
// Skipped entirely if the concurrent GC was cancelled.
void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF");
  /*
  tty->print("Thread %d started prepare_for_concurrent_evacuation\n",
             Thread::current()->osthread()->thread_id());
  */
  if (!cancelled_concgc()) {

    recycle_dirty_regions();

    ensure_parsability(true);

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.


    _collection_set->clear();
    _free_regions->clear();

    ShenandoahReclaimHumongousRegionsClosure reclaim;
    heap_region_iterate(&reclaim);

    // _ordered_regions->print();
#ifdef ASSERT
    CheckCollectionSetClosure ccsc;
    _ordered_regions->heap_region_iterate(&ccsc);
#endif

    _shenandoah_policy->choose_collection_set(_collection_set);

    _shenandoah_policy->choose_free_set(_free_regions);

    /*
    tty->print("Sorted free regions\n");
    _free_regions->print();
    */

    // Nothing to evacuate: cancel the cycle rather than run an empty evacuation.
    if (_collection_set->count() == 0)
      cancel_concgc();

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}


// Thread closure: makes each thread's GCLAB parsable, optionally retiring it.
class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

// Makes the heap linearly parsable: retires/fills TLABs (via the superclass) and
// GCLABs of all Java threads and GC threads. No-op when TLABs are disabled.
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);

    RetireTLABClosure cl(retire_tlabs);
    for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
      cl.do_thread(thread);
    }
    gc_threads_do(&cl);
  }
}

// Root-updating closure used during evacuation: for each root that points into
// the collection set, evacuates the object (unless already forwarded) and stores
// the to-space address back into the root slot.
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;   // cached; evacuate_object allocates from this thread's GCLAB
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (!
oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_cset_fast_test((HeapWord*) obj)) {
        assert(_heap->is_marked_prev(obj), "only evacuate marked objects %d %d", _heap->is_marked_prev(obj), _heap->is_marked_prev(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        // If nobody evacuated this object yet (read barrier returns the object
        // itself), do it now; otherwise reuse the existing forwardee.
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s", p2i(p), p2i((HeapWord*) obj), BOOL_TO_STR(_heap->is_in(obj)), BOOL_TO_STR(_heap->in_cset_fast_test(obj)), BOOL_TO_STR(_heap->is_marked_current(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

// Parallel worker task: evacuates and updates all roots (including code blobs)
// using ShenandoahEvacuateUpdateRootsClosure via the shared root processor.
class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootProcessor* _rp;
public:

  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

// Safepoint operation: evacuates every root's referent out of the collection set
// and rewrites the root slots in parallel. Temporarily drops from-space protection
// when verification of from-space reads is enabled.
void ShenandoahHeap::evacuate_and_update_roots() {

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootProcessor rp(this, _max_parallel_workers, ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

}


// Runs the (parallel) evacuation phase, then, if requested and the cycle was not
// cancelled, verifies the heap — directly when already on the VM thread,
// otherwise through a VM operation.
void ShenandoahHeap::do_evacuation() {

  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }

}

// Evacuates the collection set with the concurrent worker gang; schedules a full
// GC if the cycle gets cancelled along the way.
void ShenandoahHeap::parallel_evacuate() {

  if (!
cancelled_concgc()) {

    if (ShenandoahGCVerbose) {
      tty->print_cr("starting parallel_evacuate");
      // PrintHeapRegionsClosure pc1;
      // heap_region_iterate(&pc1);
    }

    _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

    if (ShenandoahGCVerbose) {
      tty->print("Printing all available regions");
      print_heap_regions();
    }

    if (ShenandoahPrintCollectionSet) {
      tty->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->count());
      _collection_set->print();

      tty->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->count());
      _free_regions->print();

      // if (_collection_set->length() == 0)
      // print_heap_regions();
    }

    // The actual work: copy every live object out of the cset regions.
    ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

    conc_workers()->run_task(&evacuationTask);

    if (ShenandoahGCVerbose) {

      tty->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n",
                 _collection_set->count());

      _collection_set->print();

      tty->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n",
                 _free_regions->count());
      _free_regions->print();

      tty->print_cr("finished parallel_evacuate");
      print_heap_regions();

      tty->print_cr("all regions after evacuation:");
      print_heap_regions();
    }

    _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);

    // Evacuation may have been cancelled (e.g. OOM during copy): fall back to full GC.
    if (cancelled_concgc()) {
      // tty->print("GOTCHA: by thread %d", Thread::current()->osthread()->thread_id());
      concurrent_thread()->schedule_full_gc();
      // tty->print("PostGotcha: by thread %d FullGC should be scheduled\n",
      // Thread::current()->osthread()->thread_id());
    }
  }
}

// Verification closure: after a region has been evacuated, no reference anywhere
// may still point into that from-region.
class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahHeapRegion* _from_region;   // the region that was just evacuated

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }

};

// Applies `cl` to all strong roots (single-threaded). Must run at a safepoint.
void ShenandoahHeap::roots_iterate(OopClosure* cl) {

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

// Checks that no root still references the given (evacuated) from-region.
void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {

  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);

}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}


// Upper bound for a TLAB allocation: free space of the current allocation region,
// or MinTLABSize as a floor; 0 when no region is available.
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  jlong idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get(idx);
  if (current == NULL)
    return 0;
  else if (current->free() > MinTLABSize) {
    return current->free();
  } else {
    return MinTLABSize;
  }
}

// A TLAB can never exceed one heap region.
size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}

// Thread closure: lets each thread's GCLAB recompute its desired size.
class ResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().resize();
  }
};

// Resizes TLABs (superclass) and GCLABs of all Java and GC threads.
void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ResizeGCLABClosure cl;
  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);

}

// Thread closure: folds each GCLAB's stats into the global numbers, then resets them.
class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

// Accumulates and resets GCLAB statistics for all Java and GC threads.
void ShenandoahHeap::accumulate_statistics_all_gclabs() {

  AccumulateStatisticsGCLABClosure cl;
  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

// Entry point for externally requested collections. User-requested GCs (unless
// explicitly disabled) and allocation failures both cancel any concurrent cycle
// and hand a full GC to the concurrent GC thread.
void ShenandoahHeap::collect(GCCause::Cause cause) {
  assert(cause != GCCause::_gc_locker, "no JNI critical callback");
  if (GCCause::is_user_requested_gc(cause)) {
    if (!
DisableExplicitGC) {
      cancel_concgc();
      _concurrent_gc_thread->do_full_gc(cause);
    }
  } else if (cause == GCCause::_allocation_failure) {

    cancel_concgc();
    // Allocation failure: be aggressive and clear soft references too.
    collector_policy()->set_should_clear_all_soft_refs(true);
    _concurrent_gc_thread->do_full_gc(cause);

  }
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

// Not supported by this collector.
AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;

}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}


// Delegates block_start to the region containing `addr`; NULL if outside the heap.
HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

// Not tracked by this collector.
jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}

// Makes the heap parsable for verification; only possible at a safepoint
// (or trivially when TLABs are off).
void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ensure_parsability(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
  conc_workers()->print_worker_threads_on(st);
}

// Applies `tcl` to both the parallel and the concurrent worker threads.
void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  conc_workers()->threads_do(tcl);
}

void ShenandoahHeap::print_tracing_info() const {
  if (ShenandoahPrintGCDetails) {
    _shenandoah_policy->print_tracing_info();
  }
}

// Root verification closure: each non-null root referent must be a valid oop.
// NOTE(review): _failures is reported via failures() but never set in the visible
// code — verify whether any path updates it.
class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  VerifyOption _vo;
  bool _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord -> use mark word from object header.
  ShenandoahVerifyRootsClosure(VerifyOption vo) :
    _heap(ShenandoahHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      { // Just for debugging.
        tty->print_cr("Root location "PTR_FORMAT
                      "verified "PTR_FORMAT, p2i(p), p2i((void*) obj));
        // obj->print_on(tty);
      }
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }

};

// Object closure that re-uses the roots verification closure on each object's
// own address (checks the object itself is a valid oop).
class ShenandoahVerifyHeapClosure: public ObjectClosure {
private:
  ShenandoahVerifyRootsClosure _rootsCl;
public:
  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {};

  void do_object(oop p) {
    _rootsCl.do_oop(&p);
  }
};

// Klass closure forwarding each klass's oops to the wrapped oop closure.
class ShenandoahVerifyKlassClosure: public KlassClosure {
  OopClosure *_oop_closure;
public:
  ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
  void do_klass(Klass* k) {
    k->oops_do(_oop_closure);
  }
};

// CollectedHeap verification entry point: verifies roots and all heap objects.
// Requires a safepoint (or TLABs disabled); otherwise verification is skipped.
void ShenandoahHeap::verify(VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {

    ShenandoahVerifyRootsClosure rootsCl(vo);

    assert(Thread::current()->is_VM_thread(),
           "Expected to be executed serially by the VM thread at this point");

    roots_iterate(&rootsCl);

    bool failures = rootsCl.failures();
    if (ShenandoahGCVerbose)
      tty->print("verify failures: %s", BOOL_TO_STR(failures));

    ShenandoahVerifyHeapClosure heapCl(rootsCl);

    object_iterate(&heapCl);
    // TODO: Implement rest of it.
#ifdef ASSERT_DISABLED
    verify_live();
#endif
  } else {
    tty->print("(SKIPPING roots, heapRegions, remset) ");
  }
}
size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_regions->capacity();
}

// Region closure that walks each region's marked objects with the given closure.
class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
  ObjectClosure* _cl;
public:
  ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
    return false;
  }
};

// Iterates all (marked) objects in the heap, skipping humongous continuations
// (their objects are visited via the humongous start region).
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  ShenandoahIterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk, false, true);
}

void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
  Unimplemented();
}

// Convenience overload: iterate the whole region [bottom, top).
void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl) {
  marked_object_iterate(region, cl, region->bottom(), region->top());
}

// Walks objects in [addr, limit) of `region`. Below the prev-TAMS boundary only
// objects marked in the prev bitmap exist to visit (found via the bitmap); above
// it, objects are walked linearly. Each object is preceded by its Brooks pointer
// slot, hence the BROOKS_POINTER_OBJ_SIZE adjustments.
void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl,
                                           HeapWord* addr, HeapWord* limit) {
  addr += BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
  HeapWord* last_addr = NULL;
  size_t last_size = 0;
  HeapWord* top_at_mark_start = region->top_at_prev_mark_start();
  HeapWord* heap_end = _ordered_regions->end();
  while (addr < limit) {
    if (addr < top_at_mark_start) {
      // Jump to the next marked object; search end is clamped to the heap end.
      HeapWord* end = top_at_mark_start + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
      end = MIN2(end, heap_end);
      addr = _prev_mark_bit_map->getNextMarkedWordAddress(addr, end);
    }
    if (addr < limit) {
      oop obj = oop(addr);
      assert(is_marked_prev(obj), "object expected to be marked");
      cl->do_object(obj);
      last_addr = addr;
      last_size = obj->size();
      addr += obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
    } else {
      break;
    }
  }
}

// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current = _ordered_regions->get(i);
    if (skip_humongous_continuation && current->is_humongous_continuation()) {
      continue;
    }
    if (skip_dirty_regions && current->is_in_collection_set()) {
      continue;
    }
    if (blk->doHeapRegion(current)) {
      return;
    }
  }
}

// Region closure: resets per-region liveness counters and TAMS before marking.
class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
  ShenandoahHeap* sh;
public:
  ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->clearLiveData();
    r->init_top_at_mark_start();
    return false;
  }
};


// Kicks off a concurrent-marking cycle: accumulates TLAB stats, flips the
// marking flag (activating SATB), retires TLABs, clears per-region liveness,
// and scans the root set. Each step is timed via the policy's phase recorder.
void ShenandoahHeap::start_concurrent_marking() {

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
  accumulate_statistics_all_tlabs();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
    ensure_parsability(true);
    shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
  }

  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

#ifdef ASSERT
  if (ShenandoahDumpHeapBeforeConcurrentMark) {
    ensure_parsability(false);
    print_all_refs("pre-mark");
  }
#endif

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
  ClearLivenessClosure clc(this);
  heap_region_iterate(&clc);
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);

  // print_all_refs("pre -mark");

  // oopDesc::_debug = true;

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
  concurrentMark()->init_mark_roots();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);

  // print_all_refs("pre-mark2");
}


// Liveness verification closure: a referent is forwarded iff its region is in
// the collection set; after resolving through the read barrier the referent
// must live in a clean region and be marked in the current bitmap.
class VerifyLivenessClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyLivenessClosure() : _sh ( ShenandoahHeap::heap() ) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s",
                BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
                BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)))
                );
      obj = oopDesc::bs()->read_barrier(obj);
      guarantee(!
_sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
      // Dump the heap before asserting so the failure is diagnosable.
      if (! sh->is_marked_current(obj)) {
        sh->print_on(tty);
      }
      assert(sh->is_marked_current(obj), "Referenced Objects should be marked obj: "PTR_FORMAT", marked: %s, is_in_heap: %s",
             p2i((HeapWord*) obj), BOOL_TO_STR(sh->is_marked_current(obj)), BOOL_TO_STR(sh->is_in(obj)));
    }
  }

  void do_oop(oop* p) { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};

// Verifies liveness invariants over all roots and all currently-marked objects.
void ShenandoahHeap::verify_live() {

  VerifyLivenessClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);

}

// Post-evacuation verification closure: same forwarding/cset invariant as
// VerifyLivenessClosure, plus a metaspace check on the klass pointer.
class VerifyAfterEvacuationClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s obj-klass: %s, marked: %s",
                BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
                BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))), obj->klass()->external_name(), BOOL_TO_STR(_sh->is_marked_current(obj))
                );
      obj = oopDesc::bs()->read_barrier(obj);
      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p) { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};

// Post-update-refs verification closure: after references have been updated,
// no live reference may point into the cset or at a still-forwarded object.
class VerifyAfterUpdateRefsClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyAfterUpdateRefsClosure() : _sh ( ShenandoahHeap::heap() ) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee((! _sh->heap_region_containing(obj)->is_in_collection_set()),
                "no live reference must point to from-space, is_marked: %s",
                BOOL_TO_STR(_sh->is_marked_current(obj)));
      if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)) && _sh->is_in(p)) {
        tty->print_cr("top-limit: "PTR_FORMAT", p: "PTR_FORMAT, p2i(_sh->heap_region_containing(p)->concurrent_iteration_safe_limit()), p2i(p));
      }
      guarantee(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "no live reference must point to forwarded object");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p) { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};

// Verifies the heap after evacuation: size bookkeeping, then the post-evacuation
// invariants over all roots and marked objects.
void ShenandoahHeap::verify_heap_after_evacuation() {

  verify_heap_size_consistency();

  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);

}

// Region closure: after update-refs no region may still be flagged (or fast-test
// flagged) as part of the collection set.
class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! r->is_in_collection_set(), "no region must be in collection set");
    assert(! ShenandoahHeap::heap()->in_cset_fast_test(r->bottom()), "no region must be in collection set");
    return false;
  }
};

// Exchanges the prev and next marking bitmaps at the end of a marking cycle.
void ShenandoahHeap::swap_mark_bitmaps() {
  CMBitMap* tmp = _prev_mark_bit_map;
  _prev_mark_bit_map = _next_mark_bit_map;
  _next_mark_bit_map = tmp;
}

// Finishes the concurrent-marking phase: on a successful (non-cancelled) cycle,
// clears the update-refs request and promotes the just-built bitmap to "prev";
// then deactivates marking (and with it the SATB barrier).
void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // If we needed to update refs, and concurrent marking has been cancelled,
    // we need to finish updating references.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
  set_concurrent_mark_in_progress(false);
  if (ShenandoahGCVerbose) {
    print_heap_regions();
  }

}

// Flips the marking flag and (de)activates the SATB mark queues of all Java
// threads to match.
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  _concurrent_mark_in_progress = in_progress;
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, ! in_progress);
}

// Publishes the evacuation flag to all Java threads; the trailing fence orders
// the flag update against subsequent accesses.
void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
  _evacuation_in_progress = in_progress;
  OrderAccess::fence();
}

// Debug check that `c` is a faithful copy of `p`: p must forward to c, sizes and
// klasses must agree, and c itself must not be forwarded again.
void ShenandoahHeap::verify_copy(oop p,oop c){
    assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
    assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
    if (p->klass() != c->klass()) {
      print_heap_regions();
    }
    assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
    assert(p->size() == c->size(), "verify size");
    // Object may have been locked between copy and verification
    // assert(p->mark() == c->mark(), "verify mark");
    assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}

// Handles out-of-memory during evacuation: requests soft-ref clearing, schedules
// a full GC and cancels the concurrent cycle. Java (mutator) threads then spin
// parked until evacuation has been wound down.
void ShenandoahHeap::oom_during_evacuation() {
  // tty->print_cr("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
  //               Thread::current()->osthread()->thread_id());

  // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
  collector_policy()->set_should_clear_all_soft_refs(true);
  concurrent_thread()->schedule_full_gc();
  cancel_concgc();

  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    if (ShenandoahWarnings) {
      tty->print_cr("OOM during evacuation. Let Java thread wait until evacuation settlded..");
    }
    while (_evacuation_in_progress) { // wait.
      Thread::current()->_ParkEvent->park(1) ;
    }
  }

}

// Reserves the Brooks-pointer slot at the front of a freshly allocated TLAB chunk
// and returns the address where the object proper will start.
HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  HeapWord* result = obj + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
  initialize_brooks_ptr(oop(result));
  return result;
}

// Every object carries one extra word (slot) for its Brooks forwarding pointer.
uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
}

// Expands the heap by `num_regions` new regions: commits storage, initializes
// each region, registers it in the ordered/sorted sets and the cset fast-test /
// TAMS side tables, and finally publishes all of them to the free set at once.
void ShenandoahHeap::grow_heap_by(size_t num_regions) {
  size_t base = _num_regions;
  ensure_new_regions(num_regions);

  ShenandoahHeapRegion* regions[num_regions];
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion();
    size_t new_region_index = i + base;
    HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
    new_region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
    if (ShenandoahGCVerbose) {
      tty->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
      new_region->print();
    }

    assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
    _ordered_regions->add_region(new_region);
    _sorted_regions->add_region(new_region);
    _in_cset_fast_test_base[new_region_index] = false; // Not in cset
    _top_at_mark_starts_base[new_region_index] = new_region->bottom();

    regions[i] = new_region;
  }
  _free_regions->par_add_regions(regions, 0, num_regions, num_regions);
}

// Commits backing storage for `new_regions` additional regions and bumps the
// region count. Capacity headroom was validated by the caller.
void ShenandoahHeap::ensure_new_regions(size_t new_regions) {

  size_t num_regions = _num_regions;
  size_t new_num_regions = num_regions + new_regions;
  assert(new_num_regions <= _max_regions, "we checked this earlier");

  size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
  if (ShenandoahGCVerbose) {
    tty->print_cr("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
  }
  bool success = _storage.expand_by(expand_size);
  assert(success, "should always be able to expand by requested size");

  _num_regions = new_num_regions;

}

ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

void ShenandoahIsAliveClosure::init(ShenandoahHeap* heap) {
  _heap = heap;
}

// Liveness query for reference processing: an object is alive iff it is marked
// in the current bitmap. The argument must already be a to-space oop.
bool ShenandoahIsAliveClosure::do_object_b(oop obj) {

  assert(_heap != NULL, "sanity");
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_current(obj);
}

ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
  _heap = heap;
}

// Same liveness query, but first resolves a possibly-forwarded oop through the
// barrier so from-space references can be queried.
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {

  assert(_heap != NULL, "sanity");
  obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_current(obj);
}

// Sets up reference processing over the whole reserved heap; MT settings follow
// ConcGCThreads. (Constructor argument list continues in the next chunk.)
void ShenandoahHeap::ref_processing_init() {
  MemRegion mr = reserved_region();

  isAlive.init(ShenandoahHeap::heap());
  _ref_processor =
    new ReferenceProcessor(mr,    // span
                           ParallelRefProcEnabled && (ConcGCThreads > 1),
                           // mt processing
                           (int) ConcGCThreads,
                           // degree of mt processing
                           (ConcGCThreads > 1),
                           // mt
discovery 1994 (int) ConcGCThreads, 1995 // degree of mt discovery 1996 false, 1997 // Reference discovery is not atomic 1998 &isAlive); 1999 2000 } 2001 2002 #ifdef ASSERT 2003 void ShenandoahHeap::set_from_region_protection(bool protect) { 2004 for (uint i = 0; i < _num_regions; i++) { 2005 ShenandoahHeapRegion* region = _ordered_regions->get(i); 2006 if (region != NULL && region->is_in_collection_set()) { 2007 if (protect) { 2008 region->memProtectionOn(); 2009 } else { 2010 region->memProtectionOff(); 2011 } 2012 } 2013 } 2014 } 2015 #endif 2016 2017 size_t ShenandoahHeap::num_regions() { 2018 return _num_regions; 2019 } 2020 2021 size_t ShenandoahHeap::max_regions() { 2022 return _max_regions; 2023 } 2024 2025 GCTracer* ShenandoahHeap::tracer() { 2026 return shenandoahPolicy()->tracer(); 2027 } 2028 2029 size_t ShenandoahHeap::tlab_used(Thread* thread) const { 2030 return _free_regions->used(); 2031 } 2032 2033 void ShenandoahHeap::cancel_concgc() { 2034 // only report it once 2035 if (!_cancelled_concgc) { 2036 log_info(gc)("Cancelling GC"); 2037 _cancelled_concgc = true; 2038 OrderAccess::fence(); 2039 _shenandoah_policy->report_concgc_cancelled(); 2040 } 2041 2042 } 2043 2044 void ShenandoahHeap::clear_cancelled_concgc() { 2045 _cancelled_concgc = false; 2046 } 2047 2048 int ShenandoahHeap::max_workers() { 2049 return _max_workers; 2050 } 2051 2052 int ShenandoahHeap::max_parallel_workers() { 2053 return _max_parallel_workers; 2054 } 2055 int ShenandoahHeap::max_conc_workers() { 2056 return _max_conc_workers; 2057 } 2058 2059 void ShenandoahHeap::stop() { 2060 // We set this early here, to let GC threads terminate before we ask the concurrent thread 2061 // to terminate, which would otherwise block until all GC threads come to finish normally. 
2062 _cancelled_concgc = true; 2063 _concurrent_gc_thread->stop(); 2064 cancel_concgc(); 2065 } 2066 2067 void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) { 2068 2069 StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols); 2070 workers()->run_task(&shenandoah_unlink_task); 2071 2072 // if (G1StringDedup::is_enabled()) { 2073 // G1StringDedup::unlink(is_alive); 2074 // } 2075 } 2076 2077 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) { 2078 _need_update_refs = need_update_refs; 2079 } 2080 2081 //fixme this should be in heapregionset 2082 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) { 2083 size_t region_idx = r->region_number() + 1; 2084 ShenandoahHeapRegion* next = _ordered_regions->get(region_idx); 2085 guarantee(next->region_number() == region_idx, "region number must match"); 2086 while (next->is_humongous()) { 2087 region_idx = next->region_number() + 1; 2088 next = _ordered_regions->get(region_idx); 2089 guarantee(next->region_number() == region_idx, "region number must match"); 2090 } 2091 return next; 2092 } 2093 2094 bool ShenandoahHeap::is_in_collection_set(const void* p) { 2095 return heap_region_containing(p)->is_in_collection_set(); 2096 } 2097 2098 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() { 2099 return _monitoring_support; 2100 } 2101 2102 bool ShenandoahHeap::is_obj_dead(const oop obj, const ShenandoahHeapRegion* r) const { 2103 return ! r->allocated_after_prev_mark_start((HeapWord*) obj) && 2104 ! 
is_marked_prev(obj, r); 2105 } 2106 CMBitMap* ShenandoahHeap::prev_mark_bit_map() { 2107 return _prev_mark_bit_map; 2108 } 2109 2110 CMBitMap* ShenandoahHeap::next_mark_bit_map() { 2111 return _next_mark_bit_map; 2112 } 2113 2114 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) { 2115 _free_regions->add_region(r); 2116 } 2117 2118 void ShenandoahHeap::clear_free_regions() { 2119 _free_regions->clear(); 2120 } 2121 2122 void ShenandoahHeap::register_region_with_in_cset_fast_test(ShenandoahHeapRegion* r) { 2123 assert(_in_cset_fast_test_base != NULL, "sanity"); 2124 assert(r->is_in_collection_set(), "invariant"); 2125 uint index = r->region_number(); 2126 assert(index < _in_cset_fast_test_length, "invariant"); 2127 assert(!_in_cset_fast_test_base[index], "invariant"); 2128 _in_cset_fast_test_base[index] = true; 2129 } 2130 2131 address ShenandoahHeap::in_cset_fast_test_addr() { 2132 return (address) (ShenandoahHeap::heap()->_in_cset_fast_test); 2133 } 2134 2135 void ShenandoahHeap::clear_cset_fast_test() { 2136 assert(_in_cset_fast_test_base != NULL, "sanity"); 2137 memset(_in_cset_fast_test_base, false, 2138 (size_t) _in_cset_fast_test_length * sizeof(bool)); 2139 } 2140 2141 size_t ShenandoahHeap::conservative_max_heap_alignment() { 2142 return HeapRegionBounds::max_size(); 2143 } 2144 2145 size_t ShenandoahHeap::bytes_allocated_since_cm() { 2146 return _bytes_allocated_since_cm; 2147 } 2148 2149 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) { 2150 _bytes_allocated_since_cm = bytes; 2151 } 2152 2153 size_t ShenandoahHeap::max_allocated_gc() { 2154 return _max_allocated_gc; 2155 } 2156 2157 void ShenandoahHeap::set_top_at_mark_start(HeapWord* region_base, HeapWord* addr) { 2158 uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift; 2159 _top_at_mark_starts[index] = addr; 2160 } 2161 2162 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) { 2163 _full_gc_in_progress = in_progress; 2164 } 2165 2166 bool 
ShenandoahHeap::is_full_gc_in_progress() const { 2167 return _full_gc_in_progress; 2168 } 2169 2170 bool ShenandoahHeap::needs_reference_pending_list_locker_thread() const { 2171 return true; 2172 } 2173 2174 class NMethodOopInitializer : public OopClosure { 2175 private: 2176 ShenandoahHeap* _heap; 2177 public: 2178 NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) { 2179 } 2180 2181 private: 2182 template <class T> 2183 inline void do_oop_work(T* p) { 2184 T o = oopDesc::load_heap_oop(p); 2185 if (! oopDesc::is_null(o)) { 2186 oop obj1 = oopDesc::decode_heap_oop_not_null(o); 2187 oop obj2 = oopDesc::bs()->write_barrier(obj1); 2188 if (! oopDesc::unsafe_equals(obj1, obj2)) { 2189 oopDesc::encode_store_heap_oop(p, obj2); 2190 } 2191 } 2192 } 2193 2194 public: 2195 void do_oop(oop* o) { 2196 do_oop_work(o); 2197 } 2198 void do_oop(narrowOop* o) { 2199 do_oop_work(o); 2200 } 2201 }; 2202 2203 void ShenandoahHeap::register_nmethod(nmethod* nm) { 2204 NMethodOopInitializer init; 2205 nm->oops_do(&init); 2206 nm->fix_oop_relocations(); 2207 } 2208 2209 void ShenandoahHeap::unregister_nmethod(nmethod* nm) { 2210 } 2211 2212 void ShenandoahHeap::enter_critical(oop o) { 2213 heap_region_containing(o)->enter_critical(); 2214 } 2215 2216 void ShenandoahHeap::exit_critical(oop o) { 2217 heap_region_containing(o)->exit_critical(); 2218 } 2219 2220 2221 GCTimer* ShenandoahHeap::gc_timer() const { 2222 return _gc_timer; 2223 }