/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
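  // Each mark bit covers (HeapWordSize << _shifter) bytes, so only
  // addresses at that granularity can carry a mark; rounding up can
  // therefore not skip a possible object start.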
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return heap_size / mark_distance();
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
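  // Clearing is chunked (1M of heap at a time, see doHeapRegion) so that
  // a pending yield request is noticed between chunks rather than only
  // once per region.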
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    if (_suspendible) {
      SuspendibleThreadSet::join();
    }
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
    if (_suspendible) {
      SuspendibleThreadSet::leave();
    }
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
}

void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

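// Illustrative note (not part of the original sources): with the default
// 8-byte MinObjAlignmentInBytes, mark_distance() is 8 * 8 = 64, i.e. each
// bitmap byte covers 64 heap bytes (one bit per heap word), so a mark
// bitmap is 1/64th the size of the heap it covers. The sketch below shows
// one way a caller might drain all marked runs in [bottom, end) using
// getAndClearMarkedRegion(); "bm", "bottom" and "end" are assumed to be
// supplied by the caller.
#if 0
  HeapWord* cur = bottom;
  while (cur < end) {
    MemRegion run = bm->getAndClearMarkedRegion(cur, end);
    if (run.is_empty()) {
      break;  // no marked words left in [cur, end)
    }
    // ... process the formerly marked run [run.start(), run.end()) ...
    cur = run.end();
  }
#endif
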
CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
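    // (The CAS failed because another thread advanced _index; the loop
    // re-reads _index and re-checks for overflow before retrying.)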
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area */),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area */),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {

  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor             = sleep_factor;
    _marking_task_overhead    = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
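    // (For example, ParallelGCThreads == 8 gives
    // scale_parallel_threads(8) == MAX2((8 + 2) / 4, 1U) == 2.)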
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
                   (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
                                           _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
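    // Two cases are checked below: MarkStackSize given on the command
    // line against a default MarkStackSizeMax, and both values given on
    // the command line.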
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently with the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow count will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause takes place. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial */);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};
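// Note on the throttling in CMConcurrentMarkingTask::work() above: a
// worker whose marking step aborted (while marking as a whole has not)
// sleeps for elapsed_vtime_sec * sleep_factor. With a sleep_factor of
// 1.0, a 10 ms marking step is followed by a ~10 ms sleep, i.e. roughly
// a 50% duty cycle for that marking thread.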
// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots()
  // and the decisions on that MT processing is made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()).
      // Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify this region's bit in the actual region bitmap against the
    // expected bitmap that was just calculated.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
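    // Each index in the card bitmaps corresponds to one card, i.e.
    // CardTableModRefBS::card_size bytes of heap (512 bytes with the
    // default card_shift of 9).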
1639 1640 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1641 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1642 1643 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1644 expected = _exp_card_bm->at(i); 1645 actual = _card_bm->at(i); 1646 1647 if (expected && !actual) { 1648 if (_verbose) { 1649 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1650 "expected: %s, actual: %s", 1651 hr->hrm_index(), i, 1652 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1653 } 1654 failures += 1; 1655 } 1656 } 1657 1658 if (failures > 0 && _verbose) { 1659 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1660 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1661 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1662 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1663 } 1664 1665 _failures += failures; 1666 1667 // We could stop iteration over the heap when we 1668 // find the first violating region by returning true. 1669 return false; 1670 } 1671 }; 1672 1673 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1674 protected: 1675 G1CollectedHeap* _g1h; 1676 ConcurrentMark* _cm; 1677 BitMap* _actual_region_bm; 1678 BitMap* _actual_card_bm; 1679 1680 uint _n_workers; 1681 1682 BitMap* _expected_region_bm; 1683 BitMap* _expected_card_bm; 1684 1685 int _failures; 1686 bool _verbose; 1687 1688 HeapRegionClaimer _hrclaimer; 1689 1690 public: 1691 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1692 BitMap* region_bm, BitMap* card_bm, 1693 BitMap* expected_region_bm, BitMap* expected_card_bm) 1694 : AbstractGangTask("G1 verify final counting"), 1695 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1696 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1697 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1698 _failures(0), _verbose(false), 1699 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1700 assert(VerifyDuringGC, "don't call this otherwise"); 1701 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1702 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1703 1704 _verbose = _cm->verbose_medium(); 1705 } 1706 1707 void work(uint worker_id) { 1708 assert(worker_id < _n_workers, "invariant"); 1709 1710 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1711 _actual_region_bm, _actual_card_bm, 1712 _expected_region_bm, 1713 _expected_card_bm, 1714 _verbose); 1715 1716 _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer); 1717 1718 Atomic::add(verify_cl.failures(), &_failures); 1719 } 1720 1721 int failures() const { return _failures; } 1722 }; 1723 1724 // Closure that finalizes the liveness counting data. 1725 // Used during the cleanup pause. 1726 // Sets the bits corresponding to the interval [NTAMS, top] 1727 // (which contains the implicitly live objects) in the 1728 // card liveness bitmap. Also sets the bit for each region, 1729 // containing live data, in the region liveness bitmap. 1730 1731 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1732 public: 1733 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1734 BitMap* region_bm, 1735 BitMap* card_bm) : 1736 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1737 1738 bool doHeapRegion(HeapRegion* hr) { 1739 1740 if (hr->is_continues_humongous()) { 1741 // We will ignore these here and process them when their 1742 // associated "starts humongous" region is processed (see 1743 // set_bit_for_heap_region()). 
Note that we cannot rely on their 1744 // associated "starts humongous" region to have their bit set to 1745 // 1 since, due to the region chunking in the parallel region 1746 // iteration, a "continues humongous" region might be visited 1747 // before its associated "starts humongous". 1748 return false; 1749 } 1750 1751 HeapWord* ntams = hr->next_top_at_mark_start(); 1752 HeapWord* top = hr->top(); 1753 1754 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1755 1756 // Mark the allocated-since-marking portion... 1757 if (ntams < top) { 1758 // This definitely means the region has live objects. 1759 set_bit_for_region(hr); 1760 1761 // Now set the bits in the card bitmap for [ntams, top) 1762 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1763 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1764 1765 // Note: if we're looking at the last region in heap - top 1766 // could be actually just beyond the end of the heap; end_idx 1767 // will then correspond to a (non-existent) card that is also 1768 // just beyond the heap. 1769 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1770 // end of object is not card aligned - increment to cover 1771 // all the cards spanned by the object 1772 end_idx += 1; 1773 } 1774 1775 assert(end_idx <= _card_bm->size(), 1776 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1777 end_idx, _card_bm->size())); 1778 assert(start_idx < _card_bm->size(), 1779 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1780 start_idx, _card_bm->size())); 1781 1782 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1783 } 1784 1785 // Set the bit for the region if it contains live data 1786 if (hr->next_marked_bytes() > 0) { 1787 set_bit_for_region(hr); 1788 } 1789 1790 return false; 1791 } 1792 }; 1793 1794 class G1ParFinalCountTask: public AbstractGangTask { 1795 protected: 1796 G1CollectedHeap* _g1h; 1797 ConcurrentMark* _cm; 1798 BitMap* _actual_region_bm; 1799 BitMap* _actual_card_bm; 1800 1801 uint _n_workers; 1802 HeapRegionClaimer _hrclaimer; 1803 1804 public: 1805 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1806 : AbstractGangTask("G1 final counting"), 1807 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1808 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1809 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1810 } 1811 1812 void work(uint worker_id) { 1813 assert(worker_id < _n_workers, "invariant"); 1814 1815 FinalCountDataUpdateClosure final_update_cl(_g1h, 1816 _actual_region_bm, 1817 _actual_card_bm); 1818 1819 _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer); 1820 } 1821 }; 1822 1823 class G1ParNoteEndTask; 1824 1825 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1826 G1CollectedHeap* _g1; 1827 size_t _max_live_bytes; 1828 uint _regions_claimed; 1829 size_t _freed_bytes; 1830 FreeRegionList* _local_cleanup_list; 1831 HeapRegionSetCount _old_regions_removed; 1832 HeapRegionSetCount _humongous_regions_removed; 1833 HRRSCleanupTask* _hrrs_cleanup_task; 1834 double _claimed_region_time; 1835 double _max_region_time; 1836 1837 public: 1838 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1839 FreeRegionList* local_cleanup_list, 1840 HRRSCleanupTask* hrrs_cleanup_task) : 1841 _g1(g1), 1842 _max_live_bytes(0), _regions_claimed(0), 1843 _freed_bytes(0), 1844 _claimed_region_time(0.0), _max_region_time(0.0), 1845 _local_cleanup_list(local_cleanup_list), 1846 
_old_regions_removed(), 1847 _humongous_regions_removed(), 1848 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1849 1850 size_t freed_bytes() { return _freed_bytes; } 1851 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } 1852 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } 1853 1854 bool doHeapRegion(HeapRegion *hr) { 1855 if (hr->is_continues_humongous()) { 1856 return false; 1857 } 1858 // We use a claim value of zero here because all regions 1859 // were claimed with value 1 in the FinalCount task. 1860 _g1->reset_gc_time_stamps(hr); 1861 double start = os::elapsedTime(); 1862 _regions_claimed++; 1863 hr->note_end_of_marking(); 1864 _max_live_bytes += hr->max_live_bytes(); 1865 1866 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1867 _freed_bytes += hr->used(); 1868 hr->set_containing_set(NULL); 1869 if (hr->is_humongous()) { 1870 assert(hr->is_starts_humongous(), "we should only see starts humongous"); 1871 _humongous_regions_removed.increment(1u, hr->capacity()); 1872 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1873 } else { 1874 _old_regions_removed.increment(1u, hr->capacity()); 1875 _g1->free_region(hr, _local_cleanup_list, true); 1876 } 1877 } else { 1878 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1879 } 1880 1881 double region_time = (os::elapsedTime() - start); 1882 _claimed_region_time += region_time; 1883 if (region_time > _max_region_time) { 1884 _max_region_time = region_time; 1885 } 1886 return false; 1887 } 1888 1889 size_t max_live_bytes() { return _max_live_bytes; } 1890 uint regions_claimed() { return _regions_claimed; } 1891 double claimed_region_time_sec() { return _claimed_region_time; } 1892 double max_region_time_sec() { return _max_region_time; } 1893 }; 1894 1895 class G1ParNoteEndTask: public AbstractGangTask { 1896 friend class G1NoteEndOfConcMarkClosure; 1897 1898 protected: 1899 G1CollectedHeap* _g1h; 1900 size_t _max_live_bytes; 1901 size_t _freed_bytes; 1902 FreeRegionList* _cleanup_list; 1903 HeapRegionClaimer _hrclaimer; 1904 1905 public: 1906 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1907 AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { 1908 } 1909 1910 void work(uint worker_id) { 1911 FreeRegionList local_cleanup_list("Local Cleanup List"); 1912 HRRSCleanupTask hrrs_cleanup_task; 1913 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1914 &hrrs_cleanup_task); 1915 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); 1916 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1917 1918 // Now update the lists 1919 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1920 { 1921 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1922 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1923 _max_live_bytes += g1_note_end.max_live_bytes(); 1924 _freed_bytes += g1_note_end.freed_bytes(); 1925 1926 // If we iterate over the global cleanup list at the end of 1927 // cleanup to do this printing we will not guarantee to only 1928 // generate output for the newly-reclaimed regions (the list 1929 // might not be empty at the beginning of cleanup; we might 1930 // still be working on its previous contents). So we do the 1931 // printing here, before we append the new regions to the global 1932 // cleanup list. 
1933 1934 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1935 if (hr_printer->is_active()) { 1936 FreeRegionListIterator iter(&local_cleanup_list); 1937 while (iter.more_available()) { 1938 HeapRegion* hr = iter.get_next(); 1939 hr_printer->cleanup(hr); 1940 } 1941 } 1942 1943 _cleanup_list->add_ordered(&local_cleanup_list); 1944 assert(local_cleanup_list.is_empty(), "post-condition"); 1945 1946 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1947 } 1948 } 1949 size_t max_live_bytes() { return _max_live_bytes; } 1950 size_t freed_bytes() { return _freed_bytes; } 1951 }; 1952 1953 class G1ParScrubRemSetTask: public AbstractGangTask { 1954 protected: 1955 G1RemSet* _g1rs; 1956 BitMap* _region_bm; 1957 BitMap* _card_bm; 1958 HeapRegionClaimer _hrclaimer; 1959 1960 public: 1961 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1962 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1963 } 1964 1965 void work(uint worker_id) { 1966 _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer); 1967 } 1968 1969 }; 1970 1971 void ConcurrentMark::cleanup() { 1972 // world is stopped at this checkpoint 1973 assert(SafepointSynchronize::is_at_safepoint(), 1974 "world should be stopped"); 1975 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1976 1977 // If a full collection has happened, we shouldn't do this. 1978 if (has_aborted()) { 1979 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1980 return; 1981 } 1982 1983 g1h->verify_region_sets_optional(); 1984 1985 if (VerifyDuringGC) { 1986 HandleMark hm; // handle scope 1987 Universe::heap()->prepare_for_verify(); 1988 Universe::verify(VerifyOption_G1UsePrevMarking, 1989 " VerifyDuringGC:(before)"); 1990 } 1991 g1h->check_bitmaps("Cleanup Start"); 1992 1993 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 1994 g1p->record_concurrent_mark_cleanup_start(); 1995 1996 double start = os::elapsedTime(); 1997 1998 HeapRegionRemSet::reset_for_cleanup_tasks(); 1999 2000 uint n_workers; 2001 2002 // Do counting once more with the world stopped for good measure. 2003 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 2004 2005 g1h->set_par_threads(); 2006 n_workers = g1h->n_par_threads(); 2007 assert(g1h->n_par_threads() == n_workers, 2008 "Should not have been reset"); 2009 g1h->workers()->run_task(&g1_par_count_task); 2010 // Done with the parallel phase so reset to 0. 2011 g1h->set_par_threads(0); 2012 2013 if (VerifyDuringGC) { 2014 // Verify that the counting data accumulated during marking matches 2015 // that calculated by walking the marking bitmap. 2016 2017 // Bitmaps to hold expected values 2018 BitMap expected_region_bm(_region_bm.size(), true); 2019 BitMap expected_card_bm(_card_bm.size(), true); 2020 2021 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 2022 &_region_bm, 2023 &_card_bm, 2024 &expected_region_bm, 2025 &expected_card_bm); 2026 2027 g1h->set_par_threads((int)n_workers); 2028 g1h->workers()->run_task(&g1_par_verify_task); 2029 // Done with the parallel phase so reset to 0. 
2030 g1h->set_par_threads(0);
2031
2032 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2033 }
2034
2035 size_t start_used_bytes = g1h->used();
2036 g1h->set_marking_complete();
2037
2038 double count_end = os::elapsedTime();
2039 double this_final_counting_time = (count_end - start);
2040 _total_counting_time += this_final_counting_time;
2041
2042 if (G1PrintRegionLivenessInfo) {
2043 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2044 _g1h->heap_region_iterate(&cl);
2045 }
2046
2047 // Install newly created mark bitMap as "prev".
2048 swapMarkBitMaps();
2049
2050 g1h->reset_gc_time_stamp();
2051
2052 // Note end of marking in all heap regions.
2053 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
2054 g1h->set_par_threads((int)n_workers);
2055 g1h->workers()->run_task(&g1_par_note_end_task);
2056 g1h->set_par_threads(0);
2057 g1h->check_gc_time_stamps();
2058
2059 if (!cleanup_list_is_empty()) {
2060 // The cleanup list is not empty, so we'll have to process it
2061 // concurrently. Notify anyone else that might be wanting free
2062 // regions that there will be more free regions coming soon.
2063 g1h->set_free_regions_coming();
2064 }
2065
2066 // Do the RS scrubbing before the record_concurrent_mark_cleanup_end()
2067 // call below, since it affects the metric by which we sort the heap regions.
2068 if (G1ScrubRemSets) {
2069 double rs_scrub_start = os::elapsedTime();
2070 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
2071 g1h->set_par_threads((int)n_workers);
2072 g1h->workers()->run_task(&g1_par_scrub_rs_task);
2073 g1h->set_par_threads(0);
2074
2075 double rs_scrub_end = os::elapsedTime();
2076 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2077 _total_rs_scrub_time += this_rs_scrub_time;
2078 }
2079
2080 // this will also free any regions totally full of garbage objects,
2081 // and sort the regions.
2082 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2083
2084 // Statistics.
2085 double end = os::elapsedTime();
2086 _cleanup_times.add((end - start) * 1000.0);
2087
2088 if (G1Log::fine()) {
2089 g1h->print_size_transition(gclog_or_tty,
2090 start_used_bytes,
2091 g1h->used(),
2092 g1h->capacity());
2093 }
2094
2095 // Clean up will have freed any regions completely full of garbage.
2096 // Update the soft reference policy with the new heap occupancy.
2097 Universe::update_heap_info_at_gc();
2098
2099 if (VerifyDuringGC) {
2100 HandleMark hm; // handle scope
2101 Universe::heap()->prepare_for_verify();
2102 Universe::verify(VerifyOption_G1UsePrevMarking,
2103 " VerifyDuringGC:(after)");
2104 }
2105
2106 g1h->check_bitmaps("Cleanup End");
2107
2108 g1h->verify_region_sets_optional();
2109
2110 // We need to make this be a "collection" so any collection pause that
2111 // races with it goes around and waits for completeCleanup to finish.
2112 g1h->increment_total_collections();
2113
2114 // Clean out dead classes and update Metaspace sizes.
2115 if (ClassUnloadingWithConcurrentMark) {
2116 ClassLoaderDataGraph::purge();
2117 }
2118 MetaspaceGC::compute_new_size();
2119
2120 // We reclaimed old regions so we should calculate the sizes to make
2121 // sure we update the old gen/space data.
2122 g1h->g1mm()->update_sizes();
2123 g1h->allocation_context_stats().update_after_mark();
2124
2125 g1h->trace_heap_after_concurrent_cycle();
2126 }
2127
2128 void ConcurrentMark::completeCleanup() {
2129 if (has_aborted()) return;
2130
2131 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2132
2133 _cleanup_list.verify_optional();
2134 FreeRegionList tmp_free_list("Tmp Free List");
2135
2136 if (G1ConcRegionFreeingVerbose) {
2137 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2138 "cleanup list has %u entries",
2139 _cleanup_list.length());
2140 }
2141
2142 // No one else should be accessing the _cleanup_list at this point,
2143 // so it is not necessary to take any locks
2144 while (!_cleanup_list.is_empty()) {
2145 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2146 assert(hr != NULL, "Got NULL from a non-empty list");
2147 hr->par_clear();
2148 tmp_free_list.add_ordered(hr);
2149
2150 // Instead of adding one region at a time to the secondary_free_list,
2151 // we accumulate them in the local list and move them a few at a
2152 // time. This also cuts down on the number of notify_all() calls
2153 // we do during this process. We'll also append the local list when
2154 // _cleanup_list is empty (which means we just removed the last
2155 // region from the _cleanup_list).
2156 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2157 _cleanup_list.is_empty()) {
2158 if (G1ConcRegionFreeingVerbose) {
2159 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2160 "appending %u entries to the secondary_free_list, "
2161 "cleanup list still has %u entries",
2162 tmp_free_list.length(),
2163 _cleanup_list.length());
2164 }
2165
2166 {
2167 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2168 g1h->secondary_free_list_add(&tmp_free_list);
2169 SecondaryFreeList_lock->notify_all();
2170 }
2171
2172 if (G1StressConcRegionFreeing) {
2173 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2174 os::sleep(Thread::current(), (jlong) 1, false);
2175 }
2176 }
2177 }
2178 }
2179 assert(tmp_free_list.is_empty(), "post-condition");
2180 }
2181
2182 // Supporting Object and Oop closures for reference discovery
2183 // and processing during marking
2184
2185 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2186 HeapWord* addr = (HeapWord*)obj;
2187 return addr != NULL &&
2188 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2189 }
2190
2191 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2192 // Uses the CMTask associated with a worker thread (for serial reference
2193 // processing the CMTask for worker 0 is used) to preserve (mark) and
2194 // trace referent objects.
2195 //
2196 // Using the CMTask and embedded local queues avoids having the worker
2197 // threads operating on the global mark stack. This reduces the risk
2198 // of overflowing the stack - which we would rather avoid at this late
2199 // stage. Also using the tasks' local queues removes the potential
2200 // of the workers interfering with each other that could occur if
2201 // operating on the global stack.
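// Aside: a minimal sketch of the counter-and-drain pattern described above
// and implemented by the closure below. After every drain_interval
// references handed to the task (the role G1RefProcDrainInterval plays),
// the task's local queue is drained so entries never accumulate on the
// global stack. SketchTask, deal_with() and drain() are illustrative
// stand-ins, not the real CMTask API.
#include <cstddef>
#include <deque>

struct SketchTask {
  std::deque<void*> local_queue;
  void deal_with(void* ref) { local_queue.push_back(ref); } // mark and enqueue
  void drain() { while (!local_queue.empty()) { local_queue.pop_front(); } }
};

static void keep_alive_all(SketchTask& task, void** refs, size_t n_refs,
                           size_t drain_interval) {
  size_t counter = drain_interval;
  for (size_t i = 0; i < n_refs; ++i) {
    task.deal_with(refs[i]);
    if (--counter == 0) { // dealt with drain_interval references:
      task.drain();       // empty the local queue before continuing
      counter = drain_interval;
    }
  }
}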
2202 2203 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2204 ConcurrentMark* _cm; 2205 CMTask* _task; 2206 int _ref_counter_limit; 2207 int _ref_counter; 2208 bool _is_serial; 2209 public: 2210 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2211 _cm(cm), _task(task), _is_serial(is_serial), 2212 _ref_counter_limit(G1RefProcDrainInterval) { 2213 assert(_ref_counter_limit > 0, "sanity"); 2214 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2215 _ref_counter = _ref_counter_limit; 2216 } 2217 2218 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2219 virtual void do_oop( oop* p) { do_oop_work(p); } 2220 2221 template <class T> void do_oop_work(T* p) { 2222 if (!_cm->has_overflown()) { 2223 oop obj = oopDesc::load_decode_heap_oop(p); 2224 if (_cm->verbose_high()) { 2225 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2226 "*"PTR_FORMAT" = "PTR_FORMAT, 2227 _task->worker_id(), p2i(p), p2i((void*) obj)); 2228 } 2229 2230 _task->deal_with_reference(obj); 2231 _ref_counter--; 2232 2233 if (_ref_counter == 0) { 2234 // We have dealt with _ref_counter_limit references, pushing them 2235 // and objects reachable from them on to the local stack (and 2236 // possibly the global stack). Call CMTask::do_marking_step() to 2237 // process these entries. 2238 // 2239 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2240 // there's nothing more to do (i.e. we're done with the entries that 2241 // were pushed as a result of the CMTask::deal_with_reference() calls 2242 // above) or we overflow. 2243 // 2244 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2245 // flag while there may still be some work to do. (See the comment at 2246 // the beginning of CMTask::do_marking_step() for those conditions - 2247 // one of which is reaching the specified time target.) It is only 2248 // when CMTask::do_marking_step() returns without setting the 2249 // has_aborted() flag that the marking step has completed. 2250 do { 2251 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2252 _task->do_marking_step(mark_step_duration_ms, 2253 false /* do_termination */, 2254 _is_serial); 2255 } while (_task->has_aborted() && !_cm->has_overflown()); 2256 _ref_counter = _ref_counter_limit; 2257 } 2258 } else { 2259 if (_cm->verbose_high()) { 2260 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2261 } 2262 } 2263 } 2264 }; 2265 2266 // 'Drain' oop closure used by both serial and parallel reference processing. 2267 // Uses the CMTask associated with a given worker thread (for serial 2268 // reference processing the CMtask for worker 0 is used). Calls the 2269 // do_marking_step routine, with an unbelievably large timeout value, 2270 // to drain the marking data structures of the remaining entries 2271 // added by the 'keep alive' oop closure above. 
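// Aside: the retry protocol shared by the 'keep alive' and 'drain' closures,
// reduced to its skeleton. do_marking_step() may return early with the
// abort flag set (for example when it reaches its time target), so it is
// simply re-invoked until it completes - unless the global mark stack has
// overflown, in which case remark is abandoned and marking restarts.
// SketchMarker and its members are illustrative, not the real CMTask API.
struct SketchMarker {
  bool _aborted;
  bool _overflown;
  SketchMarker() : _aborted(false), _overflown(false) { }
  void do_step(double target_ms) { _aborted = false; /* mark until done or target_ms elapses */ }
  bool has_aborted() const { return _aborted; }
  bool has_overflown() const { return _overflown; }
};

static void drain_to_completion(SketchMarker& m) {
  do {
    m.do_step(1000000000.0 /* effectively unbounded, as in the closure below */);
  } while (m.has_aborted() && !m.has_overflown());
}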
2272
2273 class G1CMDrainMarkingStackClosure: public VoidClosure {
2274 ConcurrentMark* _cm;
2275 CMTask* _task;
2276 bool _is_serial;
2277 public:
2278 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2279 _cm(cm), _task(task), _is_serial(is_serial) {
2280 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2281 }
2282
2283 void do_void() {
2284 do {
2285 if (_cm->verbose_high()) {
2286 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2287 _task->worker_id(), BOOL_TO_STR(_is_serial));
2288 }
2289
2290 // We call CMTask::do_marking_step() to completely drain the local
2291 // and global marking stacks of entries pushed by the 'keep alive'
2292 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2293 //
2294 // CMTask::do_marking_step() is called in a loop, which we'll exit
2295 // if there's nothing more to do (i.e. we've completely drained the
2296 // entries that were pushed as a result of applying the 'keep alive'
2297 // closure to the entries on the discovered ref lists) or we overflow
2298 // the global marking stack.
2299 //
2300 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2301 // flag while there may still be some work to do. (See the comment at
2302 // the beginning of CMTask::do_marking_step() for those conditions -
2303 // one of which is reaching the specified time target.) It is only
2304 // when CMTask::do_marking_step() returns without setting the
2305 // has_aborted() flag that the marking step has completed.
2306
2307 _task->do_marking_step(1000000000.0 /* something very large */,
2308 true /* do_termination */,
2309 _is_serial);
2310 } while (_task->has_aborted() && !_cm->has_overflown());
2311 }
2312 };
2313
2314 // Implementation of AbstractRefProcTaskExecutor for parallel
2315 // reference processing at the end of G1 concurrent marking
2316
2317 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2318 private:
2319 G1CollectedHeap* _g1h;
2320 ConcurrentMark* _cm;
2321 WorkGang* _workers;
2322 int _active_workers;
2323
2324 public:
2325 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2326 ConcurrentMark* cm,
2327 WorkGang* workers,
2328 int n_workers) :
2329 _g1h(g1h), _cm(cm),
2330 _workers(workers), _active_workers(n_workers) { }
2331
2332 // Executes the given task using concurrent marking worker threads.
2333 virtual void execute(ProcessTask& task); 2334 virtual void execute(EnqueueTask& task); 2335 }; 2336 2337 class G1CMRefProcTaskProxy: public AbstractGangTask { 2338 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2339 ProcessTask& _proc_task; 2340 G1CollectedHeap* _g1h; 2341 ConcurrentMark* _cm; 2342 2343 public: 2344 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2345 G1CollectedHeap* g1h, 2346 ConcurrentMark* cm) : 2347 AbstractGangTask("Process reference objects in parallel"), 2348 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2349 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2350 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2351 } 2352 2353 virtual void work(uint worker_id) { 2354 ResourceMark rm; 2355 HandleMark hm; 2356 CMTask* task = _cm->task(worker_id); 2357 G1CMIsAliveClosure g1_is_alive(_g1h); 2358 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2359 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2360 2361 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2362 } 2363 }; 2364 2365 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2366 assert(_workers != NULL, "Need parallel worker threads."); 2367 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2368 2369 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2370 2371 // We need to reset the concurrency level before each 2372 // proxy task execution, so that the termination protocol 2373 // and overflow handling in CMTask::do_marking_step() knows 2374 // how many workers to wait for. 2375 _cm->set_concurrency(_active_workers); 2376 _g1h->set_par_threads(_active_workers); 2377 _workers->run_task(&proc_task_proxy); 2378 _g1h->set_par_threads(0); 2379 } 2380 2381 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2382 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2383 EnqueueTask& _enq_task; 2384 2385 public: 2386 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2387 AbstractGangTask("Enqueue reference objects in parallel"), 2388 _enq_task(enq_task) { } 2389 2390 virtual void work(uint worker_id) { 2391 _enq_task.work(worker_id); 2392 } 2393 }; 2394 2395 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2396 assert(_workers != NULL, "Need parallel worker threads."); 2397 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2398 2399 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2400 2401 // Not strictly necessary but... 2402 // 2403 // We need to reset the concurrency level before each 2404 // proxy task execution, so that the termination protocol 2405 // and overflow handling in CMTask::do_marking_step() knows 2406 // how many workers to wait for. 2407 _cm->set_concurrency(_active_workers); 2408 _g1h->set_par_threads(_active_workers); 2409 _workers->run_task(&enq_task_proxy); 2410 _g1h->set_par_threads(0); 2411 } 2412 2413 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2414 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2415 } 2416 2417 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2418 if (has_overflown()) { 2419 // Skip processing the discovered references if we have 2420 // overflown the global marking stack. Reference objects 2421 // only get discovered once so it is OK to not 2422 // de-populate the discovered reference lists. 
We could have,
2423 // but the only benefit would be that, when marking restarts,
2424 // fewer reference objects are discovered.
2425 return;
2426 }
2427
2428 ResourceMark rm;
2429 HandleMark hm;
2430
2431 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2432
2433 // Is alive closure.
2434 G1CMIsAliveClosure g1_is_alive(g1h);
2435
2436 // Inner scope to exclude the cleaning of the string and symbol
2437 // tables from the displayed time.
2438 {
2439 G1CMTraceTime t("GC ref-proc", G1Log::finer());
2440
2441 ReferenceProcessor* rp = g1h->ref_processor_cm();
2442
2443 // See the comment in G1CollectedHeap::ref_processing_init()
2444 // about how reference processing currently works in G1.
2445
2446 // Set the soft reference policy
2447 rp->setup_policy(clear_all_soft_refs);
2448 assert(_markStack.isEmpty(), "mark stack should be empty");
2449
2450 // Instances of the 'Keep Alive' and 'Complete GC' closures used
2451 // in serial reference processing. Note these closures are also
2452 // used for serially processing (by the current thread) the
2453 // JNI references during parallel reference processing.
2454 //
2455 // These closures do not need to synchronize with the worker
2456 // threads involved in parallel reference processing as these
2457 // instances are executed serially by the current thread (i.e.
2458 // reference processing is not multi-threaded and is thus
2459 // performed by the current thread instead of a gang worker).
2460 //
2461 // The gang tasks involved in parallel reference processing create
2462 // their own instances of these closures, which do their own
2463 // synchronization among themselves.
2464 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2465 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2466
2467 // We need at least one active thread. If reference processing
2468 // is not multi-threaded we use the current (VMThread) thread,
2469 // otherwise we use the work gang from the G1CollectedHeap and
2470 // we utilize all the worker threads we can.
2471 bool processing_is_mt = rp->processing_is_mt();
2472 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2473 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2474
2475 // Parallel processing task executor.
2476 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2477 g1h->workers(), active_workers);
2478 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2479
2480 // Set the concurrency level. The phase was already set prior to
2481 // executing the remark task.
2482 set_concurrency(active_workers);
2483
2484 // Set the degree of MT processing here. If the discovery was done MT,
2485 // the number of threads involved during discovery could differ from
2486 // the number of active workers. This is OK as long as the discovered
2487 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2488 rp->set_active_mt_degree(active_workers);
2489
2490 // Process the weak references.
2491 const ReferenceProcessorStats& stats =
2492 rp->process_discovered_references(&g1_is_alive,
2493 &g1_keep_alive,
2494 &g1_drain_mark_stack,
2495 executor,
2496 g1h->gc_timer_cm(),
2497 concurrent_gc_id());
2498 g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2499
2500 // The do_oop work routines of the keep_alive and drain_marking_stack
2501 // oop closures will set the has_overflown flag if we overflow the
2502 // global marking stack.
2503 2504 assert(_markStack.overflow() || _markStack.isEmpty(), 2505 "mark stack should be empty (unless it overflowed)"); 2506 2507 if (_markStack.overflow()) { 2508 // This should have been done already when we tried to push an 2509 // entry on to the global mark stack. But let's do it again. 2510 set_has_overflown(); 2511 } 2512 2513 assert(rp->num_q() == active_workers, "why not"); 2514 2515 rp->enqueue_discovered_references(executor); 2516 2517 rp->verify_no_references_recorded(); 2518 assert(!rp->discovery_enabled(), "Post condition"); 2519 } 2520 2521 if (has_overflown()) { 2522 // We can not trust g1_is_alive if the marking stack overflowed 2523 return; 2524 } 2525 2526 assert(_markStack.isEmpty(), "Marking should have completed"); 2527 2528 // Unload Klasses, String, Symbols, Code Cache, etc. 2529 { 2530 G1CMTraceTime trace("Unloading", G1Log::finer()); 2531 2532 if (ClassUnloadingWithConcurrentMark) { 2533 // Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack 2534 // part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase. 2535 // Defer the cleaning until we have complete on_stack data. 2536 MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */); 2537 2538 bool purged_classes; 2539 2540 { 2541 G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest()); 2542 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); 2543 } 2544 2545 { 2546 G1CMTraceTime trace("Parallel Unloading", G1Log::finest()); 2547 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2548 } 2549 2550 { 2551 G1CMTraceTime trace("Deallocate Metadata", G1Log::finest()); 2552 ClassLoaderDataGraph::free_deallocate_lists(); 2553 } 2554 } 2555 2556 if (G1StringDedup::is_enabled()) { 2557 G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest()); 2558 G1StringDedup::unlink(&g1_is_alive); 2559 } 2560 } 2561 } 2562 2563 void ConcurrentMark::swapMarkBitMaps() { 2564 CMBitMapRO* temp = _prevMarkBitMap; 2565 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2566 _nextMarkBitMap = (CMBitMap*) temp; 2567 } 2568 2569 class CMObjectClosure; 2570 2571 // Closure for iterating over objects, currently only used for 2572 // processing SATB buffers. 
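// Aside: what "apply the closure and empty" means for a SATB buffer, in
// sketch form - every pointer the mutator recorded since the last drain is
// handed to the marking closure (such as the one below), then the buffer
// is reset. The fixed-size layout and the type names are illustrative,
// not the real SATB queue API.
struct SketchObjectClosure {
  void do_object(void* obj) { (void)obj; /* mark and trace obj */ }
};

struct SketchSATBBuffer {
  enum { kCapacity = 256 };
  void* _entries[kCapacity];
  int _index; // next free slot; mutators push, remark drains

  SketchSATBBuffer() : _index(0) { }

  void apply_closure_and_empty(SketchObjectClosure* cl) {
    for (int i = 0; i < _index; ++i) {
      cl->do_object(_entries[i]); // trace each recorded reference
    }
    _index = 0; // the buffer is now empty
  }
};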
2573 class CMObjectClosure : public ObjectClosure {
2574 private:
2575 CMTask* _task;
2576
2577 public:
2578 void do_object(oop obj) {
2579 _task->deal_with_reference(obj);
2580 }
2581
2582 CMObjectClosure(CMTask* task) : _task(task) { }
2583 };
2584
2585 class G1RemarkThreadsClosure : public ThreadClosure {
2586 CMObjectClosure _cm_obj;
2587 G1CMOopClosure _cm_cl;
2588 MarkingCodeBlobClosure _code_cl;
2589 int _thread_parity;
2590
2591 public:
2592 G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
2593 _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2594 _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}
2595
2596 void do_thread(Thread* thread) {
2597 if (thread->is_Java_thread()) {
2598 if (thread->claim_oops_do(true, _thread_parity)) {
2599 JavaThread* jt = (JavaThread*)thread;
2600
2601 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2602 // however, oops reachable from nmethods have very complex lifecycles:
2603 // * Alive if on the stack of an executing method
2604 // * Weakly reachable otherwise
2605 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2606 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2607 jt->nmethods_do(&_code_cl);
2608
2609 jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
2610 }
2611 } else if (thread->is_VM_thread()) {
2612 if (thread->claim_oops_do(true, _thread_parity)) {
2613 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
2614 }
2615 }
2616 }
2617 };
2618
2619 class CMRemarkTask: public AbstractGangTask {
2620 private:
2621 ConcurrentMark* _cm;
2622 public:
2623 void work(uint worker_id) {
2624 // Since all available tasks are actually started, we should
2625 // only proceed if we're supposed to be active.
2626 if (worker_id < _cm->active_tasks()) {
2627 CMTask* task = _cm->task(worker_id);
2628 task->record_start_time();
2629 {
2630 ResourceMark rm;
2631 HandleMark hm;
2632
2633 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2634 Threads::threads_do(&threads_f);
2635 }
2636
2637 do {
2638 task->do_marking_step(1000000000.0 /* something very large */,
2639 true /* do_termination */,
2640 false /* is_serial */);
2641 } while (task->has_aborted() && !_cm->has_overflown());
2642 // If we overflow, then we do not want to restart. We instead
2643 // want to abort remark and do concurrent marking again.
2644 task->record_end_time();
2645 }
2646 }
2647
2648 CMRemarkTask(ConcurrentMark* cm, int active_workers) :
2649 AbstractGangTask("Par Remark"), _cm(cm) {
2650 _cm->terminator()->reset_for_reuse(active_workers);
2651 }
2652 };
2653
2654 void ConcurrentMark::checkpointRootsFinalWork() {
2655 ResourceMark rm;
2656 HandleMark hm;
2657 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2658
2659 G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2660
2661 g1h->ensure_parsability(false);
2662
2663 G1CollectedHeap::StrongRootsScope srs(g1h);
2664 // this is remark, so we'll use up all active threads
2665 uint active_workers = g1h->workers()->active_workers();
2666 if (active_workers == 0) {
2667 assert(active_workers > 0, "Should have been set earlier");
2668 active_workers = (uint) ParallelGCThreads;
2669 g1h->workers()->set_active_workers(active_workers);
2670 }
2671 set_concurrency_and_phase(active_workers, false /* concurrent */);
2672 // Leave _parallel_marking_threads at its
2673 // value originally calculated in the ConcurrentMark
2674 // constructor and pass values of the active workers
2675 // through the gang in the task.
2676
2677 CMRemarkTask remarkTask(this, active_workers);
2678 // We will start all available threads, even if we decide that the
2679 // active_workers will be fewer. The extra ones will just bail out
2680 // immediately.
2681 g1h->set_par_threads(active_workers);
2682 g1h->workers()->run_task(&remarkTask);
2683 g1h->set_par_threads(0);
2684
2685 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2686 guarantee(has_overflown() ||
2687 satb_mq_set.completed_buffers_num() == 0,
2688 err_msg("Invariant: has_overflown = %s, num buffers = %d",
2689 BOOL_TO_STR(has_overflown()),
2690 satb_mq_set.completed_buffers_num()));
2691
2692 print_stats();
2693 }
2694
2695 #ifndef PRODUCT
2696
2697 class PrintReachableOopClosure: public OopClosure {
2698 private:
2699 G1CollectedHeap* _g1h;
2700 outputStream* _out;
2701 VerifyOption _vo;
2702 bool _all;
2703
2704 public:
2705 PrintReachableOopClosure(outputStream* out,
2706 VerifyOption vo,
2707 bool all) :
2708 _g1h(G1CollectedHeap::heap()),
2709 _out(out), _vo(vo), _all(all) { }
2710
2711 void do_oop(narrowOop* p) { do_oop_work(p); }
2712 void do_oop( oop* p) { do_oop_work(p); }
2713
2714 template <class T> void do_oop_work(T* p) {
2715 oop obj = oopDesc::load_decode_heap_oop(p);
2716 const char* str = NULL;
2717 const char* str2 = "";
2718
2719 if (obj == NULL) {
2720 str = "";
2721 } else if (!_g1h->is_in_g1_reserved(obj)) {
2722 str = " O";
2723 } else {
2724 HeapRegion* hr = _g1h->heap_region_containing(obj);
2725 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
2726 bool marked = _g1h->is_marked(obj, _vo);
2727
2728 if (over_tams) {
2729 str = " >";
2730 if (marked) {
2731 str2 = " AND MARKED";
2732 }
2733 } else if (marked) {
2734 str = " M";
2735 } else {
2736 str = " NOT";
2737 }
2738 }
2739
2740 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s",
2741 p2i(p), p2i((void*) obj), str, str2);
2742 }
2743 };
2744
2745 class PrintReachableObjectClosure : public ObjectClosure {
2746 private:
2747 G1CollectedHeap* _g1h;
2748 outputStream* _out;
2749 VerifyOption _vo;
2750 bool _all;
2751 HeapRegion* _hr;
2752
2753 public:
2754 PrintReachableObjectClosure(outputStream* out,
2755 VerifyOption vo,
2756 bool all,
2757 HeapRegion* hr) :
2758 _g1h(G1CollectedHeap::heap()),
2759 _out(out), _vo(vo), _all(all), _hr(hr) { }
2760
2761 void do_object(oop o) {
2762 bool over_tams =
_g1h->allocated_since_marking(o, _hr, _vo); 2763 bool marked = _g1h->is_marked(o, _vo); 2764 bool print_it = _all || over_tams || marked; 2765 2766 if (print_it) { 2767 _out->print_cr(" "PTR_FORMAT"%s", 2768 p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : ""); 2769 PrintReachableOopClosure oopCl(_out, _vo, _all); 2770 o->oop_iterate_no_header(&oopCl); 2771 } 2772 } 2773 }; 2774 2775 class PrintReachableRegionClosure : public HeapRegionClosure { 2776 private: 2777 G1CollectedHeap* _g1h; 2778 outputStream* _out; 2779 VerifyOption _vo; 2780 bool _all; 2781 2782 public: 2783 bool doHeapRegion(HeapRegion* hr) { 2784 HeapWord* b = hr->bottom(); 2785 HeapWord* e = hr->end(); 2786 HeapWord* t = hr->top(); 2787 HeapWord* p = _g1h->top_at_mark_start(hr, _vo); 2788 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2789 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p)); 2790 _out->cr(); 2791 2792 HeapWord* from = b; 2793 HeapWord* to = t; 2794 2795 if (to > from) { 2796 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to)); 2797 _out->cr(); 2798 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2799 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2800 _out->cr(); 2801 } 2802 2803 return false; 2804 } 2805 2806 PrintReachableRegionClosure(outputStream* out, 2807 VerifyOption vo, 2808 bool all) : 2809 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } 2810 }; 2811 2812 void ConcurrentMark::print_reachable(const char* str, 2813 VerifyOption vo, 2814 bool all) { 2815 gclog_or_tty->cr(); 2816 gclog_or_tty->print_cr("== Doing heap dump... "); 2817 2818 if (G1PrintReachableBaseFile == NULL) { 2819 gclog_or_tty->print_cr(" #### error: no base file defined"); 2820 return; 2821 } 2822 2823 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2824 (JVM_MAXPATHLEN - 1)) { 2825 gclog_or_tty->print_cr(" #### error: file name too long"); 2826 return; 2827 } 2828 2829 char file_name[JVM_MAXPATHLEN]; 2830 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2831 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2832 2833 fileStream fout(file_name); 2834 if (!fout.is_open()) { 2835 gclog_or_tty->print_cr(" #### error: could not open file"); 2836 return; 2837 } 2838 2839 outputStream* out = &fout; 2840 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); 2841 out->cr(); 2842 2843 out->print_cr("--- ITERATING OVER REGIONS"); 2844 out->cr(); 2845 PrintReachableRegionClosure rcl(out, vo, all); 2846 _g1h->heap_region_iterate(&rcl); 2847 out->cr(); 2848 2849 gclog_or_tty->print_cr(" done"); 2850 gclog_or_tty->flush(); 2851 } 2852 2853 #endif // PRODUCT 2854 2855 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2856 // Note we are overriding the read-only view of the prev map here, via 2857 // the cast. 2858 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2859 } 2860 2861 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2862 _nextMarkBitMap->clearRange(mr); 2863 } 2864 2865 HeapRegion* 2866 ConcurrentMark::claim_region(uint worker_id) { 2867 // "checkpoint" the finger 2868 HeapWord* finger = _finger; 2869 2870 // _heap_end will not change underneath our feet; it only changes at 2871 // yield points. 2872 while (finger < _heap_end) { 2873 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2874 2875 // Note on how this code handles humongous regions. In the 2876 // normal case the finger will reach the start of a "starts 2877 // humongous" (SH) region. 
Its end will either be the end of the
2878 // last "continues humongous" (CH) region in the sequence, or the
2879 // standard end of the SH region (if the SH is the only region in
2880 // the sequence). That way claim_region() will skip over the CH
2881 // regions. However, there is a subtle race between a CM thread
2882 // executing this method and a mutator thread doing a humongous
2883 // object allocation. The two are not mutually exclusive as the CM
2884 // thread does not need to hold the Heap_lock when it gets
2885 // here. So there is a chance that claim_region() will come across
2886 // a free region that's in the process of becoming a SH or a CH
2887 // region. In the former case, it will either
2888 // a) Miss the update to the region's end, in which case it will
2889 // visit every subsequent CH region, will find their bitmaps
2890 // empty, and do nothing, or
2891 // b) Will observe the update of the region's end (in which case
2892 // it will skip the subsequent CH regions).
2893 // If it comes across a region that suddenly becomes CH, the
2894 // scenario will be similar to b). So, the race between
2895 // claim_region() and a humongous object allocation might force us
2896 // to do a bit of unnecessary work (due to some unnecessary bitmap
2897 // iterations) but it should not introduce any correctness issues.
2898 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2899
2900 // Above heap_region_containing_raw may return NULL as we always scan and claim
2901 // until the end of the heap. In this case, just jump to the next region.
2902 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2903
2904 // Is the gap between reading the finger and doing the CAS too long?
2905 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2906 if (res == finger && curr_region != NULL) {
2907 // we succeeded
2908 HeapWord* bottom = curr_region->bottom();
2909 HeapWord* limit = curr_region->next_top_at_mark_start();
2910
2911 if (verbose_low()) {
2912 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2913 "["PTR_FORMAT", "PTR_FORMAT"), "
2914 "limit = "PTR_FORMAT,
2915 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2916 }
2917
2918 // notice that _finger == end cannot be guaranteed here since
2919 // someone else might have moved the finger even further
2920 assert(_finger >= end, "the finger should have moved forward");
2921
2922 if (verbose_low()) {
2923 gclog_or_tty->print_cr("[%u] we were successful with region = "
2924 PTR_FORMAT, worker_id, p2i(curr_region));
2925 }
2926
2927 if (limit > bottom) {
2928 if (verbose_low()) {
2929 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2930 "returning it ", worker_id, p2i(curr_region));
2931 }
2932 return curr_region;
2933 } else {
2934 assert(limit == bottom,
2935 "the region limit should be at bottom");
2936 if (verbose_low()) {
2937 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2938 "returning NULL", worker_id, p2i(curr_region));
2939 }
2940 // we return NULL and the caller should try calling
2941 // claim_region() again.
2942 return NULL; 2943 } 2944 } else { 2945 assert(_finger > finger, "the finger should have moved forward"); 2946 if (verbose_low()) { 2947 if (curr_region == NULL) { 2948 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 2949 "global finger = "PTR_FORMAT", " 2950 "our finger = "PTR_FORMAT, 2951 worker_id, p2i(_finger), p2i(finger)); 2952 } else { 2953 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2954 "global finger = "PTR_FORMAT", " 2955 "our finger = "PTR_FORMAT, 2956 worker_id, p2i(_finger), p2i(finger)); 2957 } 2958 } 2959 2960 // read it again 2961 finger = _finger; 2962 } 2963 } 2964 2965 return NULL; 2966 } 2967 2968 #ifndef PRODUCT 2969 enum VerifyNoCSetOopsPhase { 2970 VerifyNoCSetOopsStack, 2971 VerifyNoCSetOopsQueues, 2972 VerifyNoCSetOopsSATBCompleted, 2973 VerifyNoCSetOopsSATBThread 2974 }; 2975 2976 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2977 private: 2978 G1CollectedHeap* _g1h; 2979 VerifyNoCSetOopsPhase _phase; 2980 int _info; 2981 2982 const char* phase_str() { 2983 switch (_phase) { 2984 case VerifyNoCSetOopsStack: return "Stack"; 2985 case VerifyNoCSetOopsQueues: return "Queue"; 2986 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 2987 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 2988 default: ShouldNotReachHere(); 2989 } 2990 return NULL; 2991 } 2992 2993 void do_object_work(oop obj) { 2994 guarantee(!_g1h->obj_in_cs(obj), 2995 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2996 p2i((void*) obj), phase_str(), _info)); 2997 } 2998 2999 public: 3000 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 3001 3002 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 3003 _phase = phase; 3004 _info = info; 3005 } 3006 3007 virtual void do_oop(oop* p) { 3008 oop obj = oopDesc::load_decode_heap_oop(p); 3009 do_object_work(obj); 3010 } 3011 3012 virtual void do_oop(narrowOop* p) { 3013 // We should not come across narrow oops while scanning marking 3014 // stacks and SATB buffers. 
3015 ShouldNotReachHere(); 3016 } 3017 3018 virtual void do_object(oop obj) { 3019 do_object_work(obj); 3020 } 3021 }; 3022 3023 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 3024 bool verify_enqueued_buffers, 3025 bool verify_thread_buffers, 3026 bool verify_fingers) { 3027 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 3028 if (!G1CollectedHeap::heap()->mark_in_progress()) { 3029 return; 3030 } 3031 3032 VerifyNoCSetOopsClosure cl; 3033 3034 if (verify_stacks) { 3035 // Verify entries on the global mark stack 3036 cl.set_phase(VerifyNoCSetOopsStack); 3037 _markStack.oops_do(&cl); 3038 3039 // Verify entries on the task queues 3040 for (uint i = 0; i < _max_worker_id; i += 1) { 3041 cl.set_phase(VerifyNoCSetOopsQueues, i); 3042 CMTaskQueue* queue = _task_queues->queue(i); 3043 queue->oops_do(&cl); 3044 } 3045 } 3046 3047 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 3048 3049 // Verify entries on the enqueued SATB buffers 3050 if (verify_enqueued_buffers) { 3051 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 3052 satb_qs.iterate_completed_buffers_read_only(&cl); 3053 } 3054 3055 // Verify entries on the per-thread SATB buffers 3056 if (verify_thread_buffers) { 3057 cl.set_phase(VerifyNoCSetOopsSATBThread); 3058 satb_qs.iterate_thread_buffers_read_only(&cl); 3059 } 3060 3061 if (verify_fingers) { 3062 // Verify the global finger 3063 HeapWord* global_finger = finger(); 3064 if (global_finger != NULL && global_finger < _heap_end) { 3065 // The global finger always points to a heap region boundary. We 3066 // use heap_region_containing_raw() to get the containing region 3067 // given that the global finger could be pointing to a free region 3068 // which subsequently becomes continues humongous. If that 3069 // happens, heap_region_containing() will return the bottom of the 3070 // corresponding starts humongous region and the check below will 3071 // not hold any more. 3072 // Since we always iterate over all regions, we might get a NULL HeapRegion 3073 // here. 3074 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 3075 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 3076 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 3077 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 3078 } 3079 3080 // Verify the task fingers 3081 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 3082 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 3083 CMTask* task = _tasks[i]; 3084 HeapWord* task_finger = task->finger(); 3085 if (task_finger != NULL && task_finger < _heap_end) { 3086 // See above note on the global finger verification. 3087 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 3088 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 3089 !task_hr->in_collection_set(), 3090 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 3091 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 3092 } 3093 } 3094 } 3095 } 3096 #endif // PRODUCT 3097 3098 // Aggregate the counting data that was constructed concurrently 3099 // with marking. 
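// Aside: a sketch of the per-region aggregation the closure below performs:
// each worker's marked-bytes stripe is summed and each worker's card bitmap
// is OR-ed into the global card bitmap. std::vector<bool> stands in for
// BitMap and the container shapes are illustrative, not the real layout.
#include <cstddef>
#include <vector>

static size_t aggregate_region(const std::vector<std::vector<size_t> >& worker_marked_bytes,
                               const std::vector<std::vector<bool> >& worker_card_bms,
                               std::vector<bool>* global_card_bm,
                               size_t region_index,
                               size_t card_start_idx, size_t card_limit_idx) {
  size_t marked_bytes = 0;
  for (size_t w = 0; w < worker_marked_bytes.size(); ++w) {
    // Fetch this worker's byte count for the region and add it in.
    marked_bytes += worker_marked_bytes[w][region_index];
    // Union this worker's card bits for the region into the global bitmap.
    for (size_t c = card_start_idx; c < card_limit_idx; ++c) {
      if (worker_card_bms[w][c]) {
        (*global_card_bm)[c] = true;
      }
    }
  }
  return marked_bytes; // what hr->add_to_marked_bytes() receives
}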
3100 class AggregateCountDataHRClosure: public HeapRegionClosure {
3101 G1CollectedHeap* _g1h;
3102 ConcurrentMark* _cm;
3103 CardTableModRefBS* _ct_bs;
3104 BitMap* _cm_card_bm;
3105 uint _max_worker_id;
3106
3107 public:
3108 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3109 BitMap* cm_card_bm,
3110 uint max_worker_id) :
3111 _g1h(g1h), _cm(g1h->concurrent_mark()),
3112 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3113 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3114
3115 bool doHeapRegion(HeapRegion* hr) {
3116 if (hr->is_continues_humongous()) {
3117 // We will ignore these here and process them when their
3118 // associated "starts humongous" region is processed.
3119 // Note that we cannot rely on their associated
3120 // "starts humongous" region to have their bit set to 1
3121 // since, due to the region chunking in the parallel region
3122 // iteration, a "continues humongous" region might be visited
3123 // before its associated "starts humongous".
3124 return false;
3125 }
3126
3127 HeapWord* start = hr->bottom();
3128 HeapWord* limit = hr->next_top_at_mark_start();
3129 HeapWord* end = hr->end();
3130
3131 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3132 err_msg("Preconditions not met - "
3133 "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3134 "top: "PTR_FORMAT", end: "PTR_FORMAT,
3135 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3136
3137 assert(hr->next_marked_bytes() == 0, "Precondition");
3138
3139 if (start == limit) {
3140 // NTAMS of this region has not been set so nothing to do.
3141 return false;
3142 }
3143
3144 // 'start' should be in the heap.
3145 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3146 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3147 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3148
3149 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3150 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3151 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3152
3153 // If ntams is not card aligned then we bump the card bitmap index
3154 // for limit so that we get all the cards spanned by
3155 // the object ending at ntams.
3156 // Note: if this is the last region in the heap then ntams
3157 // could be actually just beyond the end of the heap;
3158 // limit_idx will then correspond to a (non-existent) card
3159 // that is also outside the heap.
3160 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3161 limit_idx += 1;
3162 }
3163
3164 assert(limit_idx <= end_idx, "or else use atomics");
3165
3166 // Aggregate the "stripe" in the count data associated with hr.
3167 uint hrm_index = hr->hrm_index();
3168 size_t marked_bytes = 0;
3169
3170 for (uint i = 0; i < _max_worker_id; i += 1) {
3171 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3172 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3173
3174 // Fetch the marked_bytes in this region for task i and
3175 // add it to the running total for this region.
3176 marked_bytes += marked_bytes_array[hrm_index];
3177
3178 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3179 // into the global card bitmap.
3180 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 3181 3182 while (scan_idx < limit_idx) { 3183 assert(task_card_bm->at(scan_idx) == true, "should be"); 3184 _cm_card_bm->set_bit(scan_idx); 3185 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 3186 3187 // BitMap::get_next_one_offset() can handle the case when 3188 // its left_offset parameter is greater than its right_offset 3189 // parameter. It does, however, have an early exit if 3190 // left_offset == right_offset. So let's limit the value 3191 // passed in for left offset here. 3192 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 3193 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 3194 } 3195 } 3196 3197 // Update the marked bytes for this region. 3198 hr->add_to_marked_bytes(marked_bytes); 3199 3200 // Next heap region 3201 return false; 3202 } 3203 }; 3204 3205 class G1AggregateCountDataTask: public AbstractGangTask { 3206 protected: 3207 G1CollectedHeap* _g1h; 3208 ConcurrentMark* _cm; 3209 BitMap* _cm_card_bm; 3210 uint _max_worker_id; 3211 int _active_workers; 3212 HeapRegionClaimer _hrclaimer; 3213 3214 public: 3215 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3216 ConcurrentMark* cm, 3217 BitMap* cm_card_bm, 3218 uint max_worker_id, 3219 int n_workers) : 3220 AbstractGangTask("Count Aggregation"), 3221 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3222 _max_worker_id(max_worker_id), 3223 _active_workers(n_workers), 3224 _hrclaimer(_active_workers) { 3225 } 3226 3227 void work(uint worker_id) { 3228 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3229 3230 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 3231 } 3232 }; 3233 3234 3235 void ConcurrentMark::aggregate_count_data() { 3236 int n_workers = _g1h->workers()->active_workers(); 3237 3238 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3239 _max_worker_id, n_workers); 3240 3241 _g1h->set_par_threads(n_workers); 3242 _g1h->workers()->run_task(&g1_par_agg_task); 3243 _g1h->set_par_threads(0); 3244 } 3245 3246 // Clear the per-worker arrays used to store the per-region counting data 3247 void ConcurrentMark::clear_all_count_data() { 3248 // Clear the global card bitmap - it will be filled during 3249 // liveness count aggregation (during remark) and the 3250 // final counting task. 3251 _card_bm.clear(); 3252 3253 // Clear the global region bitmap - it will be filled as part 3254 // of the final counting task. 
3255 _region_bm.clear(); 3256 3257 uint max_regions = _g1h->max_regions(); 3258 assert(_max_worker_id > 0, "uninitialized"); 3259 3260 for (uint i = 0; i < _max_worker_id; i += 1) { 3261 BitMap* task_card_bm = count_card_bitmap_for(i); 3262 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3263 3264 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3265 assert(marked_bytes_array != NULL, "uninitialized"); 3266 3267 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3268 task_card_bm->clear(); 3269 } 3270 } 3271 3272 void ConcurrentMark::print_stats() { 3273 if (verbose_stats()) { 3274 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3275 for (size_t i = 0; i < _active_tasks; ++i) { 3276 _tasks[i]->print_stats(); 3277 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3278 } 3279 } 3280 } 3281 3282 // abandon current marking iteration due to a Full GC 3283 void ConcurrentMark::abort() { 3284 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3285 // concurrent bitmap clearing. 3286 _nextMarkBitMap->clearAll(); 3287 3288 // Note we cannot clear the previous marking bitmap here 3289 // since VerifyDuringGC verifies the objects marked during 3290 // a full GC against the previous bitmap. 3291 3292 // Clear the liveness counting data 3293 clear_all_count_data(); 3294 // Empty mark stack 3295 reset_marking_state(); 3296 for (uint i = 0; i < _max_worker_id; ++i) { 3297 _tasks[i]->clear_region_fields(); 3298 } 3299 _first_overflow_barrier_sync.abort(); 3300 _second_overflow_barrier_sync.abort(); 3301 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); 3302 if (!gc_id.is_undefined()) { 3303 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance 3304 // to detect that it was aborted. Only keep track of the first GC id that we aborted. 3305 _aborted_gc_id = gc_id; 3306 } 3307 _has_aborted = true; 3308 3309 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3310 satb_mq_set.abandon_partial_marking(); 3311 // This can be called either during or outside marking, we'll read 3312 // the expected_active value from the SATB queue set. 3313 satb_mq_set.set_active_all_threads( 3314 false, /* new active value */ 3315 satb_mq_set.is_active() /* expected_active */); 3316 3317 _g1h->trace_heap_after_concurrent_cycle(); 3318 _g1h->register_concurrent_cycle_end(); 3319 } 3320 3321 const GCId& ConcurrentMark::concurrent_gc_id() { 3322 if (has_aborted()) { 3323 return _aborted_gc_id; 3324 } 3325 return _g1h->gc_tracer_cm()->gc_id(); 3326 } 3327 3328 static void print_ms_time_info(const char* prefix, const char* name, 3329 NumberSeq& ns) { 3330 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3331 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3332 if (ns.num() > 0) { 3333 gclog_or_tty->print_cr("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 3334 prefix, ns.sd(), ns.maximum()); 3335 } 3336 } 3337 3338 void ConcurrentMark::print_summary_info() { 3339 gclog_or_tty->print_cr(" Concurrent marking:"); 3340 print_ms_time_info(" ", "init marks", _init_times); 3341 print_ms_time_info(" ", "remarks", _remark_times); 3342 { 3343 print_ms_time_info(" ", "final marks", _remark_mark_times); 3344 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3345 3346 } 3347 print_ms_time_info(" ", "cleanups", _cleanup_times); 3348 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3349 _total_counting_time, 3350 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3351 (double)_cleanup_times.num() 3352 : 0.0)); 3353 if (G1ScrubRemSets) { 3354 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3355 _total_rs_scrub_time, 3356 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3357 (double)_cleanup_times.num() 3358 : 0.0)); 3359 } 3360 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3361 (_init_times.sum() + _remark_times.sum() + 3362 _cleanup_times.sum())/1000.0); 3363 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3364 "(%8.2f s marking).", 3365 cmThread()->vtime_accum(), 3366 cmThread()->vtime_mark_accum()); 3367 } 3368 3369 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3370 _parallel_workers->print_worker_threads_on(st); 3371 } 3372 3373 void ConcurrentMark::print_on_error(outputStream* st) const { 3374 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3375 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3376 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3377 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3378 } 3379 3380 // We take a break if someone is trying to stop the world. 
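// Returns true if we actually yielded. A hypothetical caller sketch (not
// from this file):
//
//   for (HeapRegion* r = start; r != NULL; r = next_region(r)) {
//     process(r);
//     if (cm->do_yield_check(worker_id)) {
//       // A safepoint may have moved or freed objects while we yielded,
//       // so the caller decides whether it is safe to continue or whether
//       // the iteration must be aborted and redone.
//       break;
//     }
//   }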
3381 bool ConcurrentMark::do_yield_check(uint worker_id) { 3382 if (SuspendibleThreadSet::should_yield()) { 3383 if (worker_id == 0) { 3384 _g1h->g1_policy()->record_concurrent_pause(); 3385 } 3386 SuspendibleThreadSet::yield(); 3387 return true; 3388 } else { 3389 return false; 3390 } 3391 } 3392 3393 #ifndef PRODUCT 3394 // for debugging purposes 3395 void ConcurrentMark::print_finger() { 3396 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3397 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3398 for (uint i = 0; i < _max_worker_id; ++i) { 3399 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3400 } 3401 gclog_or_tty->cr(); 3402 } 3403 #endif 3404 3405 void CMTask::scan_object(oop obj) { 3406 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3407 3408 if (_cm->verbose_high()) { 3409 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, 3410 _worker_id, p2i((void*) obj)); 3411 } 3412 3413 size_t obj_size = obj->size(); 3414 _words_scanned += obj_size; 3415 3416 obj->oop_iterate(_cm_oop_closure); 3417 statsOnly( ++_objs_scanned ); 3418 check_limits(); 3419 } 3420 3421 // Closure for iteration over bitmaps 3422 class CMBitMapClosure : public BitMapClosure { 3423 private: 3424 // the bitmap that is being iterated over 3425 CMBitMap* _nextMarkBitMap; 3426 ConcurrentMark* _cm; 3427 CMTask* _task; 3428 3429 public: 3430 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3431 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3432 3433 bool do_bit(size_t offset) { 3434 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3435 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3436 assert( addr < _cm->finger(), "invariant"); 3437 3438 statsOnly( _task->increase_objs_found_on_bitmap() ); 3439 assert(addr >= _task->finger(), "invariant"); 3440 3441 // We move that task's local finger along. 
3442     _task->move_finger_to(addr);
3443 
3444     _task->scan_object(oop(addr));
3445     // we only partially drain the local queue and global stack
3446     _task->drain_local_queue(true);
3447     _task->drain_global_stack(true);
3448 
3449     // if the has_aborted flag has been raised, we need to bail out of
3450     // the iteration
3451     return !_task->has_aborted();
3452   }
3453 };
3454 
3455 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3456                                ConcurrentMark* cm,
3457                                CMTask* task)
3458   : _g1h(g1h), _cm(cm), _task(task) {
3459   assert(_ref_processor == NULL, "should be initialized to NULL");
3460 
3461   if (G1UseConcMarkReferenceProcessing) {
3462     _ref_processor = g1h->ref_processor_cm();
3463     assert(_ref_processor != NULL, "should not be NULL");
3464   }
3465 }
3466 
3467 void CMTask::setup_for_region(HeapRegion* hr) {
3468   assert(hr != NULL,
3469          "claim_region() should have filtered out NULL regions");
3470   assert(!hr->is_continues_humongous(),
3471          "claim_region() should have filtered out continues humongous regions");
3472 
3473   if (_cm->verbose_low()) {
3474     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3475                            _worker_id, p2i(hr));
3476   }
3477 
3478   _curr_region = hr;
3479   _finger = hr->bottom();
3480   update_region_limit();
3481 }
3482 
3483 void CMTask::update_region_limit() {
3484   HeapRegion* hr = _curr_region;
3485   HeapWord* bottom = hr->bottom();
3486   HeapWord* limit = hr->next_top_at_mark_start();
3487 
3488   if (limit == bottom) {
3489     if (_cm->verbose_low()) {
3490       gclog_or_tty->print_cr("[%u] found an empty region "
3491                              "["PTR_FORMAT", "PTR_FORMAT")",
3492                              _worker_id, p2i(bottom), p2i(limit));
3493     }
3494     // The region was collected underneath our feet.
3495     // We set the finger to bottom to ensure that the bitmap
3496     // iteration that will follow this will not do anything.
3497     // (this is not a condition that holds when we set the region up,
3498     // as the region is not supposed to be empty in the first place)
3499     _finger = bottom;
3500   } else if (limit >= _region_limit) {
3501     assert(limit >= _finger, "peace of mind");
3502   } else {
3503     assert(limit < _region_limit, "only way to get here");
3504     // This can happen under some pretty unusual circumstances. An
3505     // evacuation pause empties the region underneath our feet (NTAMS
3506     // at bottom). We then do some allocation in the region (NTAMS
3507     // stays at bottom), followed by the region being used as a GC
3508     // alloc region (NTAMS will move to top() and the objects
3509     // originally below it will be grayed). All objects now marked in
3510     // the region are explicitly grayed, if below the global finger,
3511     // and in fact we do not need to scan anything else. So, we simply
3512     // set _finger to be limit to ensure that the bitmap iteration
3513     // doesn't do anything.
3514     _finger = limit;
3515   }
3516 
3517   _region_limit = limit;
3518 }
3519 
3520 void CMTask::giveup_current_region() {
3521   assert(_curr_region != NULL, "invariant");
3522   if (_cm->verbose_low()) {
3523     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3524                            _worker_id, p2i(_curr_region));
3525   }
3526   clear_region_fields();
3527 }
3528 
3529 void CMTask::clear_region_fields() {
3530   // Values for these three fields that indicate that we're not
3531   // holding on to a region.
3532 _curr_region = NULL; 3533 _finger = NULL; 3534 _region_limit = NULL; 3535 } 3536 3537 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3538 if (cm_oop_closure == NULL) { 3539 assert(_cm_oop_closure != NULL, "invariant"); 3540 } else { 3541 assert(_cm_oop_closure == NULL, "invariant"); 3542 } 3543 _cm_oop_closure = cm_oop_closure; 3544 } 3545 3546 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3547 guarantee(nextMarkBitMap != NULL, "invariant"); 3548 3549 if (_cm->verbose_low()) { 3550 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3551 } 3552 3553 _nextMarkBitMap = nextMarkBitMap; 3554 clear_region_fields(); 3555 3556 _calls = 0; 3557 _elapsed_time_ms = 0.0; 3558 _termination_time_ms = 0.0; 3559 _termination_start_time_ms = 0.0; 3560 3561 #if _MARKING_STATS_ 3562 _local_pushes = 0; 3563 _local_pops = 0; 3564 _local_max_size = 0; 3565 _objs_scanned = 0; 3566 _global_pushes = 0; 3567 _global_pops = 0; 3568 _global_max_size = 0; 3569 _global_transfers_to = 0; 3570 _global_transfers_from = 0; 3571 _regions_claimed = 0; 3572 _objs_found_on_bitmap = 0; 3573 _satb_buffers_processed = 0; 3574 _steal_attempts = 0; 3575 _steals = 0; 3576 _aborted = 0; 3577 _aborted_overflow = 0; 3578 _aborted_cm_aborted = 0; 3579 _aborted_yield = 0; 3580 _aborted_timed_out = 0; 3581 _aborted_satb = 0; 3582 _aborted_termination = 0; 3583 #endif // _MARKING_STATS_ 3584 } 3585 3586 bool CMTask::should_exit_termination() { 3587 regular_clock_call(); 3588 // This is called when we are in the termination protocol. We should 3589 // quit if, for some reason, this task wants to abort or the global 3590 // stack is not empty (this means that we can get work from it). 3591 return !_cm->mark_stack_empty() || has_aborted(); 3592 } 3593 3594 void CMTask::reached_limit() { 3595 assert(_words_scanned >= _words_scanned_limit || 3596 _refs_reached >= _refs_reached_limit , 3597 "shouldn't have been called otherwise"); 3598 regular_clock_call(); 3599 } 3600 3601 void CMTask::regular_clock_call() { 3602 if (has_aborted()) return; 3603 3604 // First, we need to recalculate the words scanned and refs reached 3605 // limits for the next clock call. 3606 recalculate_limits(); 3607 3608 // During the regular clock call we do the following 3609 3610 // (1) If an overflow has been flagged, then we abort. 3611 if (_cm->has_overflown()) { 3612 set_has_aborted(); 3613 return; 3614 } 3615 3616 // If we are not concurrent (i.e. we're doing remark) we don't need 3617 // to check anything else. The other steps are only needed during 3618 // the concurrent marking phase. 3619 if (!concurrent()) return; 3620 3621 // (2) If marking has been aborted for Full GC, then we also abort. 3622 if (_cm->has_aborted()) { 3623 set_has_aborted(); 3624 statsOnly( ++_aborted_cm_aborted ); 3625 return; 3626 } 3627 3628 double curr_time_ms = os::elapsedVTime() * 1000.0; 3629 3630 // (3) If marking stats are enabled, then we update the step history. 
3631 #if _MARKING_STATS_
3632   if (_words_scanned >= _words_scanned_limit) {
3633     ++_clock_due_to_scanning;
3634   }
3635   if (_refs_reached >= _refs_reached_limit) {
3636     ++_clock_due_to_marking;
3637   }
3638 
3639   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3640   _interval_start_time_ms = curr_time_ms;
3641   _all_clock_intervals_ms.add(last_interval_ms);
3642 
3643   if (_cm->verbose_medium()) {
3644     gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3645                            "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3646                            _worker_id, last_interval_ms,
3647                            _words_scanned,
3648                            (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3649                            _refs_reached,
3650                            (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3651   }
3652 #endif // _MARKING_STATS_
3653 
3654   // (4) We check whether we should yield. If we have to, then we abort.
3655   if (SuspendibleThreadSet::should_yield()) {
3656     // We should yield. To do this we abort the task. The caller is
3657     // responsible for yielding.
3658     set_has_aborted();
3659     statsOnly( ++_aborted_yield );
3660     return;
3661   }
3662 
3663   // (5) We check whether we've reached our time quota. If we have,
3664   // then we abort.
3665   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3666   if (elapsed_time_ms > _time_target_ms) {
3667     set_has_aborted();
3668     _has_timed_out = true;
3669     statsOnly( ++_aborted_timed_out );
3670     return;
3671   }
3672 
3673   // (6) Finally, we check whether there are enough completed SATB
3674   // buffers available for processing. If there are, we abort.
3675   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3676   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3677     if (_cm->verbose_low()) {
3678       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3679                              _worker_id);
3680     }
3681     // we do need to process SATB buffers, so we'll abort and restart
3682     // the marking task to do so
3683     set_has_aborted();
3684     statsOnly( ++_aborted_satb );
3685     return;
3686   }
3687 }
3688 
3689 void CMTask::recalculate_limits() {
3690   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3691   _words_scanned_limit = _real_words_scanned_limit;
3692 
3693   _real_refs_reached_limit = _refs_reached + refs_reached_period;
3694   _refs_reached_limit = _real_refs_reached_limit;
3695 }
3696 
3697 void CMTask::decrease_limits() {
3698   // This is called when we believe that we're going to do an infrequent
3699   // operation which will increase the per-byte scanned cost (i.e. move
3700   // entries to/from the global stack). It basically tries to decrease the
3701   // scanning limit so that the clock is called earlier.
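  // For example (hypothetical numbers, assuming words_scanned_period is
  // 12*1024 words): if _real_words_scanned_limit is 100000, the adjusted
  // limit below becomes 100000 - 9216 = 90784, i.e. the clock fires after
  // roughly a quarter of the usual scanning period rather than a full one.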
3702 
3703   if (_cm->verbose_medium()) {
3704     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3705   }
3706 
3707   _words_scanned_limit = _real_words_scanned_limit -
3708     3 * words_scanned_period / 4;
3709   _refs_reached_limit = _real_refs_reached_limit -
3710     3 * refs_reached_period / 4;
3711 }
3712 
3713 void CMTask::move_entries_to_global_stack() {
3714   // local array where we'll store the entries that will be popped
3715   // from the local queue
3716   oop buffer[global_stack_transfer_size];
3717 
3718   int n = 0;
3719   oop obj;
3720   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3721     buffer[n] = obj;
3722     ++n;
3723   }
3724 
3725   if (n > 0) {
3726     // we popped at least one entry from the local queue
3727 
3728     statsOnly( ++_global_transfers_to; _local_pops += n );
3729 
3730     if (!_cm->mark_stack_push(buffer, n)) {
3731       if (_cm->verbose_low()) {
3732         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3733                                _worker_id);
3734       }
3735       set_has_aborted();
3736     } else {
3737       // the transfer was successful
3738 
3739       if (_cm->verbose_medium()) {
3740         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3741                                _worker_id, n);
3742       }
3743       statsOnly( int tmp_size = _cm->mark_stack_size();
3744                  if (tmp_size > _global_max_size) {
3745                    _global_max_size = tmp_size;
3746                  }
3747                  _global_pushes += n );
3748     }
3749   }
3750 
3751   // this operation was quite expensive, so decrease the limits
3752   decrease_limits();
3753 }
3754 
3755 void CMTask::get_entries_from_global_stack() {
3756   // local array where we'll store the entries that will be popped
3757   // from the global stack.
3758   oop buffer[global_stack_transfer_size];
3759   int n;
3760   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3761   assert(n <= global_stack_transfer_size,
3762          "we should not pop more than the given limit");
3763   if (n > 0) {
3764     // yes, we did actually pop at least one entry
3765 
3766     statsOnly( ++_global_transfers_from; _global_pops += n );
3767     if (_cm->verbose_medium()) {
3768       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3769                              _worker_id, n);
3770     }
3771     for (int i = 0; i < n; ++i) {
3772       bool success = _task_queue->push(buffer[i]);
3773       // We only call this when the local queue is empty or under a
3774       // given target limit. So, we do not expect this push to fail.
3775       assert(success, "invariant");
3776     }
3777 
3778     statsOnly( int tmp_size = _task_queue->size();
3779                if (tmp_size > _local_max_size) {
3780                  _local_max_size = tmp_size;
3781                }
3782                _local_pushes += n );
3783   }
3784 
3785   // this operation was quite expensive, so decrease the limits
3786   decrease_limits();
3787 }
3788 
3789 void CMTask::drain_local_queue(bool partially) {
3790   if (has_aborted()) return;
3791 
3792   // Decide what the target size is, depending on whether we're going to
3793   // drain it partially (so that other tasks can steal if they run out
3794   // of things to do) or totally (at the very end).
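  // For example (hypothetical values, assuming max_elems() is 16*1024 and
  // GCDrainStackTargetSize is 64): the MIN2() below yields
  // MIN2(16384/3, 64) == 64, so a partial drain stops once the queue is
  // down to 64 entries, deliberately leaving work available for stealing;
  // a total drain (target_size == 0) empties the queue.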
3795   size_t target_size;
3796   if (partially) {
3797     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3798   } else {
3799     target_size = 0;
3800   }
3801 
3802   if (_task_queue->size() > target_size) {
3803     if (_cm->verbose_high()) {
3804       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3805                              _worker_id, target_size);
3806     }
3807 
3808     oop obj;
3809     bool ret = _task_queue->pop_local(obj);
3810     while (ret) {
3811       statsOnly( ++_local_pops );
3812 
3813       if (_cm->verbose_high()) {
3814         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3815                                p2i((void*) obj));
3816       }
3817 
3818       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3819       assert(!_g1h->is_on_master_free_list(
3820                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3821 
3822       scan_object(obj);
3823 
3824       if (_task_queue->size() <= target_size || has_aborted()) {
3825         ret = false;
3826       } else {
3827         ret = _task_queue->pop_local(obj);
3828       }
3829     }
3830 
3831     if (_cm->verbose_high()) {
3832       gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3833                              _worker_id, _task_queue->size());
3834     }
3835   }
3836 }
3837 
3838 void CMTask::drain_global_stack(bool partially) {
3839   if (has_aborted()) return;
3840 
3841   // We have a policy to drain the local queue before we attempt to
3842   // drain the global stack.
3843   assert(partially || _task_queue->size() == 0, "invariant");
3844 
3845   // Decide what the target size is, depending on whether we're going to
3846   // drain it partially (so that other tasks can steal if they run out
3847   // of things to do) or totally (at the very end). Notice that,
3848   // because we move entries from the global stack in chunks or
3849   // because another task might be doing the same, we might in fact
3850   // drop below the target. But, this is not a problem.
3851   size_t target_size;
3852   if (partially) {
3853     target_size = _cm->partial_mark_stack_size_target();
3854   } else {
3855     target_size = 0;
3856   }
3857 
3858   if (_cm->mark_stack_size() > target_size) {
3859     if (_cm->verbose_low()) {
3860       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3861                              _worker_id, target_size);
3862     }
3863 
3864     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3865       get_entries_from_global_stack();
3866       drain_local_queue(partially);
3867     }
3868 
3869     if (_cm->verbose_low()) {
3870       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3871                              _worker_id, _cm->mark_stack_size());
3872     }
3873   }
3874 }
3875 
3876 // SATB Queue has several assumptions on whether to call the par or
3877 // non-par versions of the methods. This is why some of the code is
3878 // replicated. We should really get rid of the single-threaded version
3879 // of the code to simplify things.
3880 void CMTask::drain_satb_buffers() {
3881   if (has_aborted()) return;
3882 
3883   // We set this so that the regular clock knows that we're in the
3884   // middle of draining buffers and doesn't set the abort flag when it
3885   // notices that SATB buffers are available for draining. It'd be
3886   // very counterproductive if it did that. :-)
3887   _draining_satb_buffers = true;
3888 
3889   CMObjectClosure oc(this);
3890   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3891   satb_mq_set.set_closure(_worker_id, &oc);
3892 
3893   // This keeps claiming and applying the closure to completed buffers
3894   // until we run out of buffers or we need to abort.
3895   while (!has_aborted() &&
3896          satb_mq_set.apply_closure_to_completed_buffer(_worker_id)) {
3897     if (_cm->verbose_medium()) {
3898       gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3899     }
3900     statsOnly( ++_satb_buffers_processed );
3901     regular_clock_call();
3902   }
3903 
3904   _draining_satb_buffers = false;
3905 
3906   assert(has_aborted() ||
3907          concurrent() ||
3908          satb_mq_set.completed_buffers_num() == 0, "invariant");
3909 
3910   satb_mq_set.set_closure(_worker_id, NULL);
3911 
3912   // again, this was a potentially expensive operation, so decrease the
3913   // limits to get the regular clock call early
3914   decrease_limits();
3915 }
3916 
3917 void CMTask::print_stats() {
3918   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3919                          _worker_id, _calls);
3920   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3921                          _elapsed_time_ms, _termination_time_ms);
3922   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3923                          _step_times_ms.num(), _step_times_ms.avg(),
3924                          _step_times_ms.sd());
3925   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3926                          _step_times_ms.maximum(), _step_times_ms.sum());
3927 
3928 #if _MARKING_STATS_
3929   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3930                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3931                          _all_clock_intervals_ms.sd());
3932   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
3933                          _all_clock_intervals_ms.maximum(),
3934                          _all_clock_intervals_ms.sum());
3935   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
3936                          _clock_due_to_scanning, _clock_due_to_marking);
3937   gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
3938                          _objs_scanned, _objs_found_on_bitmap);
3939   gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
3940                          _local_pushes, _local_pops, _local_max_size);
3941   gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
3942                          _global_pushes, _global_pops, _global_max_size);
3943   gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
3944                          _global_transfers_to,_global_transfers_from);
3945   gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
3946   gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
3947   gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
3948                          _steal_attempts, _steals);
3949   gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
3950   gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
3951                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3952   gclog_or_tty->print_cr("    time out: %d, SATB: %d, termination: %d",
3953                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3954 #endif // _MARKING_STATS_
3955 }
3956 
3957 /*****************************************************************************
3958 
3959  The do_marking_step(time_target_ms, ...) method is the building
3960  block of the parallel marking framework. It can be called in parallel
3961  with other invocations of do_marking_step() on different tasks
3962  (but only one per task, obviously) and concurrently with the
3963  mutator threads, or during remark; hence it eliminates the need
3964  for two versions of the code. When called during remark, it will
3965  pick up from where the task left off during the concurrent marking
3966  phase. Interestingly, tasks are also claimable during evacuation
3967  pauses, since do_marking_step() ensures that it aborts before
3968  it needs to yield.
3969 
3970  The data structures that it uses to do marking work are the
3971  following:
3972 
3973  (1) Marking Bitmap. If there are gray objects that appear only
3974  on the bitmap (this happens either when dealing with an overflow
3975  or when the initial marking phase has simply marked the roots
3976  and didn't push them on the stack), then tasks claim heap
3977  regions whose bitmap they then scan to find gray objects. A
3978  global finger indicates where the end of the last claimed region
3979  is. A local finger indicates how far into the region a task has
3980  scanned. The two fingers are used to determine how to gray an
3981  object (i.e. whether simply marking it is OK, as it will be
3982  visited by a task in the future, or whether it also needs to be
3983  pushed on a stack).
3984 
3985  (2) Local Queue. The local queue of the task, which the task can
3986  access reasonably efficiently. Other tasks can steal from
3987  it when they run out of work. Throughout the marking phase, a
3988  task attempts to keep its local queue short but not totally
3989  empty, so that entries are available for stealing by other
3990  tasks. Only when there is no more work will a task totally
3991  drain its local queue.
3992 
3993  (3) Global Mark Stack. This handles local queue overflow. During
3994  marking only sets of entries are moved between it and the local
3995  queues, as access to it requires a mutex and more fine-grained
3996  interaction with it, which might cause contention. If it
3997  overflows, then the marking phase should restart and iterate
3998  over the bitmap to identify gray objects. Throughout the marking
3999  phase, tasks attempt to keep the global mark stack at a small
4000  length but not totally empty, so that entries are available for
4001  popping by other tasks. Only when there is no more work will tasks
4002  totally drain the global mark stack.
4003 
4004  (4) SATB Buffer Queue. This is where completed SATB buffers are
4005  made available. Buffers are regularly removed from this queue
4006  and scanned for roots, so that the queue doesn't get too
4007  long. During remark, all completed buffers are processed, as
4008  well as the filled-in parts of any uncompleted buffers.
4009 
4010  The do_marking_step() method tries to abort when the time target
4011  has been reached. There are a few other cases when the
4012  do_marking_step() method also aborts:
4013 
4014  (1) When the marking phase has been aborted (after a Full GC).
4015 
4016  (2) When a global overflow (on the global stack) has been
4017  triggered. Before the task aborts, it will actually sync up with
4018  the other tasks to ensure that all the marking data structures
4019  (local queues, stacks, fingers etc.) are re-initialized so that
4020  when do_marking_step() completes, the marking phase can
4021  immediately restart.
4022 
4023  (3) When enough completed SATB buffers are available. The
4024  do_marking_step() method only tries to drain SATB buffers right
4025  at the beginning. So, if enough buffers are available, the
4026  marking step aborts and the SATB buffers are processed at
4027  the beginning of the next invocation.
4028 
4029  (4) To yield. When we have to yield, we abort and yield
4030  right at the end of do_marking_step(). This saves us from a lot
4031  of hassle as, by yielding, we might allow a Full GC. If this
4032  happens then objects will be compacted underneath our feet, the
4033  heap might shrink, etc. We save checking for this by just
4034  aborting and doing the yield right at the end.
4035 
4036  From the above it follows that the do_marking_step() method should
4037  be called in a loop (or, otherwise, regularly) until it completes.
4038 
4039  If a marking step completes without its has_aborted() flag being
4040  true, it means it has completed the current marking phase (and
4041  also all other marking tasks have done so and have all synced up).
4042 
4043  A method called regular_clock_call() is invoked "regularly" (in
4044  sub-ms intervals) throughout marking. It is this clock method that
4045  checks all the abort conditions which were mentioned above and
4046  decides when the task should abort. A work-based scheme is used to
4047  trigger this clock method: it is called when the number of object
4048  words the marking phase has scanned or the number of references the
4049  marking phase has visited reaches a given limit. Additional
4050  invocations of the clock method have been planted in a few other
4051  strategic places too. The initial reason for the clock method was
4052  to avoid calling vtime too regularly, as it is quite expensive. So,
4053  once it was in place, it was natural to piggy-back all the other
4054  conditions on it too and not constantly check them throughout the code.
4055 
4056  If do_termination is true then do_marking_step will enter its
4057  termination protocol.
4058 
4059  The value of is_serial must be true when do_marking_step is being
4060  called serially (i.e. by the VMThread) and do_marking_step should
4061  skip any synchronization in the termination and overflow code.
4062  Examples include the serial remark code and the serial reference
4063  processing closures.
4064 
4065  The value of is_serial must be false when do_marking_step is
4066  being called by any of the worker threads in a work gang.
4067  Examples include the concurrent marking code (CMMarkingTask),
4068  the MT remark code, and the MT reference processing closures.
4069 
4070 *****************************************************************************/
4071 
4072 void CMTask::do_marking_step(double time_target_ms,
4073                              bool do_termination,
4074                              bool is_serial) {
4075   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4076   assert(concurrent() == _cm->concurrent(), "they should be the same");
4077 
4078   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4079   assert(_task_queues != NULL, "invariant");
4080   assert(_task_queue != NULL, "invariant");
4081   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4082 
4083   assert(!_claimed,
4084          "only one thread should claim this task at any one time");
4085 
4086   // OK, this doesn't safeguard against all possible scenarios, as it is
4087   // possible for two threads to set the _claimed flag at the same
4088   // time. But it is only for debugging purposes anyway and it will
4089   // catch most problems.
4090   _claimed = true;
4091 
4092   _start_time_ms = os::elapsedVTime() * 1000.0;
4093   statsOnly( _interval_start_time_ms = _start_time_ms );
4094 
4095   // If do_stealing is true then do_marking_step will attempt to
4096   // steal work from the other CMTasks. It only makes sense to
4097   // enable stealing when the termination protocol is enabled
4098   // and do_marking_step() is not being called serially.
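  // In tabular form (derived from the expression below):
  //
  //   do_termination  is_serial  ->  do_stealing
  //   false           any            false  (no termination protocol)
  //   true            true           false  (serial caller: nothing to steal)
  //   true            false          true   (parallel caller in a work gang)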
4099 bool do_stealing = do_termination && !is_serial; 4100 4101 double diff_prediction_ms = 4102 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 4103 _time_target_ms = time_target_ms - diff_prediction_ms; 4104 4105 // set up the variables that are used in the work-based scheme to 4106 // call the regular clock method 4107 _words_scanned = 0; 4108 _refs_reached = 0; 4109 recalculate_limits(); 4110 4111 // clear all flags 4112 clear_has_aborted(); 4113 _has_timed_out = false; 4114 _draining_satb_buffers = false; 4115 4116 ++_calls; 4117 4118 if (_cm->verbose_low()) { 4119 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 4120 "target = %1.2lfms >>>>>>>>>>", 4121 _worker_id, _calls, _time_target_ms); 4122 } 4123 4124 // Set up the bitmap and oop closures. Anything that uses them is 4125 // eventually called from this method, so it is OK to allocate these 4126 // statically. 4127 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 4128 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 4129 set_cm_oop_closure(&cm_oop_closure); 4130 4131 if (_cm->has_overflown()) { 4132 // This can happen if the mark stack overflows during a GC pause 4133 // and this task, after a yield point, restarts. We have to abort 4134 // as we need to get into the overflow protocol which happens 4135 // right at the end of this task. 4136 set_has_aborted(); 4137 } 4138 4139 // First drain any available SATB buffers. After this, we will not 4140 // look at SATB buffers before the next invocation of this method. 4141 // If enough completed SATB buffers are queued up, the regular clock 4142 // will abort this task so that it restarts. 4143 drain_satb_buffers(); 4144 // ...then partially drain the local queue and the global stack 4145 drain_local_queue(true); 4146 drain_global_stack(true); 4147 4148 do { 4149 if (!has_aborted() && _curr_region != NULL) { 4150 // This means that we're already holding on to a region. 4151 assert(_finger != NULL, "if region is not NULL, then the finger " 4152 "should not be NULL either"); 4153 4154 // We might have restarted this task after an evacuation pause 4155 // which might have evacuated the region we're holding on to 4156 // underneath our feet. Let's read its limit again to make sure 4157 // that we do not iterate over a region of the heap that 4158 // contains garbage (update_region_limit() will also move 4159 // _finger to the start of the region if it is found empty). 4160 update_region_limit(); 4161 // We will start from _finger not from the start of the region, 4162 // as we might be restarting this task after aborting half-way 4163 // through scanning this region. In this case, _finger points to 4164 // the address where we last found a marked object. If this is a 4165 // fresh region, _finger points to start(). 4166 MemRegion mr = MemRegion(_finger, _region_limit); 4167 4168 if (_cm->verbose_low()) { 4169 gclog_or_tty->print_cr("[%u] we're scanning part " 4170 "["PTR_FORMAT", "PTR_FORMAT") " 4171 "of region "HR_FORMAT, 4172 _worker_id, p2i(_finger), p2i(_region_limit), 4173 HR_FORMAT_PARAMS(_curr_region)); 4174 } 4175 4176 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 4177 "humongous regions should go around loop once only"); 4178 4179 // Some special cases: 4180 // If the memory region is empty, we can just give up the region. 
4181 // If the current region is humongous then we only need to check 4182 // the bitmap for the bit associated with the start of the object, 4183 // scan the object if it's live, and give up the region. 4184 // Otherwise, let's iterate over the bitmap of the part of the region 4185 // that is left. 4186 // If the iteration is successful, give up the region. 4187 if (mr.is_empty()) { 4188 giveup_current_region(); 4189 regular_clock_call(); 4190 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 4191 if (_nextMarkBitMap->isMarked(mr.start())) { 4192 // The object is marked - apply the closure 4193 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4194 bitmap_closure.do_bit(offset); 4195 } 4196 // Even if this task aborted while scanning the humongous object 4197 // we can (and should) give up the current region. 4198 giveup_current_region(); 4199 regular_clock_call(); 4200 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4201 giveup_current_region(); 4202 regular_clock_call(); 4203 } else { 4204 assert(has_aborted(), "currently the only way to do so"); 4205 // The only way to abort the bitmap iteration is to return 4206 // false from the do_bit() method. However, inside the 4207 // do_bit() method we move the _finger to point to the 4208 // object currently being looked at. So, if we bail out, we 4209 // have definitely set _finger to something non-null. 4210 assert(_finger != NULL, "invariant"); 4211 4212 // Region iteration was actually aborted. So now _finger 4213 // points to the address of the object we last scanned. If we 4214 // leave it there, when we restart this task, we will rescan 4215 // the object. It is easy to avoid this. We move the finger by 4216 // enough to point to the next possible object header (the 4217 // bitmap knows by how much we need to move it as it knows its 4218 // granularity). 4219 assert(_finger < _region_limit, "invariant"); 4220 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4221 // Check if bitmap iteration was aborted while scanning the last object 4222 if (new_finger >= _region_limit) { 4223 giveup_current_region(); 4224 } else { 4225 move_finger_to(new_finger); 4226 } 4227 } 4228 } 4229 // At this point we have either completed iterating over the 4230 // region we were holding on to, or we have aborted. 4231 4232 // We then partially drain the local queue and the global stack. 4233 // (Do we really need this?) 4234 drain_local_queue(true); 4235 drain_global_stack(true); 4236 4237 // Read the note on the claim_region() method on why it might 4238 // return NULL with potentially more regions available for 4239 // claiming and why we have to check out_of_regions() to determine 4240 // whether we're done or not. 4241 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4242 // We are going to try to claim a new region. We should have 4243 // given up on the previous one. 4244 // Separated the asserts so that we know which one fires. 
4245       assert(_curr_region == NULL, "invariant");
4246       assert(_finger == NULL, "invariant");
4247       assert(_region_limit == NULL, "invariant");
4248       if (_cm->verbose_low()) {
4249         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4250       }
4251       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4252       if (claimed_region != NULL) {
4253         // Yes, we managed to claim one
4254         statsOnly( ++_regions_claimed );
4255 
4256         if (_cm->verbose_low()) {
4257           gclog_or_tty->print_cr("[%u] we successfully claimed "
4258                                  "region "PTR_FORMAT,
4259                                  _worker_id, p2i(claimed_region));
4260         }
4261 
4262         setup_for_region(claimed_region);
4263         assert(_curr_region == claimed_region, "invariant");
4264       }
4265       // It is important to call the regular clock here. It might take
4266       // a while to claim a region if, for example, we hit a large
4267       // block of empty regions. So we need to call the regular clock
4268       // method once round the loop to make sure it's called
4269       // frequently enough.
4270       regular_clock_call();
4271     }
4272 
4273     if (!has_aborted() && _curr_region == NULL) {
4274       assert(_cm->out_of_regions(),
4275              "at this point we should be out of regions");
4276     }
4277   } while ( _curr_region != NULL && !has_aborted());
4278 
4279   if (!has_aborted()) {
4280     // We cannot check whether the global stack is empty, since other
4281     // tasks might be pushing objects to it concurrently.
4282     assert(_cm->out_of_regions(),
4283            "at this point we should be out of regions");
4284 
4285     if (_cm->verbose_low()) {
4286       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4287     }
4288 
4289     // Try to reduce the number of available SATB buffers so that
4290     // remark has less work to do.
4291     drain_satb_buffers();
4292   }
4293 
4294   // Since we've done everything else, we can now totally drain the
4295   // local queue and global stack.
4296   drain_local_queue(false);
4297   drain_global_stack(false);
4298 
4299   // Attempt at work stealing from other tasks' queues.
4300   if (do_stealing && !has_aborted()) {
4301     // We have not aborted. This means that we have finished all that
4302     // we could. Let's try to do some stealing...
4303 
4304     // We cannot check whether the global stack is empty, since other
4305     // tasks might be pushing objects to it concurrently.
4306     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4307            "only way to reach here");
4308 
4309     if (_cm->verbose_low()) {
4310       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4311     }
4312 
4313     while (!has_aborted()) {
4314       oop obj;
4315       statsOnly( ++_steal_attempts );
4316 
4317       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4318         if (_cm->verbose_medium()) {
4319           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4320                                  _worker_id, p2i((void*) obj));
4321         }
4322 
4323         statsOnly( ++_steals );
4324 
4325         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4326                "any stolen object should be marked");
4327         scan_object(obj);
4328 
4329         // And since we're towards the end, let's totally drain the
4330         // local queue and global stack.
4331         drain_local_queue(false);
4332         drain_global_stack(false);
4333       } else {
4334         break;
4335       }
4336     }
4337   }
4338 
4339   // If we are about to wrap up and go into termination, check if we
4340   // should raise the overflow flag.
4341   if (do_termination && !has_aborted()) {
4342     if (_cm->force_overflow()->should_force()) {
4343       _cm->set_has_overflown();
4344       regular_clock_call();
4345     }
4346   }
4347 
4348   // We still haven't aborted. Now, let's try to get into the
4349   // termination protocol.
4350   if (do_termination && !has_aborted()) {
4351     // We cannot check whether the global stack is empty, since other
4352     // tasks might be concurrently pushing objects on it.
4353     // Separated the asserts so that we know which one fires.
4354     assert(_cm->out_of_regions(), "only way to reach here");
4355     assert(_task_queue->size() == 0, "only way to reach here");
4356 
4357     if (_cm->verbose_low()) {
4358       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4359     }
4360 
4361     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4362 
4363     // The CMTask class also extends the TerminatorTerminator class,
4364     // hence its should_exit_termination() method will also decide
4365     // whether to exit the termination protocol or not.
4366     bool finished = (is_serial ||
4367                      _cm->terminator()->offer_termination(this));
4368     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4369     _termination_time_ms +=
4370       termination_end_time_ms - _termination_start_time_ms;
4371 
4372     if (finished) {
4373       // We're all done.
4374 
4375       if (_worker_id == 0) {
4376         // let's allow task 0 to do this
4377         if (concurrent()) {
4378           assert(_cm->concurrent_marking_in_progress(), "invariant");
4379           // we need to set this to false before the next
4380           // safepoint. This way we ensure that the marking phase
4381           // doesn't observe any more heap expansions.
4382           _cm->clear_concurrent_marking_in_progress();
4383         }
4384       }
4385 
4386       // We can now guarantee that the global stack is empty, since
4387       // all other tasks have finished. We separated the guarantees so
4388       // that, if a condition is false, we can immediately find out
4389       // which one.
4390       guarantee(_cm->out_of_regions(), "only way to reach here");
4391       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4392       guarantee(_task_queue->size() == 0, "only way to reach here");
4393       guarantee(!_cm->has_overflown(), "only way to reach here");
4394       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4395 
4396       if (_cm->verbose_low()) {
4397         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4398       }
4399     } else {
4400       // Apparently there's more work to do. Let's abort this task. It
4401       // will be restarted and we can hopefully find more things to do.
4402 
4403       if (_cm->verbose_low()) {
4404         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4405                                _worker_id);
4406       }
4407 
4408       set_has_aborted();
4409       statsOnly( ++_aborted_termination );
4410     }
4411   }
4412 
4413   // Mainly for debugging purposes to make sure that a pointer to the
4414   // closure which was statically allocated in this frame doesn't
4415   // escape it by accident.
4416   set_cm_oop_closure(NULL);
4417   double end_time_ms = os::elapsedVTime() * 1000.0;
4418   double elapsed_time_ms = end_time_ms - _start_time_ms;
4419   // Update the step history.
4420   _step_times_ms.add(elapsed_time_ms);
4421 
4422   if (has_aborted()) {
4423     // The task was aborted for some reason.
4424 
4425     statsOnly( ++_aborted );
4426 
4427     if (_has_timed_out) {
4428       double diff_ms = elapsed_time_ms - _time_target_ms;
4429       // Keep statistics of how well we did with respect to hitting
4430       // our target only if we actually timed out (if we aborted for
4431       // other reasons, then the results might get skewed).
4432       _marking_step_diffs_ms.add(diff_ms);
4433     }
4434 
4435     if (_cm->has_overflown()) {
4436       // This is the interesting one. We aborted because a global
4437       // overflow was raised. This means we have to restart the
4438       // marking phase and start iterating over regions.
However, in 4439 // order to do this we have to make sure that all tasks stop 4440 // what they are doing and re-initialize in a safe manner. We 4441 // will achieve this with the use of two barrier sync points. 4442 4443 if (_cm->verbose_low()) { 4444 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4445 } 4446 4447 if (!is_serial) { 4448 // We only need to enter the sync barrier if being called 4449 // from a parallel context 4450 _cm->enter_first_sync_barrier(_worker_id); 4451 4452 // When we exit this sync barrier we know that all tasks have 4453 // stopped doing marking work. So, it's now safe to 4454 // re-initialize our data structures. At the end of this method, 4455 // task 0 will clear the global data structures. 4456 } 4457 4458 statsOnly( ++_aborted_overflow ); 4459 4460 // We clear the local state of this task... 4461 clear_region_fields(); 4462 4463 if (!is_serial) { 4464 // ...and enter the second barrier. 4465 _cm->enter_second_sync_barrier(_worker_id); 4466 } 4467 // At this point, if we're during the concurrent phase of 4468 // marking, everything has been re-initialized and we're 4469 // ready to restart. 4470 } 4471 4472 if (_cm->verbose_low()) { 4473 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4474 "elapsed = %1.2lfms <<<<<<<<<<", 4475 _worker_id, _time_target_ms, elapsed_time_ms); 4476 if (_cm->has_aborted()) { 4477 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4478 _worker_id); 4479 } 4480 } 4481 } else { 4482 if (_cm->verbose_low()) { 4483 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4484 "elapsed = %1.2lfms <<<<<<<<<<", 4485 _worker_id, _time_target_ms, elapsed_time_ms); 4486 } 4487 } 4488 4489 _claimed = false; 4490 } 4491 4492 CMTask::CMTask(uint worker_id, 4493 ConcurrentMark* cm, 4494 size_t* marked_bytes, 4495 BitMap* card_bm, 4496 CMTaskQueue* task_queue, 4497 CMTaskQueueSet* task_queues) 4498 : _g1h(G1CollectedHeap::heap()), 4499 _worker_id(worker_id), _cm(cm), 4500 _claimed(false), 4501 _nextMarkBitMap(NULL), _hash_seed(17), 4502 _task_queue(task_queue), 4503 _task_queues(task_queues), 4504 _cm_oop_closure(NULL), 4505 _marked_bytes_array(marked_bytes), 4506 _card_bm(card_bm) { 4507 guarantee(task_queue != NULL, "invariant"); 4508 guarantee(task_queues != NULL, "invariant"); 4509 4510 statsOnly( _clock_due_to_scanning = 0; 4511 _clock_due_to_marking = 0 ); 4512 4513 _marking_step_diffs_ms.add(0.5); 4514 } 4515 4516 // These are formatting macros that are used below to ensure 4517 // consistent formatting. The *_H_* versions are used to format the 4518 // header for a particular value and they should be kept consistent 4519 // with the corresponding macro. Also note that most of the macros add 4520 // the necessary white space (as a prefix) which makes them a bit 4521 // easier to compose. 4522 4523 // All the output lines are prefixed with this string to be able to 4524 // identify them easily in a large log file. 
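// Since adjacent C string literals concatenate, juxtaposing these macros
// builds one format string. For instance (an abbreviated sketch of the
// header printing further below):
//
//   _out->print_cr(G1PPRL_LINE_PREFIX G1PPRL_TYPE_H_FORMAT G1PPRL_BYTE_H_FORMAT,
//                  "type", "used");
//
// is equivalent to _out->print_cr("###" " %4s" " %9s", "type", "used").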
4525 #define G1PPRL_LINE_PREFIX "###"
4526 
4527 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
4528 #ifdef _LP64
4529 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4530 #else // _LP64
4531 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4532 #endif // _LP64
4533 
4534 // For per-region info
4535 #define G1PPRL_TYPE_FORMAT " %-4s"
4536 #define G1PPRL_TYPE_H_FORMAT " %4s"
4537 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
4538 #define G1PPRL_BYTE_H_FORMAT " %9s"
4539 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4540 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4541 
4542 // For summary info
4543 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
4544 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
4545 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
4546 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
4547 
4548 G1PrintRegionLivenessInfoClosure::
4549 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4550   : _out(out),
4551     _total_used_bytes(0), _total_capacity_bytes(0),
4552     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4553     _hum_used_bytes(0), _hum_capacity_bytes(0),
4554     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4555     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4556   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4557   MemRegion g1_reserved = g1h->g1_reserved();
4558   double now = os::elapsedTime();
4559 
4560   // Print the header of the output.
4561   _out->cr();
4562   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4563   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4564                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4565                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4566                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4567                  HeapRegion::GrainBytes);
4568   _out->print_cr(G1PPRL_LINE_PREFIX);
4569   _out->print_cr(G1PPRL_LINE_PREFIX
4570                  G1PPRL_TYPE_H_FORMAT
4571                  G1PPRL_ADDR_BASE_H_FORMAT
4572                  G1PPRL_BYTE_H_FORMAT
4573                  G1PPRL_BYTE_H_FORMAT
4574                  G1PPRL_BYTE_H_FORMAT
4575                  G1PPRL_DOUBLE_H_FORMAT
4576                  G1PPRL_BYTE_H_FORMAT
4577                  G1PPRL_BYTE_H_FORMAT,
4578                  "type", "address-range",
4579                  "used", "prev-live", "next-live", "gc-eff",
4580                  "remset", "code-roots");
4581   _out->print_cr(G1PPRL_LINE_PREFIX
4582                  G1PPRL_TYPE_H_FORMAT
4583                  G1PPRL_ADDR_BASE_H_FORMAT
4584                  G1PPRL_BYTE_H_FORMAT
4585                  G1PPRL_BYTE_H_FORMAT
4586                  G1PPRL_BYTE_H_FORMAT
4587                  G1PPRL_DOUBLE_H_FORMAT
4588                  G1PPRL_BYTE_H_FORMAT
4589                  G1PPRL_BYTE_H_FORMAT,
4590                  "", "",
4591                  "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4592                  "(bytes)", "(bytes)");
4593 }
4594 
4595 // It takes as a parameter a reference to one of the _hum_* fields; it
4596 // deduces the corresponding value for a region in a humongous region
4597 // series (either the region size, or what's left if the _hum_* field
4598 // is < the region size), and updates the _hum_* field accordingly.
4599 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4600   size_t bytes = 0;
4601   // The > 0 check is to deal with the prev and next live bytes which
4602   // could be 0.
4603   if (*hum_bytes > 0) {
4604     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4605     *hum_bytes -= bytes;
4606   }
4607   return bytes;
4608 }
4609 
4610 // It deduces the values for a region in a humongous region series
4611 // from the _hum_* fields and updates those accordingly. It assumes
4612 // that the _hum_* fields have already been set up from the "starts
4613 // humongous" region and we visit the regions in address order.
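// For example (hypothetical sizes): with 1 MB regions, a humongous series
// covering 2.5 MB sets _hum_used_bytes to 2.5 MB at the "starts humongous"
// region; the three regions of the series then report 1 MB, 1 MB and
// 0.5 MB respectively as the field is drained to zero.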
4614 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4615 size_t* capacity_bytes, 4616 size_t* prev_live_bytes, 4617 size_t* next_live_bytes) { 4618 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4619 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4620 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4621 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4622 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4623 } 4624 4625 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4626 const char* type = r->get_type_str(); 4627 HeapWord* bottom = r->bottom(); 4628 HeapWord* end = r->end(); 4629 size_t capacity_bytes = r->capacity(); 4630 size_t used_bytes = r->used(); 4631 size_t prev_live_bytes = r->live_bytes(); 4632 size_t next_live_bytes = r->next_live_bytes(); 4633 double gc_eff = r->gc_efficiency(); 4634 size_t remset_bytes = r->rem_set()->mem_size(); 4635 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4636 4637 if (r->is_starts_humongous()) { 4638 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4639 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4640 "they should have been zeroed after the last time we used them"); 4641 // Set up the _hum_* fields. 4642 _hum_capacity_bytes = capacity_bytes; 4643 _hum_used_bytes = used_bytes; 4644 _hum_prev_live_bytes = prev_live_bytes; 4645 _hum_next_live_bytes = next_live_bytes; 4646 get_hum_bytes(&used_bytes, &capacity_bytes, 4647 &prev_live_bytes, &next_live_bytes); 4648 end = bottom + HeapRegion::GrainWords; 4649 } else if (r->is_continues_humongous()) { 4650 get_hum_bytes(&used_bytes, &capacity_bytes, 4651 &prev_live_bytes, &next_live_bytes); 4652 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4653 } 4654 4655 _total_used_bytes += used_bytes; 4656 _total_capacity_bytes += capacity_bytes; 4657 _total_prev_live_bytes += prev_live_bytes; 4658 _total_next_live_bytes += next_live_bytes; 4659 _total_remset_bytes += remset_bytes; 4660 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4661 4662 // Print a line for this particular region. 4663 _out->print_cr(G1PPRL_LINE_PREFIX 4664 G1PPRL_TYPE_FORMAT 4665 G1PPRL_ADDR_BASE_FORMAT 4666 G1PPRL_BYTE_FORMAT 4667 G1PPRL_BYTE_FORMAT 4668 G1PPRL_BYTE_FORMAT 4669 G1PPRL_DOUBLE_FORMAT 4670 G1PPRL_BYTE_FORMAT 4671 G1PPRL_BYTE_FORMAT, 4672 type, p2i(bottom), p2i(end), 4673 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4674 remset_bytes, strong_code_roots_bytes); 4675 4676 return false; 4677 } 4678 4679 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4680 // add static memory usages to remembered set sizes 4681 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4682 // Print the footer of the output. 
4683 _out->print_cr(G1PPRL_LINE_PREFIX); 4684 _out->print_cr(G1PPRL_LINE_PREFIX 4685 " SUMMARY" 4686 G1PPRL_SUM_MB_FORMAT("capacity") 4687 G1PPRL_SUM_MB_PERC_FORMAT("used") 4688 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4689 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4690 G1PPRL_SUM_MB_FORMAT("remset") 4691 G1PPRL_SUM_MB_FORMAT("code-roots"), 4692 bytes_to_mb(_total_capacity_bytes), 4693 bytes_to_mb(_total_used_bytes), 4694 perc(_total_used_bytes, _total_capacity_bytes), 4695 bytes_to_mb(_total_prev_live_bytes), 4696 perc(_total_prev_live_bytes, _total_capacity_bytes), 4697 bytes_to_mb(_total_next_live_bytes), 4698 perc(_total_next_live_bytes, _total_capacity_bytes), 4699 bytes_to_mb(_total_remset_bytes), 4700 bytes_to_mb(_total_strong_code_roots_bytes)); 4701 _out->cr(); 4702 }