/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize == heap_rs.size()>>LogHeapWordSize;
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}
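// An illustrative worked example of the bit map granularity: on a 64-bit
// VM with the default object alignment, MinObjAlignment is one word, so
// _shifter == log2(1) == 0 and the map keeps one bit per HeapWord. A 1 GB
// heap is 2^27 words and therefore needs 2^27 bits == 16 MB of bit map.
// With a hypothetical 8-word alignment, _shifter would be 3 and
// heapWordToOffset() would fold eight words onto each bit:
//
//   size_t offset = (addr - _bmStartWord) >> _shifter;  // 1 bit / 8 words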
bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}

void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // Clip the range to the part of the heap this bit map covers. Note that
  // intersection() returns the clipped region; it does not modify mr in place.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  // See markRange(): clip the range and use the returned intersection.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}
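// For a sense of the mark stack's footprint (illustrative numbers, not
// the flag defaults): a capacity of 128K entries reserves
// 128K * sizeof(oop) == 1 MB of backing store on a 64-bit VM, rounded up
// by allocation_align_size_up(), and commits it all eagerly.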
void CMMarkStack::expand() {
  // Called during remark if we have overflowed the marking stack while marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up the existing stack until we have managed to
  // get the doubled capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}
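// Why the cmpxchg retry loops above are safe, as a two-thread sketch
// (illustrative): suppose _index == 5 and threads A and B both read
// index == 5 in par_push().
//
//   A: Atomic::cmpxchg(6, &_index, 5) returns 5 -> A won, writes _base[5]
//   B: Atomic::cmpxchg(6, &_index, 5) returns 6 -> B lost, retries at 6
//
// A slot is written only by the thread whose cmpxchg claimed it, so two
// pushers can never store into the same _base entry.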
void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}
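// claim_next() below uses a double-checked pattern: a racy read of
// _next_survivor filters out the common "nothing left to claim" case
// without taking the lock, and the field is re-read under
// RootRegionScan_lock before a region is actually handed out. An
// illustrative interleaving with two workers:
//
//   W1: sees _next_survivor == R, takes the lock, claims R and
//       advances _next_survivor
//   W2: also saw the stale R, takes the lock, re-reads _next_survivor
//       and claims the following region (or gets NULL), never R again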
HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}
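// scale_parallel_threads() worked out for a few inputs (integer
// division): 1 -> MAX2(3/4, 1U) == 1, 4 -> MAX2(6/4, 1U) == 1,
// 8 -> MAX2(10/4, 1U) == 2, 16 -> MAX2(18/4, 1U) == 4. Roughly a
// quarter of the parallel GC threads, but never zero.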
ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(log2_intptr(MinObjAlignment)),
  _markBitMap2(log2_intptr(MinObjAlignment)),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
           CardTableModRefBS::card_shift,
           false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads = 0;
    _max_parallel_marking_threads = 0;
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
        (double) os::processor_count();
      double sleep_factor =
        (1.0 - marking_task_overhead) / marking_task_overhead;

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor = sleep_factor;
      _marking_task_overhead = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }
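  // A worked example of the G1MarkingOverheadPercent arithmetic above,
  // with assumed (non-default) values: G1MarkingOverheadPercent = 10,
  // MaxGCPauseMillis = 200, GCPauseIntervalMillis = 1000 and 8 processors:
  //
  //   marking_overhead      = 10 / 100.0          = 0.10
  //   overall_cm_overhead   = 200 * 0.10 / 1000   = 0.02
  //   cpu_ratio             = 1.0 / 8             = 0.125
  //   marking_thread_num    = ceil(0.02 / 0.125)  = 1
  //   marking_task_overhead = 0.02 / 1 * 8        = 0.16
  //   sleep_factor          = (1 - 0.16) / 0.16   = 5.25
  //
  // i.e. a single marking thread that sleeps 5.25x as long as it last
  // ran, keeping marking near the requested 2% of overall CPU time.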
  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}
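// An example of the card-number biasing set up in the constructor
// (assuming 512-byte cards, i.e. CardTableModRefBS::card_shift == 9): a
// heap reserved at 0x700000000 gives _heap_bottom_card_num =
// 0x700000000 >> 9 == 0x3800000. Subtracting that bias from
// (addr >> card_shift) maps the first card of the heap onto bit 0 of
// each worker's counting card bitmap.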
void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.
    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions
  // at the end of evacuation pauses, when tasks are inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // Clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start = _nextMarkBitMap->startWord();
  HeapWord* end = _nextMarkBitMap->endWord();
  HeapWord* cur = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur,next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}
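// Sizing note for the chunked clearing above: chunkSize is M (2^20)
// words, i.e. 8 MB per chunk on a 64-bit VM. A 16 GB heap is 2^31
// words, so clearing it offers to yield roughly 2048 times, keeping the
// concurrent mark thread responsive to safepoint requests.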
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the number of remaining forced overflows will
  // decrease at every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}
/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for a Full GC or an evacuation pause to occur while it
 * is suspended. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}
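// The deadlock described in the comment before these two methods, as a
// concrete (illustrative) trace with two marking threads:
//
//   T1: blocks in _first_overflow_barrier_sync.enter(), waiting for T2,
//       while still joined to the STS
//   VM: requests a safepoint; it must wait until every thread joined to
//       the STS reaches a yield point
//   T2: calls do_yield_check(), yields, and waits for the safepoint to
//       complete before it can reach the barrier
//
// T1 waits on T2 (barrier), T2 waits on the safepoint, and the
// safepoint waits on T1 (STS): a cycle. Leaving the STS before the
// barrier sync breaks the first edge.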
#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
        gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                               "overhead %1.4lf",
                               elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                               the_task->conc_overhead(os::elapsedTime()) * 8.0);
        gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                               elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};
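// scanRootRegions() below races against the next evacuation pause: a
// pause that starts while root regions are still being scanned has to
// wait for the scan to complete. A minimal sketch of the waiting side,
// using wait_until_scan_finished() from CMRootRegions above (it returns
// immediately if no scan is in progress):
//
//   _cm->root_regions()->wait_until_scan_finished();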
void ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_strong_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}
void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}
// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};
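// A worked example of the end_idx adjustment above, assuming 512-byte
// cards and ignoring the heap-bottom bias: an object occupying
// [0x200, 0x5f0) has start_idx == 0x200 >> 9 == 1 and an initial
// end_idx == 0x5f0 >> 9 == 2. Since 0x5f0 is not card aligned, the
// object spills into card 2, so end_idx is bumped to 3 and
// set_card_bitmap_range() covers the half-open range [1, 3), i.e.
// exactly cards 1 and 2.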
// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrs_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        if (_verbose) {
          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                 "expected: %s, actual: %s",
                                 hr->hrs_index(), i,
                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
        }
        failures += 1;
      }
    }

    if (failures > 0 && _verbose) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
                             HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};
1593 BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); 1594 1595 bool expected = _exp_region_bm->at(index); 1596 bool actual = _region_bm->at(index); 1597 if (expected && !actual) { 1598 if (_verbose) { 1599 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: " 1600 "expected: %s, actual: %s", 1601 hr->hrs_index(), 1602 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1603 } 1604 failures += 1; 1605 } 1606 1607 // Verify that the card bit maps for the cards spanned by the current 1608 // region match. We have an error if we have a set bit in the expected 1609 // bit map and the corresponding bit in the actual bitmap is not set. 1610 1611 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1612 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1613 1614 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1615 expected = _exp_card_bm->at(i); 1616 actual = _card_bm->at(i); 1617 1618 if (expected && !actual) { 1619 if (_verbose) { 1620 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1621 "expected: %s, actual: %s", 1622 hr->hrs_index(), i, 1623 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1624 } 1625 failures += 1; 1626 } 1627 } 1628 1629 if (failures > 0 && _verbose) { 1630 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1631 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1632 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1633 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1634 } 1635 1636 _failures += failures; 1637 1638 // We could stop iteration over the heap when we 1639 // find the first violating region by returning true. 1640 return false; 1641 } 1642 }; 1643 1644 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1645 protected: 1646 G1CollectedHeap* _g1h; 1647 ConcurrentMark* _cm; 1648 BitMap* _actual_region_bm; 1649 BitMap* _actual_card_bm; 1650 1651 uint _n_workers; 1652 1653 BitMap* _expected_region_bm; 1654 BitMap* _expected_card_bm; 1655 1656 int _failures; 1657 bool _verbose; 1658 1659 public: 1660 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1661 BitMap* region_bm, BitMap* card_bm, 1662 BitMap* expected_region_bm, BitMap* expected_card_bm) 1663 : AbstractGangTask("G1 verify final counting"), 1664 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1665 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1666 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1667 _failures(0), _verbose(false), 1668 _n_workers(0) { 1669 assert(VerifyDuringGC, "don't call this otherwise"); 1670 1671 // Use the value already set as the number of active threads 1672 // in the call to run_task(). 
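    // In other words, _n_workers caches the gang's active_workers() value
    // so that work() can later assert worker_id < _n_workers against the
    // same number that run_task() will use.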
1673 if (G1CollectedHeap::use_parallel_gc_threads()) { 1674 assert( _g1h->workers()->active_workers() > 0, 1675 "Should have been previously set"); 1676 _n_workers = _g1h->workers()->active_workers(); 1677 } else { 1678 _n_workers = 1; 1679 } 1680 1681 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1682 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1683 1684 _verbose = _cm->verbose_medium(); 1685 } 1686 1687 void work(uint worker_id) { 1688 assert(worker_id < _n_workers, "invariant"); 1689 1690 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1691 _actual_region_bm, _actual_card_bm, 1692 _expected_region_bm, 1693 _expected_card_bm, 1694 _verbose); 1695 1696 if (G1CollectedHeap::use_parallel_gc_threads()) { 1697 _g1h->heap_region_par_iterate_chunked(&verify_cl, 1698 worker_id, 1699 _n_workers, 1700 HeapRegion::VerifyCountClaimValue); 1701 } else { 1702 _g1h->heap_region_iterate(&verify_cl); 1703 } 1704 1705 Atomic::add(verify_cl.failures(), &_failures); 1706 } 1707 1708 int failures() const { return _failures; } 1709 }; 1710 1711 // Closure that finalizes the liveness counting data. 1712 // Used during the cleanup pause. 1713 // Sets the bits corresponding to the interval [NTAMS, top] 1714 // (which contains the implicitly live objects) in the 1715 // card liveness bitmap. Also sets the bit for each region, 1716 // containing live data, in the region liveness bitmap. 1717 1718 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1719 public: 1720 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1721 BitMap* region_bm, 1722 BitMap* card_bm) : 1723 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1724 1725 bool doHeapRegion(HeapRegion* hr) { 1726 1727 if (hr->continuesHumongous()) { 1728 // We will ignore these here and process them when their 1729 // associated "starts humongous" region is processed (see 1730 // set_bit_for_heap_region()). Note that we cannot rely on their 1731 // associated "starts humongous" region to have their bit set to 1732 // 1 since, due to the region chunking in the parallel region 1733 // iteration, a "continues humongous" region might be visited 1734 // before its associated "starts humongous". 1735 return false; 1736 } 1737 1738 HeapWord* ntams = hr->next_top_at_mark_start(); 1739 HeapWord* top = hr->top(); 1740 1741 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1742 1743 // Mark the allocated-since-marking portion... 1744 if (ntams < top) { 1745 // This definitely means the region has live objects. 1746 set_bit_for_region(hr); 1747 1748 // Now set the bits in the card bitmap for [ntams, top) 1749 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1750 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1751 1752 // Note: if we're looking at the last region in heap - top 1753 // could be actually just beyond the end of the heap; end_idx 1754 // will then correspond to a (non-existent) card that is also 1755 // just beyond the heap. 
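      // Hence the is_in_g1_reserved() guard below: end_idx is only bumped
      // when top is a real heap address whose card exists in the bitmap.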
1756 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1757 // end of object is not card aligned - increment to cover 1758 // all the cards spanned by the object 1759 end_idx += 1; 1760 } 1761 1762 assert(end_idx <= _card_bm->size(), 1763 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1764 end_idx, _card_bm->size())); 1765 assert(start_idx < _card_bm->size(), 1766 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1767 start_idx, _card_bm->size())); 1768 1769 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1770 } 1771 1772 // Set the bit for the region if it contains live data 1773 if (hr->next_marked_bytes() > 0) { 1774 set_bit_for_region(hr); 1775 } 1776 1777 return false; 1778 } 1779 }; 1780 1781 class G1ParFinalCountTask: public AbstractGangTask { 1782 protected: 1783 G1CollectedHeap* _g1h; 1784 ConcurrentMark* _cm; 1785 BitMap* _actual_region_bm; 1786 BitMap* _actual_card_bm; 1787 1788 uint _n_workers; 1789 1790 public: 1791 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1792 : AbstractGangTask("G1 final counting"), 1793 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1794 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1795 _n_workers(0) { 1796 // Use the value already set as the number of active threads 1797 // in the call to run_task(). 1798 if (G1CollectedHeap::use_parallel_gc_threads()) { 1799 assert( _g1h->workers()->active_workers() > 0, 1800 "Should have been previously set"); 1801 _n_workers = _g1h->workers()->active_workers(); 1802 } else { 1803 _n_workers = 1; 1804 } 1805 } 1806 1807 void work(uint worker_id) { 1808 assert(worker_id < _n_workers, "invariant"); 1809 1810 FinalCountDataUpdateClosure final_update_cl(_g1h, 1811 _actual_region_bm, 1812 _actual_card_bm); 1813 1814 if (G1CollectedHeap::use_parallel_gc_threads()) { 1815 _g1h->heap_region_par_iterate_chunked(&final_update_cl, 1816 worker_id, 1817 _n_workers, 1818 HeapRegion::FinalCountClaimValue); 1819 } else { 1820 _g1h->heap_region_iterate(&final_update_cl); 1821 } 1822 } 1823 }; 1824 1825 class G1ParNoteEndTask; 1826 1827 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1828 G1CollectedHeap* _g1; 1829 size_t _max_live_bytes; 1830 uint _regions_claimed; 1831 size_t _freed_bytes; 1832 FreeRegionList* _local_cleanup_list; 1833 HeapRegionSetCount _old_regions_removed; 1834 HeapRegionSetCount _humongous_regions_removed; 1835 HRRSCleanupTask* _hrrs_cleanup_task; 1836 double _claimed_region_time; 1837 double _max_region_time; 1838 1839 public: 1840 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1841 FreeRegionList* local_cleanup_list, 1842 HRRSCleanupTask* hrrs_cleanup_task) : 1843 _g1(g1), 1844 _max_live_bytes(0), _regions_claimed(0), 1845 _freed_bytes(0), 1846 _claimed_region_time(0.0), _max_region_time(0.0), 1847 _local_cleanup_list(local_cleanup_list), 1848 _old_regions_removed(), 1849 _humongous_regions_removed(), 1850 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1851 1852 size_t freed_bytes() { return _freed_bytes; } 1853 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } 1854 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } 1855 1856 bool doHeapRegion(HeapRegion *hr) { 1857 if (hr->continuesHumongous()) { 1858 return false; 1859 } 1860 // We use a claim value of zero here because all regions 1861 // were claimed with value 1 in the FinalCount task. 
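    // A region with used() > 0 but no live bytes (and which is not young)
    // is all garbage: it is freed below and queued on the local cleanup
    // list. Every other region keeps its contents and only has its
    // remembered set cleanup work recorded.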
1862 _g1->reset_gc_time_stamps(hr); 1863 double start = os::elapsedTime(); 1864 _regions_claimed++; 1865 hr->note_end_of_marking(); 1866 _max_live_bytes += hr->max_live_bytes(); 1867 1868 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1869 _freed_bytes += hr->used(); 1870 hr->set_containing_set(NULL); 1871 if (hr->isHumongous()) { 1872 assert(hr->startsHumongous(), "we should only see starts humongous"); 1873 _humongous_regions_removed.increment(1u, hr->capacity()); 1874 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1875 } else { 1876 _old_regions_removed.increment(1u, hr->capacity()); 1877 _g1->free_region(hr, _local_cleanup_list, true); 1878 } 1879 } else { 1880 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1881 } 1882 1883 double region_time = (os::elapsedTime() - start); 1884 _claimed_region_time += region_time; 1885 if (region_time > _max_region_time) { 1886 _max_region_time = region_time; 1887 } 1888 return false; 1889 } 1890 1891 size_t max_live_bytes() { return _max_live_bytes; } 1892 uint regions_claimed() { return _regions_claimed; } 1893 double claimed_region_time_sec() { return _claimed_region_time; } 1894 double max_region_time_sec() { return _max_region_time; } 1895 }; 1896 1897 class G1ParNoteEndTask: public AbstractGangTask { 1898 friend class G1NoteEndOfConcMarkClosure; 1899 1900 protected: 1901 G1CollectedHeap* _g1h; 1902 size_t _max_live_bytes; 1903 size_t _freed_bytes; 1904 FreeRegionList* _cleanup_list; 1905 1906 public: 1907 G1ParNoteEndTask(G1CollectedHeap* g1h, 1908 FreeRegionList* cleanup_list) : 1909 AbstractGangTask("G1 note end"), _g1h(g1h), 1910 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { } 1911 1912 void work(uint worker_id) { 1913 double start = os::elapsedTime(); 1914 FreeRegionList local_cleanup_list("Local Cleanup List"); 1915 HRRSCleanupTask hrrs_cleanup_task; 1916 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1917 &hrrs_cleanup_task); 1918 if (G1CollectedHeap::use_parallel_gc_threads()) { 1919 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id, 1920 _g1h->workers()->active_workers(), 1921 HeapRegion::NoteEndClaimValue); 1922 } else { 1923 _g1h->heap_region_iterate(&g1_note_end); 1924 } 1925 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1926 1927 // Now update the lists 1928 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1929 { 1930 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1931 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1932 _max_live_bytes += g1_note_end.max_live_bytes(); 1933 _freed_bytes += g1_note_end.freed_bytes(); 1934 1935 // If we iterate over the global cleanup list at the end of 1936 // cleanup to do this printing we will not guarantee to only 1937 // generate output for the newly-reclaimed regions (the list 1938 // might not be empty at the beginning of cleanup; we might 1939 // still be working on its previous contents). So we do the 1940 // printing here, before we append the new regions to the global 1941 // cleanup list. 
1942 1943 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1944 if (hr_printer->is_active()) { 1945 FreeRegionListIterator iter(&local_cleanup_list); 1946 while (iter.more_available()) { 1947 HeapRegion* hr = iter.get_next(); 1948 hr_printer->cleanup(hr); 1949 } 1950 } 1951 1952 _cleanup_list->add_ordered(&local_cleanup_list); 1953 assert(local_cleanup_list.is_empty(), "post-condition"); 1954 1955 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1956 } 1957 } 1958 size_t max_live_bytes() { return _max_live_bytes; } 1959 size_t freed_bytes() { return _freed_bytes; } 1960 }; 1961 1962 class G1ParScrubRemSetTask: public AbstractGangTask { 1963 protected: 1964 G1RemSet* _g1rs; 1965 BitMap* _region_bm; 1966 BitMap* _card_bm; 1967 public: 1968 G1ParScrubRemSetTask(G1CollectedHeap* g1h, 1969 BitMap* region_bm, BitMap* card_bm) : 1970 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), 1971 _region_bm(region_bm), _card_bm(card_bm) { } 1972 1973 void work(uint worker_id) { 1974 if (G1CollectedHeap::use_parallel_gc_threads()) { 1975 _g1rs->scrub_par(_region_bm, _card_bm, worker_id, 1976 HeapRegion::ScrubRemSetClaimValue); 1977 } else { 1978 _g1rs->scrub(_region_bm, _card_bm); 1979 } 1980 } 1981 1982 }; 1983 1984 void ConcurrentMark::cleanup() { 1985 // world is stopped at this checkpoint 1986 assert(SafepointSynchronize::is_at_safepoint(), 1987 "world should be stopped"); 1988 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1989 1990 // If a full collection has happened, we shouldn't do this. 1991 if (has_aborted()) { 1992 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1993 return; 1994 } 1995 1996 g1h->verify_region_sets_optional(); 1997 1998 if (VerifyDuringGC) { 1999 HandleMark hm; // handle scope 2000 Universe::heap()->prepare_for_verify(); 2001 Universe::verify(VerifyOption_G1UsePrevMarking, 2002 " VerifyDuringGC:(before)"); 2003 } 2004 g1h->check_bitmaps("Cleanup Start"); 2005 2006 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 2007 g1p->record_concurrent_mark_cleanup_start(); 2008 2009 double start = os::elapsedTime(); 2010 2011 HeapRegionRemSet::reset_for_cleanup_tasks(); 2012 2013 uint n_workers; 2014 2015 // Do counting once more with the world stopped for good measure. 2016 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 2017 2018 if (G1CollectedHeap::use_parallel_gc_threads()) { 2019 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2020 "sanity check"); 2021 2022 g1h->set_par_threads(); 2023 n_workers = g1h->n_par_threads(); 2024 assert(g1h->n_par_threads() == n_workers, 2025 "Should not have been reset"); 2026 g1h->workers()->run_task(&g1_par_count_task); 2027 // Done with the parallel phase so reset to 0. 2028 g1h->set_par_threads(0); 2029 2030 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue), 2031 "sanity check"); 2032 } else { 2033 n_workers = 1; 2034 g1_par_count_task.work(0); 2035 } 2036 2037 if (VerifyDuringGC) { 2038 // Verify that the counting data accumulated during marking matches 2039 // that calculated by walking the marking bitmap. 
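    // The expected bitmaps below are recomputed from scratch by walking
    // the (still current) next marking bitmap; the guarantee after the
    // verification task requires zero mismatches against the data that
    // was accumulated concurrently and aggregated at remark.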
    // Bitmaps to hold expected values
    BitMap expected_region_bm(_region_bm.size(), true);
    BitMap expected_card_bm(_card_bm.size(), true);

    G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
                                                 &_region_bm,
                                                 &_card_bm,
                                                 &expected_region_bm,
                                                 &expected_card_bm);

    if (G1CollectedHeap::use_parallel_gc_threads()) {
      g1h->set_par_threads((int)n_workers);
      g1h->workers()->run_task(&g1_par_verify_task);
      // Done with the parallel phase so reset to 0.
      g1h->set_par_threads(0);

      assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
             "sanity check");
    } else {
      g1_par_verify_task.work(0);
    }

    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  }

  size_t start_used_bytes = g1h->used();
  g1h->set_marking_complete();

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install the newly created mark bitmap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    g1h->set_par_threads((int)n_workers);
    g1h->workers()->run_task(&g1_par_note_end_task);
    g1h->set_par_threads(0);

    assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
           "sanity check");
  } else {
    g1_par_note_end_task.work(0);
  }
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
    if (G1CollectedHeap::use_parallel_gc_threads()) {
      g1h->set_par_threads((int)n_workers);
      g1h->workers()->run_task(&g1_par_scrub_rs_task);
      g1h->set_par_threads(0);

      assert(g1h->check_heap_region_claim_values(
                                            HeapRegion::ScrubRemSetClaimValue),
             "sanity check");
    } else {
      g1_par_scrub_rs_task.work(0);
    }

    double rs_scrub_end = os::elapsedTime();
    double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
    _total_rs_scrub_time += this_rs_scrub_time;
  }

  // This will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  if (G1Log::fine()) {
    g1h->print_size_transition(gclog_or_tty,
                               start_used_bytes,
                               g1h->used(),
                               g1h->capacity());
  }

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(after)");
  }
  g1h->check_bitmaps("Cleanup End");

  g1h->verify_region_sets_optional();
  g1h->trace_heap_after_concurrent_cycle();
}

void ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                           "cleanup list has %u entries",
                           _cleanup_list.length());
  }

  // No one else should be accessing the _cleanup_list at this point,
  // so it's not necessary to take any locks.
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_head();
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                               "appending %u entries to the secondary_free_list, "
                               "cleanup list still has %u entries",
                               tmp_free_list.length(),
                               _cleanup_list.length());
      }

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }

      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the CMTask associated with a worker thread (for serial reference
// processing the CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// state. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  ConcurrentMark* _cm;
  CMTask*         _task;
  int             _ref_counter_limit;
  int             _ref_counter;
  bool            _is_serial;
 public:
  G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] we're looking at location "
                               "*"PTR_FORMAT" = "PTR_FORMAT,
                               _task->worker_id(), p2i(p), p2i((void*) obj));
      }

      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call CMTask::do_marking_step() to
        // process these entries.
        //
        // We call CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false      /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    } else {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the CMTask associated with a given worker thread (for serial
// reference processing the CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.
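// ("Unbelievably large" is 1e9 ms, roughly 11.5 days, so in practice
// do_marking_step() only returns once the stacks are drained or the
// global mark stack has overflowed.)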
class G1CMDrainMarkingStackClosure: public VoidClosure {
  ConcurrentMark* _cm;
  CMTask*         _task;
  bool            _is_serial;
 public:
  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
                               _task->worker_id(), BOOL_TO_STR(_is_serial));
      }

      // We call CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;
  WorkGang*        _workers;
  int              _active_workers;

 public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          ConcurrentMark* cm,
                          WorkGang* workers,
                          int n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
2370 virtual void execute(ProcessTask& task); 2371 virtual void execute(EnqueueTask& task); 2372 }; 2373 2374 class G1CMRefProcTaskProxy: public AbstractGangTask { 2375 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2376 ProcessTask& _proc_task; 2377 G1CollectedHeap* _g1h; 2378 ConcurrentMark* _cm; 2379 2380 public: 2381 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2382 G1CollectedHeap* g1h, 2383 ConcurrentMark* cm) : 2384 AbstractGangTask("Process reference objects in parallel"), 2385 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2386 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2387 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2388 } 2389 2390 virtual void work(uint worker_id) { 2391 CMTask* task = _cm->task(worker_id); 2392 G1CMIsAliveClosure g1_is_alive(_g1h); 2393 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2394 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2395 2396 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2397 } 2398 }; 2399 2400 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2401 assert(_workers != NULL, "Need parallel worker threads."); 2402 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2403 2404 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2405 2406 // We need to reset the concurrency level before each 2407 // proxy task execution, so that the termination protocol 2408 // and overflow handling in CMTask::do_marking_step() knows 2409 // how many workers to wait for. 2410 _cm->set_concurrency(_active_workers); 2411 _g1h->set_par_threads(_active_workers); 2412 _workers->run_task(&proc_task_proxy); 2413 _g1h->set_par_threads(0); 2414 } 2415 2416 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2417 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2418 EnqueueTask& _enq_task; 2419 2420 public: 2421 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2422 AbstractGangTask("Enqueue reference objects in parallel"), 2423 _enq_task(enq_task) { } 2424 2425 virtual void work(uint worker_id) { 2426 _enq_task.work(worker_id); 2427 } 2428 }; 2429 2430 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2431 assert(_workers != NULL, "Need parallel worker threads."); 2432 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2433 2434 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2435 2436 // Not strictly necessary but... 2437 // 2438 // We need to reset the concurrency level before each 2439 // proxy task execution, so that the termination protocol 2440 // and overflow handling in CMTask::do_marking_step() knows 2441 // how many workers to wait for. 2442 _cm->set_concurrency(_active_workers); 2443 _g1h->set_par_threads(_active_workers); 2444 _workers->run_task(&enq_task_proxy); 2445 _g1h->set_par_threads(0); 2446 } 2447 2448 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2449 if (has_overflown()) { 2450 // Skip processing the discovered references if we have 2451 // overflown the global marking stack. Reference objects 2452 // only get discovered once so it is OK to not 2453 // de-populate the discovered reference lists. We could have, 2454 // but the only benefit would be that, when marking restarts, 2455 // less reference objects are discovered. 2456 return; 2457 } 2458 2459 ResourceMark rm; 2460 HandleMark hm; 2461 2462 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2463 2464 // Is alive closure. 
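  // An object is considered alive if it lies outside the G1 reserved
  // heap or if it is not "ill" with respect to the current marking
  // (see G1CMIsAliveClosure::do_object_b() above).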
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    if (G1Log::finer()) {
      gclog_or_tty->put(' ');
    }
    GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          g1h->gc_timer_cm(),
                                          concurrent_gc_id());
    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed
    return;
  }

  g1h->unlink_string_and_symbol_table(&g1_is_alive,
                                      /* process_strings */ false, // currently strings are always roots
                                      /* process_symbols */ true);
}

void ConcurrentMark::swapMarkBitMaps() {
  CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap  = (CMBitMap*)  temp;
}

class CMRemarkTask: public AbstractGangTask {
 private:
  ConcurrentMark* _cm;
  bool            _is_serial;
 public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true         /* do_termination */,
                              _is_serial);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
    AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark   hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  g1h->ensure_parsability(false);

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    G1CollectedHeap::StrongRootsScope srs(g1h);
    // This is remark, so we'll use up all active threads.
    uint active_workers = g1h->workers()->active_workers();
    if (active_workers == 0) {
      assert(active_workers > 0, "Should have been set earlier");
      active_workers = (uint) ParallelGCThreads;
      g1h->workers()->set_active_workers(active_workers);
    }
    set_concurrency_and_phase(active_workers, false /* concurrent */);
    // Leave _parallel_marking_threads at its
    // value originally calculated in the ConcurrentMark
    // constructor and pass values of the active workers
    // through the gang in the task.

    CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
    // We will start all available threads, even if we decide that the
    // active_workers will be fewer. The extra ones will just bail out
    // immediately.
2625 g1h->set_par_threads(active_workers); 2626 g1h->workers()->run_task(&remarkTask); 2627 g1h->set_par_threads(0); 2628 } else { 2629 G1CollectedHeap::StrongRootsScope srs(g1h); 2630 uint active_workers = 1; 2631 set_concurrency_and_phase(active_workers, false /* concurrent */); 2632 2633 // Note - if there's no work gang then the VMThread will be 2634 // the thread to execute the remark - serially. We have 2635 // to pass true for the is_serial parameter so that 2636 // CMTask::do_marking_step() doesn't enter the sync 2637 // barriers in the event of an overflow. Doing so will 2638 // cause an assert that the current thread is not a 2639 // concurrent GC thread. 2640 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/); 2641 remarkTask.work(0); 2642 } 2643 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2644 guarantee(has_overflown() || 2645 satb_mq_set.completed_buffers_num() == 0, 2646 err_msg("Invariant: has_overflown = %s, num buffers = %d", 2647 BOOL_TO_STR(has_overflown()), 2648 satb_mq_set.completed_buffers_num())); 2649 2650 print_stats(); 2651 } 2652 2653 #ifndef PRODUCT 2654 2655 class PrintReachableOopClosure: public OopClosure { 2656 private: 2657 G1CollectedHeap* _g1h; 2658 outputStream* _out; 2659 VerifyOption _vo; 2660 bool _all; 2661 2662 public: 2663 PrintReachableOopClosure(outputStream* out, 2664 VerifyOption vo, 2665 bool all) : 2666 _g1h(G1CollectedHeap::heap()), 2667 _out(out), _vo(vo), _all(all) { } 2668 2669 void do_oop(narrowOop* p) { do_oop_work(p); } 2670 void do_oop( oop* p) { do_oop_work(p); } 2671 2672 template <class T> void do_oop_work(T* p) { 2673 oop obj = oopDesc::load_decode_heap_oop(p); 2674 const char* str = NULL; 2675 const char* str2 = ""; 2676 2677 if (obj == NULL) { 2678 str = ""; 2679 } else if (!_g1h->is_in_g1_reserved(obj)) { 2680 str = " O"; 2681 } else { 2682 HeapRegion* hr = _g1h->heap_region_containing(obj); 2683 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); 2684 bool marked = _g1h->is_marked(obj, _vo); 2685 2686 if (over_tams) { 2687 str = " >"; 2688 if (marked) { 2689 str2 = " AND MARKED"; 2690 } 2691 } else if (marked) { 2692 str = " M"; 2693 } else { 2694 str = " NOT"; 2695 } 2696 } 2697 2698 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", 2699 p2i(p), p2i((void*) obj), str, str2); 2700 } 2701 }; 2702 2703 class PrintReachableObjectClosure : public ObjectClosure { 2704 private: 2705 G1CollectedHeap* _g1h; 2706 outputStream* _out; 2707 VerifyOption _vo; 2708 bool _all; 2709 HeapRegion* _hr; 2710 2711 public: 2712 PrintReachableObjectClosure(outputStream* out, 2713 VerifyOption vo, 2714 bool all, 2715 HeapRegion* hr) : 2716 _g1h(G1CollectedHeap::heap()), 2717 _out(out), _vo(vo), _all(all), _hr(hr) { } 2718 2719 void do_object(oop o) { 2720 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo); 2721 bool marked = _g1h->is_marked(o, _vo); 2722 bool print_it = _all || over_tams || marked; 2723 2724 if (print_it) { 2725 _out->print_cr(" "PTR_FORMAT"%s", 2726 p2i((void *)o), (over_tams) ? " >" : (marked) ? 
" M" : ""); 2727 PrintReachableOopClosure oopCl(_out, _vo, _all); 2728 o->oop_iterate_no_header(&oopCl); 2729 } 2730 } 2731 }; 2732 2733 class PrintReachableRegionClosure : public HeapRegionClosure { 2734 private: 2735 G1CollectedHeap* _g1h; 2736 outputStream* _out; 2737 VerifyOption _vo; 2738 bool _all; 2739 2740 public: 2741 bool doHeapRegion(HeapRegion* hr) { 2742 HeapWord* b = hr->bottom(); 2743 HeapWord* e = hr->end(); 2744 HeapWord* t = hr->top(); 2745 HeapWord* p = _g1h->top_at_mark_start(hr, _vo); 2746 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2747 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p)); 2748 _out->cr(); 2749 2750 HeapWord* from = b; 2751 HeapWord* to = t; 2752 2753 if (to > from) { 2754 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to)); 2755 _out->cr(); 2756 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2757 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2758 _out->cr(); 2759 } 2760 2761 return false; 2762 } 2763 2764 PrintReachableRegionClosure(outputStream* out, 2765 VerifyOption vo, 2766 bool all) : 2767 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } 2768 }; 2769 2770 void ConcurrentMark::print_reachable(const char* str, 2771 VerifyOption vo, 2772 bool all) { 2773 gclog_or_tty->cr(); 2774 gclog_or_tty->print_cr("== Doing heap dump... "); 2775 2776 if (G1PrintReachableBaseFile == NULL) { 2777 gclog_or_tty->print_cr(" #### error: no base file defined"); 2778 return; 2779 } 2780 2781 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2782 (JVM_MAXPATHLEN - 1)) { 2783 gclog_or_tty->print_cr(" #### error: file name too long"); 2784 return; 2785 } 2786 2787 char file_name[JVM_MAXPATHLEN]; 2788 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2789 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2790 2791 fileStream fout(file_name); 2792 if (!fout.is_open()) { 2793 gclog_or_tty->print_cr(" #### error: could not open file"); 2794 return; 2795 } 2796 2797 outputStream* out = &fout; 2798 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); 2799 out->cr(); 2800 2801 out->print_cr("--- ITERATING OVER REGIONS"); 2802 out->cr(); 2803 PrintReachableRegionClosure rcl(out, vo, all); 2804 _g1h->heap_region_iterate(&rcl); 2805 out->cr(); 2806 2807 gclog_or_tty->print_cr(" done"); 2808 gclog_or_tty->flush(); 2809 } 2810 2811 #endif // PRODUCT 2812 2813 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2814 // Note we are overriding the read-only view of the prev map here, via 2815 // the cast. 2816 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2817 } 2818 2819 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2820 _nextMarkBitMap->clearRange(mr); 2821 } 2822 2823 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) { 2824 clearRangePrevBitmap(mr); 2825 clearRangeNextBitmap(mr); 2826 } 2827 2828 HeapRegion* 2829 ConcurrentMark::claim_region(uint worker_id) { 2830 // "checkpoint" the finger 2831 HeapWord* finger = _finger; 2832 2833 // _heap_end will not change underneath our feet; it only changes at 2834 // yield points. 2835 while (finger < _heap_end) { 2836 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2837 2838 // Note on how this code handles humongous regions. In the 2839 // normal case the finger will reach the start of a "starts 2840 // humongous" (SH) region. 
    // Its end will either be the end of the
    // last "continues humongous" (CH) region in the sequence, or the
    // standard end of the SH region (if the SH is the only region in
    // the sequence). That way claim_region() will skip over the CH
    // regions. However, there is a subtle race between a CM thread
    // executing this method and a mutator thread doing a humongous
    // object allocation. The two are not mutually exclusive as the CM
    // thread does not need to hold the Heap_lock when it gets
    // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming a SH or a CH
    // region. In the former case, it will either
    //   a) Miss the update to the region's end, in which case it will
    //      visit every subsequent CH region, will find their bitmaps
    //      empty, and do nothing, or
    //   b) Will observe the update of the region's end (in which case
    //      it will skip the subsequent CH regions).
    // If it comes across a region that suddenly becomes CH, the
    // scenario will be similar to b). So, the race between
    // claim_region() and a humongous object allocation might force us
    // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
    HeapWord*   bottom      = curr_region->bottom();
    HeapWord*   end         = curr_region->end();
    HeapWord*   limit       = curr_region->next_top_at_mark_start();

    if (verbose_low()) {
      gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
                             "["PTR_FORMAT", "PTR_FORMAT"), "
                             "limit = "PTR_FORMAT,
                             worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
    }

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger) {
      // we succeeded

      // Notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further.
      assert(_finger >= end, "the finger should have moved forward");

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] we were successful with region = "
                               PTR_FORMAT, worker_id, p2i(curr_region));
      }

      if (limit > bottom) {
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
                                 "returning it ", worker_id, p2i(curr_region));
        }
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
                                 "returning NULL", worker_id, p2i(curr_region));
        }
        // We return NULL and the caller should try calling
        // claim_region() again.
2902 return NULL; 2903 } 2904 } else { 2905 assert(_finger > finger, "the finger should have moved forward"); 2906 if (verbose_low()) { 2907 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2908 "global finger = "PTR_FORMAT", " 2909 "our finger = "PTR_FORMAT, 2910 worker_id, p2i(_finger), p2i(finger)); 2911 } 2912 2913 // read it again 2914 finger = _finger; 2915 } 2916 } 2917 2918 return NULL; 2919 } 2920 2921 #ifndef PRODUCT 2922 enum VerifyNoCSetOopsPhase { 2923 VerifyNoCSetOopsStack, 2924 VerifyNoCSetOopsQueues, 2925 VerifyNoCSetOopsSATBCompleted, 2926 VerifyNoCSetOopsSATBThread 2927 }; 2928 2929 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2930 private: 2931 G1CollectedHeap* _g1h; 2932 VerifyNoCSetOopsPhase _phase; 2933 int _info; 2934 2935 const char* phase_str() { 2936 switch (_phase) { 2937 case VerifyNoCSetOopsStack: return "Stack"; 2938 case VerifyNoCSetOopsQueues: return "Queue"; 2939 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 2940 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 2941 default: ShouldNotReachHere(); 2942 } 2943 return NULL; 2944 } 2945 2946 void do_object_work(oop obj) { 2947 guarantee(!_g1h->obj_in_cs(obj), 2948 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2949 p2i((void*) obj), phase_str(), _info)); 2950 } 2951 2952 public: 2953 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2954 2955 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2956 _phase = phase; 2957 _info = info; 2958 } 2959 2960 virtual void do_oop(oop* p) { 2961 oop obj = oopDesc::load_decode_heap_oop(p); 2962 do_object_work(obj); 2963 } 2964 2965 virtual void do_oop(narrowOop* p) { 2966 // We should not come across narrow oops while scanning marking 2967 // stacks and SATB buffers. 2968 ShouldNotReachHere(); 2969 } 2970 2971 virtual void do_object(oop obj) { 2972 do_object_work(obj); 2973 } 2974 }; 2975 2976 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 2977 bool verify_enqueued_buffers, 2978 bool verify_thread_buffers, 2979 bool verify_fingers) { 2980 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2981 if (!G1CollectedHeap::heap()->mark_in_progress()) { 2982 return; 2983 } 2984 2985 VerifyNoCSetOopsClosure cl; 2986 2987 if (verify_stacks) { 2988 // Verify entries on the global mark stack 2989 cl.set_phase(VerifyNoCSetOopsStack); 2990 _markStack.oops_do(&cl); 2991 2992 // Verify entries on the task queues 2993 for (uint i = 0; i < _max_worker_id; i += 1) { 2994 cl.set_phase(VerifyNoCSetOopsQueues, i); 2995 CMTaskQueue* queue = _task_queues->queue(i); 2996 queue->oops_do(&cl); 2997 } 2998 } 2999 3000 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 3001 3002 // Verify entries on the enqueued SATB buffers 3003 if (verify_enqueued_buffers) { 3004 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 3005 satb_qs.iterate_completed_buffers_read_only(&cl); 3006 } 3007 3008 // Verify entries on the per-thread SATB buffers 3009 if (verify_thread_buffers) { 3010 cl.set_phase(VerifyNoCSetOopsSATBThread); 3011 satb_qs.iterate_thread_buffers_read_only(&cl); 3012 } 3013 3014 if (verify_fingers) { 3015 // Verify the global finger 3016 HeapWord* global_finger = finger(); 3017 if (global_finger != NULL && global_finger < _heap_end) { 3018 // The global finger always points to a heap region boundary. 
We 3019 // use heap_region_containing_raw() to get the containing region 3020 // given that the global finger could be pointing to a free region 3021 // which subsequently becomes continues humongous. If that 3022 // happens, heap_region_containing() will return the bottom of the 3023 // corresponding starts humongous region and the check below will 3024 // not hold any more. 3025 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 3026 guarantee(global_finger == global_hr->bottom(), 3027 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 3028 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 3029 } 3030 3031 // Verify the task fingers 3032 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 3033 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 3034 CMTask* task = _tasks[i]; 3035 HeapWord* task_finger = task->finger(); 3036 if (task_finger != NULL && task_finger < _heap_end) { 3037 // See above note on the global finger verification. 3038 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 3039 guarantee(task_finger == task_hr->bottom() || 3040 !task_hr->in_collection_set(), 3041 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 3042 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 3043 } 3044 } 3045 } 3046 } 3047 #endif // PRODUCT 3048 3049 // Aggregate the counting data that was constructed concurrently 3050 // with marking. 3051 class AggregateCountDataHRClosure: public HeapRegionClosure { 3052 G1CollectedHeap* _g1h; 3053 ConcurrentMark* _cm; 3054 CardTableModRefBS* _ct_bs; 3055 BitMap* _cm_card_bm; 3056 uint _max_worker_id; 3057 3058 public: 3059 AggregateCountDataHRClosure(G1CollectedHeap* g1h, 3060 BitMap* cm_card_bm, 3061 uint max_worker_id) : 3062 _g1h(g1h), _cm(g1h->concurrent_mark()), 3063 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())), 3064 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { } 3065 3066 bool doHeapRegion(HeapRegion* hr) { 3067 if (hr->continuesHumongous()) { 3068 // We will ignore these here and process them when their 3069 // associated "starts humongous" region is processed. 3070 // Note that we cannot rely on their associated 3071 // "starts humongous" region to have their bit set to 1 3072 // since, due to the region chunking in the parallel region 3073 // iteration, a "continues humongous" region might be visited 3074 // before its associated "starts humongous". 3075 return false; 3076 } 3077 3078 HeapWord* start = hr->bottom(); 3079 HeapWord* limit = hr->next_top_at_mark_start(); 3080 HeapWord* end = hr->end(); 3081 3082 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(), 3083 err_msg("Preconditions not met - " 3084 "start: "PTR_FORMAT", limit: "PTR_FORMAT", " 3085 "top: "PTR_FORMAT", end: "PTR_FORMAT, 3086 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()))); 3087 3088 assert(hr->next_marked_bytes() == 0, "Precondition"); 3089 3090 if (start == limit) { 3091 // NTAMS of this region has not been set so nothing to do. 3092 return false; 3093 } 3094 3095 // 'start' should be in the heap. 
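    // Region bottoms are card aligned by construction (the region size is
    // a power-of-two multiple of the card size), which is what the first
    // assert below relies on.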
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx   = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could be actually just beyond the end of the heap;
    // limit_idx will then correspond to a (non-existent) card
    // that is also outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
    uint hrs_index = hr->hrs_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrs_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
      BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);

      while (scan_idx < limit_idx) {
        assert(task_card_bm->at(scan_idx) == true, "should be");
        _cm_card_bm->set_bit(scan_idx);
        assert(_cm_card_bm->at(scan_idx) == true, "should be");

        // BitMap::get_next_one_offset() can handle the case when
        // its left_offset parameter is greater than its right_offset
        // parameter. It does, however, have an early exit if
        // left_offset == right_offset. So let's limit the value
        // passed in for left offset here.
        BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
        scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
      }
    }

    // Update the marked bytes for this region.
3149 hr->add_to_marked_bytes(marked_bytes); 3150 3151 // Next heap region 3152 return false; 3153 } 3154 }; 3155 3156 class G1AggregateCountDataTask: public AbstractGangTask { 3157 protected: 3158 G1CollectedHeap* _g1h; 3159 ConcurrentMark* _cm; 3160 BitMap* _cm_card_bm; 3161 uint _max_worker_id; 3162 int _active_workers; 3163 3164 public: 3165 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3166 ConcurrentMark* cm, 3167 BitMap* cm_card_bm, 3168 uint max_worker_id, 3169 int n_workers) : 3170 AbstractGangTask("Count Aggregation"), 3171 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3172 _max_worker_id(max_worker_id), 3173 _active_workers(n_workers) { } 3174 3175 void work(uint worker_id) { 3176 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3177 3178 if (G1CollectedHeap::use_parallel_gc_threads()) { 3179 _g1h->heap_region_par_iterate_chunked(&cl, worker_id, 3180 _active_workers, 3181 HeapRegion::AggregateCountClaimValue); 3182 } else { 3183 _g1h->heap_region_iterate(&cl); 3184 } 3185 } 3186 }; 3187 3188 3189 void ConcurrentMark::aggregate_count_data() { 3190 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? 3191 _g1h->workers()->active_workers() : 3192 1); 3193 3194 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3195 _max_worker_id, n_workers); 3196 3197 if (G1CollectedHeap::use_parallel_gc_threads()) { 3198 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 3199 "sanity check"); 3200 _g1h->set_par_threads(n_workers); 3201 _g1h->workers()->run_task(&g1_par_agg_task); 3202 _g1h->set_par_threads(0); 3203 3204 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue), 3205 "sanity check"); 3206 _g1h->reset_heap_region_claim_values(); 3207 } else { 3208 g1_par_agg_task.work(0); 3209 } 3210 } 3211 3212 // Clear the per-worker arrays used to store the per-region counting data 3213 void ConcurrentMark::clear_all_count_data() { 3214 // Clear the global card bitmap - it will be filled during 3215 // liveness count aggregation (during remark) and the 3216 // final counting task. 3217 _card_bm.clear(); 3218 3219 // Clear the global region bitmap - it will be filled as part 3220 // of the final counting task. 3221 _region_bm.clear(); 3222 3223 uint max_regions = _g1h->max_regions(); 3224 assert(_max_worker_id > 0, "uninitialized"); 3225 3226 for (uint i = 0; i < _max_worker_id; i += 1) { 3227 BitMap* task_card_bm = count_card_bitmap_for(i); 3228 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3229 3230 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3231 assert(marked_bytes_array != NULL, "uninitialized"); 3232 3233 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3234 task_card_bm->clear(); 3235 } 3236 } 3237 3238 void ConcurrentMark::print_stats() { 3239 if (verbose_stats()) { 3240 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3241 for (size_t i = 0; i < _active_tasks; ++i) { 3242 _tasks[i]->print_stats(); 3243 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3244 } 3245 } 3246 } 3247 3248 // abandon current marking iteration due to a Full GC 3249 void ConcurrentMark::abort() { 3250 // Clear all marks to force marking thread to do nothing 3251 _nextMarkBitMap->clearAll(); 3252 3253 // Note we cannot clear the previous marking bitmap here 3254 // since VerifyDuringGC verifies the objects marked during 3255 // a full GC against the previous bitmap. 
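  // Everything below discards the in-flight marking state: the liveness
  // counting data, the global and per-task mark stacks, the per-task
  // region fields, the overflow barrier syncs and the SATB machinery.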

// Clear the per-worker arrays used to store the per-region counting data
void ConcurrentMark::clear_all_count_data() {
  // Clear the global card bitmap - it will be filled during
  // liveness count aggregation (during remark) and the
  // final counting task.
  _card_bm.clear();

  // Clear the global region bitmap - it will be filled as part
  // of the final counting task.
  _region_bm.clear();

  uint max_regions = _g1h->max_regions();
  assert(_max_worker_id > 0, "uninitialized");

  for (uint i = 0; i < _max_worker_id; i += 1) {
    BitMap* task_card_bm = count_card_bitmap_for(i);
    size_t* marked_bytes_array = count_marked_bytes_array_for(i);

    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    assert(marked_bytes_array != NULL, "uninitialized");

    memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
    task_card_bm->clear();
  }
}

void ConcurrentMark::print_stats() {
  if (verbose_stats()) {
    gclog_or_tty->print_cr("---------------------------------------------------------------------");
    for (size_t i = 0; i < _active_tasks; ++i) {
      _tasks[i]->print_stats();
      gclog_or_tty->print_cr("---------------------------------------------------------------------");
    }
  }
}

// Abandon the current marking iteration due to a Full GC.
void ConcurrentMark::abort() {
  // Clear all marks to force the marking thread to do nothing.
  _nextMarkBitMap->clearAll();

  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Clear the liveness counting data
  clear_all_count_data();
  // Empty mark stack
  reset_marking_state();
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
  if (!gc_id.is_undefined()) {
    // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
    // to detect that it was aborted. Only keep track of the first GC id during
    // which we aborted.
    _aborted_gc_id = gc_id;
  }
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(false, /* new active value */
                                     satb_mq_set.is_active() /* expected_active */);

  _g1h->trace_heap_after_concurrent_cycle();
  _g1h->register_concurrent_cycle_end();
}

const GCId& ConcurrentMark::concurrent_gc_id() {
  if (has_aborted()) {
    return _aborted_gc_id;
  }
  return _g1h->gc_tracer_cm()->gc_id();
}

static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

void ConcurrentMark::print_summary_info() {
  gclog_or_tty->print_cr(" Concurrent marking:");
  print_ms_time_info(" ", "init marks", _init_times);
  print_ms_time_info(" ", "remarks", _remark_times);
  {
    print_ms_time_info(" ", "final marks", _remark_mark_times);
    print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
  }
  print_ms_time_info(" ", "cleanups", _cleanup_times);
  gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).",
                         _total_counting_time,
                         (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
                          (double)_cleanup_times.num()
                         : 0.0));
  if (G1ScrubRemSets) {
    gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
                           _total_rs_scrub_time,
                           (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
                            (double)_cleanup_times.num()
                           : 0.0));
  }
  gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.",
                         (_init_times.sum() + _remark_times.sum() +
                          _cleanup_times.sum())/1000.0);
  gclog_or_tty->print_cr(" Total concurrent time = %8.2f s "
                         "(%8.2f s marking).",
                         cmThread()->vtime_accum(),
                         cmThread()->vtime_mark_accum());
}

void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  if (use_parallel_marking_threads()) {
    _parallel_workers->print_worker_threads_on(st);
  }
}

void ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
}

// We take a break if someone is trying to stop the world.
bool ConcurrentMark::do_yield_check(uint worker_id) {
  if (SuspendibleThreadSet::should_yield()) {
    if (worker_id == 0) {
      _g1h->g1_policy()->record_concurrent_pause();
    }
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

bool ConcurrentMark::containing_card_is_marked(void* p) {
  size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
  return _card_bm.at(offset >> CardTableModRefBS::card_shift);
}

bool ConcurrentMark::containing_cards_are_marked(void* start,
                                                 void* last) {
  return containing_card_is_marked(start) &&
         containing_card_is_marked(last);
}

#ifndef PRODUCT
// for debugging purposes
void ConcurrentMark::print_finger() {
  gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
                         p2i(_heap_start), p2i(_heap_end), p2i(_finger));
  for (uint i = 0; i < _max_worker_id; ++i) {
    gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
  }
  gclog_or_tty->cr();
}
#endif

void CMTask::scan_object(oop obj) {
  assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");

  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
                           _worker_id, p2i((void*) obj));
  }

  size_t obj_size = obj->size();
  _words_scanned += obj_size;

  obj->oop_iterate(_cm_oop_closure);
  statsOnly( ++_objs_scanned );
  check_limits();
}

// Closure for iteration over bitmaps
class CMBitMapClosure : public BitMapClosure {
private:
  // the bitmap that is being iterated over
  CMBitMap* _nextMarkBitMap;
  ConcurrentMark* _cm;
  CMTask* _task;

public:
  CMBitMapClosure(CMTask* task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
    _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }

  bool do_bit(size_t offset) {
    HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
    assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert(addr < _cm->finger(), "invariant");

    statsOnly( _task->increase_objs_found_on_bitmap() );
    assert(addr >= _task->finger(), "invariant");

    // We move the task's local finger along.
    _task->move_finger_to(addr);

    _task->scan_object(oop(addr));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);

    // if the has_aborted flag has been raised, we need to bail out of
    // the iteration
    return !_task->has_aborted();
  }
};
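
// A minimal sketch of the iteration contract that do_bit() above relies
// on (illustrative only, not compiled): the iterator visits set bits in
// order and stops as soon as the closure returns false, which is how
// has_aborted() cuts a bitmap scan short.
#if 0
static bool iterate_marked_bits(const bool* bits, size_t n,
                                CMBitMapClosure* cl) {
  for (size_t offset = 0; offset < n; offset++) {
    if (bits[offset] && !cl->do_bit(offset)) {
      return false; // aborted by the closure
    }
  }
  return true;      // iterated to completion
}
#endif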
// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
private:
  CMTask* _task;

public:
  CMObjectClosure(CMTask* task) : _task(task) { }

  void do_object(oop obj) {
    _task->deal_with_reference(obj);
  }
};

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : _g1h(g1h), _cm(cm), _task(task) {
  assert(_ref_processor == NULL, "should be initialized to NULL");

  if (G1UseConcMarkReferenceProcessing) {
    _ref_processor = g1h->ref_processor_cm();
    assert(_ref_processor != NULL, "should not be NULL");
  }
}

void CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  assert(!hr->continuesHumongous(),
         "claim_region() should have filtered out continues humongous regions");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
                           _worker_id, p2i(hr));
  }

  _curr_region = hr;
  _finger = hr->bottom();
  update_region_limit();
}

void CMTask::update_region_limit() {
  HeapRegion* hr = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit = hr->next_top_at_mark_start();

  if (limit == bottom) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] found an empty region "
                             "["PTR_FORMAT", "PTR_FORMAT")",
                             _worker_id, p2i(bottom), p2i(limit));
    }
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
    // set _finger to limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}

void CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
                           _worker_id, p2i(_curr_region));
  }
  clear_region_fields();
}

void CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
  _curr_region = NULL;
  _finger = NULL;
  _region_limit = NULL;
}

void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  if (cm_oop_closure == NULL) {
    assert(_cm_oop_closure != NULL, "invariant");
  } else {
    assert(_cm_oop_closure == NULL, "invariant");
  }
  _cm_oop_closure = cm_oop_closure;
}

void CMTask::reset(CMBitMap* nextMarkBitMap) {
  guarantee(nextMarkBitMap != NULL, "invariant");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] resetting", _worker_id);
  }

  _nextMarkBitMap = nextMarkBitMap;
  clear_region_fields();

  _calls = 0;
  _elapsed_time_ms = 0.0;
  _termination_time_ms = 0.0;
  _termination_start_time_ms = 0.0;

#if _MARKING_STATS_
  _local_pushes = 0;
  _local_pops = 0;
  _local_max_size = 0;
  _objs_scanned = 0;
  _global_pushes = 0;
  _global_pops = 0;
  _global_max_size = 0;
  _global_transfers_to = 0;
  _global_transfers_from = 0;
  _regions_claimed = 0;
  _objs_found_on_bitmap = 0;
  _satb_buffers_processed = 0;
  _steal_attempts = 0;
  _steals = 0;
  _aborted = 0;
  _aborted_overflow = 0;
  _aborted_cm_aborted = 0;
  _aborted_yield = 0;
  _aborted_timed_out = 0;
  _aborted_satb = 0;
  _aborted_termination = 0;
#endif // _MARKING_STATS_
}

bool CMTask::should_exit_termination() {
  regular_clock_call();
  // This is called when we are in the termination protocol. We should
  // quit if, for some reason, this task wants to abort or the global
  // stack is not empty (this means that we can get work from it).
  return !_cm->mark_stack_empty() || has_aborted();
}

void CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
         "shouldn't have been called otherwise");
  regular_clock_call();
}

void CMTask::regular_clock_call() {
  if (has_aborted()) return;

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    set_has_aborted();
    return;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!concurrent()) return;

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    set_has_aborted();
    statsOnly( ++_aborted_cm_aborted );
    return;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (3) If marking stats are enabled, then we update the clock-call history.
#if _MARKING_STATS_
  if (_words_scanned >= _words_scanned_limit) {
    ++_clock_due_to_scanning;
  }
  if (_refs_reached >= _refs_reached_limit) {
    ++_clock_due_to_marking;
  }

  double last_interval_ms = curr_time_ms - _interval_start_time_ms;
  _interval_start_time_ms = curr_time_ms;
  _all_clock_intervals_ms.add(last_interval_ms);

  if (_cm->verbose_medium()) {
    gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
                           "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
                           _worker_id, last_interval_ms,
                           _words_scanned,
                           (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
                           _refs_reached,
                           (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
  }
#endif // _MARKING_STATS_

  // (4) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    statsOnly( ++_aborted_yield );
    return;
  }

  // (5) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    statsOnly( ++_aborted_timed_out );
    return;
  }

  // (6) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
                             _worker_id);
    }
    // we do need to process SATB buffers, we'll abort and restart
    // the marking task to do so
    set_has_aborted();
    statsOnly( ++_aborted_satb );
    return;
  }
}

void CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit = _real_words_scanned_limit;

  _real_refs_reached_limit = _refs_reached + refs_reached_period;
  _refs_reached_limit = _real_refs_reached_limit;
}

void CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per-byte scanning cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.

  if (_cm->verbose_medium()) {
    gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
  }

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}
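
// A worked example of the reduction above (hypothetical numbers, for
// illustration only): if words_scanned_period were 1000 and the task had
// just recalculated _real_words_scanned_limit = W + 1000, decrease_limits()
// would set _words_scanned_limit = W + 1000 - 750 = W + 250, so the clock
// fires after only a quarter of the usual scanning work following the
// expensive operation.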

void CMTask::move_entries_to_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the local queue
  oop buffer[global_stack_transfer_size];

  int n = 0;
  oop obj;
  while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
    buffer[n] = obj;
    ++n;
  }

  if (n > 0) {
    // we popped at least one entry from the local queue

    statsOnly( ++_global_transfers_to; _local_pops += n );

    if (!_cm->mark_stack_push(buffer, n)) {
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
                               _worker_id);
      }
      set_has_aborted();
    } else {
      // the transfer was successful

      if (_cm->verbose_medium()) {
        gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
                               _worker_id, n);
      }
      statsOnly( int tmp_size = _cm->mark_stack_size();
                 if (tmp_size > _global_max_size) {
                   _global_max_size = tmp_size;
                 }
                 _global_pushes += n );
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void CMTask::get_entries_from_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the global stack.
  oop buffer[global_stack_transfer_size];
  int n;
  _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  assert(n <= global_stack_transfer_size,
         "we should not pop more than the given limit");
  if (n > 0) {
    // yes, we did actually pop at least one entry

    statsOnly( ++_global_transfers_from; _global_pops += n );
    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
                             _worker_id, n);
    }
    for (int i = 0; i < n; ++i) {
      bool success = _task_queue->push(buffer[i]);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }

    statsOnly( int tmp_size = _task_queue->size();
               if (tmp_size > _local_max_size) {
                 _local_max_size = tmp_size;
               }
               _local_pushes += n );
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}
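
// A minimal sketch of the batching idea used by the two transfer routines
// above (illustrative only, not compiled; the queue type and its
// operations are hypothetical stand-ins): entries move between the
// per-task queue and the shared stack global_stack_transfer_size at a
// time, so the shared structure is synchronized once per batch rather
// than once per entry.
#if 0
template <typename T, int BatchSize>
static int pop_local_batch(LocalQueue<T>* q, T (&buffer)[BatchSize]) {
  int n = 0;
  T e;
  while (n < BatchSize && q->pop_local(e)) {
    buffer[n++] = e;
  }
  return n; // the caller pushes buffer[0..n) to the shared stack in one go
}
#endif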

void CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) return;

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
                             _worker_id, target_size);
    }

    oop obj;
    bool ret = _task_queue->pop_local(obj);
    while (ret) {
      statsOnly( ++_local_pops );

      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
                               p2i((void*) obj));
      }

      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
      assert(!_g1h->is_on_master_free_list(
                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");

      scan_object(obj);

      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(obj);
      }
    }

    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
                             _worker_id, _task_queue->size());
    }
  }
}

void CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end). Note that, because
  // we move entries from the global stack in chunks, or because another
  // task might be doing the same, we might in fact drop below the
  // target. But this is not a problem.
  size_t target_size;
  if (partially) {
    target_size = _cm->partial_mark_stack_size_target();
  } else {
    target_size = 0;
  }

  if (_cm->mark_stack_size() > target_size) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
                             _worker_id, target_size);
    }

    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      get_entries_from_global_stack();
      drain_local_queue(partially);
    }

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
                             _worker_id, _cm->mark_stack_size());
    }
  }
}
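
// Illustrative numbers for the partial-drain target above (assumed
// values, for example only): with a local queue capacity of 16384
// entries and GCDrainStackTargetSize at 64, a partial drain stops at
// MIN2(16384/3, 64) = 64 entries, deliberately leaving work behind for
// other tasks to steal; a total drain (target 0) empties the queue.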

// The SATB queue set makes several assumptions about whether the par or
// non-par versions of its methods are called; this is why some of the
// code is replicated. We should really get rid of the single-threaded
// version of the code to simplify things.
void CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that.
  _draining_satb_buffers = true;

  CMObjectClosure oc(this);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    satb_mq_set.set_par_closure(_worker_id, &oc);
  } else {
    satb_mq_set.set_closure(&oc);
  }

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    while (!has_aborted() &&
           satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
      if (_cm->verbose_medium()) {
        gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
      }
      statsOnly( ++_satb_buffers_processed );
      regular_clock_call();
    }
  } else {
    while (!has_aborted() &&
           satb_mq_set.apply_closure_to_completed_buffer()) {
      if (_cm->verbose_medium()) {
        gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
      }
      statsOnly( ++_satb_buffers_processed );
      regular_clock_call();
    }
  }

  if (!concurrent() && !has_aborted()) {
    // We should only do this during remark.
    if (G1CollectedHeap::use_parallel_gc_threads()) {
      satb_mq_set.par_iterate_closure_all_threads(_worker_id);
    } else {
      satb_mq_set.iterate_closure_all_threads();
    }
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    satb_mq_set.set_par_closure(_worker_id, NULL);
  } else {
    satb_mq_set.set_closure(NULL);
  }

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}

void CMTask::print_stats() {
  gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
                         _worker_id, _calls);
  gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                         _elapsed_time_ms, _termination_time_ms);
  gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                         _step_times_ms.num(), _step_times_ms.avg(),
                         _step_times_ms.sd());
  gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
                         _step_times_ms.maximum(), _step_times_ms.sum());

#if _MARKING_STATS_
  gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                         _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
                         _all_clock_intervals_ms.sd());
  gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
                         _all_clock_intervals_ms.maximum(),
                         _all_clock_intervals_ms.sum());
  gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
                         _clock_due_to_scanning, _clock_due_to_marking);
  gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
                         _objs_scanned, _objs_found_on_bitmap);
  gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
                         _local_pushes, _local_pops, _local_max_size);
  gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
                         _global_pushes, _global_pops, _global_max_size);
  gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
                         _global_transfers_to, _global_transfers_from);
  gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed);
  gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
  gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
                         _steal_attempts, _steals);
  gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
  gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
                         _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
  gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d",
                         _aborted_timed_out, _aborted_satb, _aborted_termination);
#endif // _MARKING_STATS_
}

/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework. It can be called in
    parallel with other invocations of do_marking_step() on different
    tasks (but only one invocation per task, obviously) and
    concurrently with the mutator threads, or during remark; hence it
    eliminates the need for two versions of the code. When called
    during remark, it will pick up from where the task left off during
    the concurrent marking phase. Interestingly, tasks are also
    claimable during evacuation pauses, since do_marking_step()
    ensures that it aborts before it needs to yield.

    The data structures that it uses to do marking work are the
    following:

      (1) Marking Bitmap. If there are gray objects that appear only
      on the bitmap (this happens either when dealing with an overflow
      or when the initial marking phase has simply marked the roots
      and didn't push them on the stack), then tasks claim heap
      regions whose bitmap they then scan to find gray objects. A
      global finger indicates where the end of the last claimed region
      is. A local finger indicates how far into the region a task has
      scanned. The two fingers are used to determine how to gray an
      object (i.e. whether simply marking it is OK, as it will be
      visited by a task in the future, or whether it needs to also be
      pushed on a stack).

      (2) Local Queue. The local queue of the task, which is accessed
      reasonably efficiently by the task. Other tasks can steal from
      it when they run out of work. Throughout the marking phase, a
      task attempts to keep its local queue short but not totally
      empty, so that entries are available for stealing by other
      tasks. Only when there is no more work will a task totally
      drain its local queue.

      (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex, and more fine-grained
      interaction with it might cause contention. If it overflows,
      then the marking phase should restart and iterate over the
      bitmap to identify gray objects. Throughout the marking phase,
      tasks attempt to keep the global mark stack at a small length
      but not totally empty, so that entries are available for popping
      by other tasks. Only when there is no more work will tasks
      totally drain the global mark stack.

      (4) SATB Buffer Queue. This is where completed SATB buffers are
      made available. Buffers are regularly removed from this queue
      and scanned for roots, so that the queue doesn't get too
      long. During remark, all completed buffers are processed, as
      well as the filled-in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

      (1) When the marking phase has been aborted (after a Full GC).

      (2) When a global overflow (on the global stack) has been
      triggered. Before the task aborts, it will actually sync up with
      the other tasks to ensure that all the marking data structures
      (local queues, stacks, fingers etc.) are re-initialized so that
      when do_marking_step() completes, the marking phase can
      immediately restart.

      (3) When enough completed SATB buffers are available. The
      do_marking_step() method only tries to drain SATB buffers right
      at the beginning. So, if enough buffers are available, the
      marking step aborts and the SATB buffers are processed at
      the beginning of the next invocation.

      (4) To yield. When we have to yield then we abort and do the
      yielding right at the end of do_marking_step(). This saves us
      from a lot of hassle as, by yielding, we might allow a Full GC.
      If this happens then objects will be compacted underneath our
      feet, the heap might shrink, etc. We save ourselves from
      checking for this by just aborting and doing the yield right at
      the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (in
    sub-ms intervals) throughout marking. It is this clock method that
    checks all the abort conditions which were mentioned above and
    decides when the task should abort. A work-based scheme is used to
    trigger this clock method: when the number of object words the
    marking phase has scanned or the number of references the marking
    phase has visited reaches a given limit. Additional invocations of
    the clock method have been planted in a few other strategic places
    too. The initial reason for the clock method was to avoid calling
    vtime too regularly, as it is quite expensive. So, once it was in
    place, it was natural to piggyback all the other conditions on it
    too and not constantly check them throughout the code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.
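
    As an illustration only (a sketch, not a verbatim excerpt of the
    drivers; task, cm and mark_step_duration_ms are hypothetical
    stand-ins), a parallel caller is expected to drive a task roughly
    like this, retrying until the step completes without aborting:

      do {
        task->do_marking_step(mark_step_duration_ms,
                              true,    // do_termination
                              false);  // is_serial
        if (task->has_aborted()) {
          // yield, handle a potential overflow, or sync up with the
          // other tasks, and then retry
        }
      } while (task->has_aborted() && !cm->has_aborted());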

 *****************************************************************************/

void CMTask::do_marking_step(double time_target_ms,
                             bool do_termination,
                             bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it is
  // possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;
  statsOnly( _interval_start_time_ms = _start_time_ms );

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms =
    g1_policy->get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached = 0;
  recalculate_limits();

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
                           "target = %1.2lfms >>>>>>>>>>",
                           _worker_id, _calls, _time_target_ms);
  }

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // on this method's stack frame.
  CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] we're scanning part "
                               "["PTR_FORMAT", "PTR_FORMAT") "
                               "of region "HR_FORMAT,
                               _worker_id, p2i(_finger), p2i(_region_limit),
                               HR_FORMAT_PARAMS(_curr_region));
      }

      assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around the loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region == NULL, "invariant");
      assert(_finger == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
      }
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        statsOnly( ++_regions_claimed );

        if (_cm->verbose_low()) {
          gclog_or_tty->print_cr("[%u] we successfully claimed "
                                 "region "PTR_FORMAT,
                                 _worker_id, p2i(claimed_region));
        }

        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once around the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
    }

    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
    }

    while (!has_aborted()) {
      oop obj;
      statsOnly( ++_steal_attempts );

      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        if (_cm->verbose_medium()) {
          gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
                                 _worker_id, p2i((void*) obj));
        }

        statsOnly( ++_steals );

        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // If we are about to wrap up and go into termination, check if we
  // should raise the overflow flag.
  if (do_termination && !has_aborted()) {
    if (_cm->force_overflow()->should_force()) {
      _cm->set_has_overflown();
      regular_clock_call();
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
    }

    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
      }
    } else {
      // Apparently there's more work to do. Let's abort this task. Its
      // caller will restart it and we can hopefully find more things to do.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] apparently there is more work to do",
                               _worker_id);
      }

      set_has_aborted();
      statsOnly( ++_aborted_termination );
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was allocated in this frame doesn't escape it by
  // accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.

    statsOnly( ++_aborted );

    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
      }

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      statsOnly( ++_aborted_overflow );

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _worker_id, _time_target_ms, elapsed_time_ms);
      if (_cm->has_aborted()) {
        gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
                               _worker_id);
      }
    }
  } else {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _worker_id, _time_target_ms, elapsed_time_ms);
    }
  }

  _claimed = false;
}

CMTask::CMTask(uint worker_id,
               ConcurrentMark* cm,
               size_t* marked_bytes,
               BitMap* card_bm,
               CMTaskQueue* task_queue,
               CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL),
    _marked_bytes_array(marked_bytes),
    _card_bm(card_bm) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  statsOnly( _clock_due_to_scanning = 0;
             _clock_due_to_marking = 0 );

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX "###"

#define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT " %-4s"
#define G1PPRL_TYPE_H_FORMAT " %4s"
#define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT " %9s"
#define G1PPRL_DOUBLE_FORMAT " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
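
// For illustration (hypothetical addresses and sizes, shortened with an
// ellipsis): a per-region line composed from the formats above comes out
// roughly as
//
//   ### HUMS 0x00000000f0000000-0x00000000f0100000   1048576 ...
//
// i.e. G1PPRL_LINE_PREFIX followed by one G1PPRL_*_FORMAT field per
// column printed by doHeapRegion() below.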

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
  : _out(out),
    _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _hum_used_bytes(0), _hum_capacity_bytes(0),
    _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_committed = g1h->g1_committed();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  _out->cr();
  _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
                 G1PPRL_SUM_ADDR_FORMAT("committed")
                 G1PPRL_SUM_ADDR_FORMAT("reserved")
                 G1PPRL_SUM_BYTE_FORMAT("region-size"),
                 p2i(g1_committed.start()), p2i(g1_committed.end()),
                 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                 HeapRegion::GrainBytes);
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "type", "address-range",
                 "used", "prev-live", "next-live", "gc-eff",
                 "remset", "code-roots");
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "", "",
                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                 "(bytes)", "(bytes)");
}

// It takes as a parameter a pointer to one of the _hum_* fields, it
// deduces the corresponding value for a region in a humongous region
// series (either the region size, or what's left if the _hum_* field
// is < the region size), and updates the _hum_* field accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  size_t bytes = 0;
  // The > 0 check is to deal with the prev and next live bytes which
  // could be 0.
  if (*hum_bytes > 0) {
    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
    *hum_bytes -= bytes;
  }
  return bytes;
}

// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and that we visit the regions in address order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
                                                     size_t* capacity_bytes,
                                                     size_t* prev_live_bytes,
                                                     size_t* next_live_bytes) {
  assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  *used_bytes = get_hum_bytes(&_hum_used_bytes);
  *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
  *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type = "";
  HeapWord* bottom = r->bottom();
  HeapWord* end = r->end();
  size_t capacity_bytes = r->capacity();
  size_t used_bytes = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff = r->gc_efficiency();
  size_t remset_bytes = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  if (r->used() == 0) {
    type = "FREE";
  } else if (r->is_survivor()) {
    type = "SURV";
  } else if (r->is_young()) {
    type = "EDEN";
  } else if (r->startsHumongous()) {
    type = "HUMS";

    assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
           _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
           "they should have been zeroed after the last time we used them");
    // Set up the _hum_* fields.
    _hum_capacity_bytes = capacity_bytes;
    _hum_used_bytes = used_bytes;
    _hum_prev_live_bytes = prev_live_bytes;
    _hum_next_live_bytes = next_live_bytes;
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    end = bottom + HeapRegion::GrainWords;
  } else if (r->continuesHumongous()) {
    type = "HUMC";
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    assert(end == bottom + HeapRegion::GrainWords, "invariant");
  } else {
    type = "OLD";
  }

  _total_used_bytes += used_bytes;
  _total_capacity_bytes += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_FORMAT
                 G1PPRL_ADDR_BASE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_DOUBLE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT,
                 type, p2i(bottom), p2i(end),
                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                 remset_bytes, strong_code_roots_bytes);

  return false;
}
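
// A worked example of the get_hum_bytes() arithmetic above (hypothetical
// sizes): with HeapRegion::GrainBytes = 1M and _hum_used_bytes set to
// 2.5M by the "starts humongous" region, three successive regions in the
// series report used bytes of 1M, 1M and 0.5M, draining the field to
// zero; any further call would return 0.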

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // add static memory usages to remembered set sizes
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 " SUMMARY"
                 G1PPRL_SUM_MB_FORMAT("capacity")
                 G1PPRL_SUM_MB_PERC_FORMAT("used")
                 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                 G1PPRL_SUM_MB_FORMAT("remset")
                 G1PPRL_SUM_MB_FORMAT("code-roots"),
                 bytes_to_mb(_total_capacity_bytes),
                 bytes_to_mb(_total_used_bytes),
                 perc(_total_used_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_prev_live_bytes),
                 perc(_total_prev_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_next_live_bytes),
                 perc(_total_next_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_remset_bytes),
                 bytes_to_mb(_total_strong_code_roots_bytes));
  _out->cr();
}
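
// A usage sketch for the closure above (illustrative only, not compiled;
// the phase name and call site are assumptions): construct it around a
// walk of all heap regions, so the constructor prints the header,
// doHeapRegion() prints one line per region, and the destructor prints
// the summary footer.
#if 0
static void print_region_liveness_info_example(G1CollectedHeap* g1h) {
  G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
  g1h->heap_region_iterate(&cl);
}
#endif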