/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
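  // Each bitmap bit covers (1 << _shifter) heap words (with the default
  // object alignment on a 64-bit build _shifter is typically 0, i.e. one
  // bit per heap word), so candidate object starts are multiples of
  // HeapWordSize << _shifter bytes.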
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize == heap_rs.size()>>LogHeapWordSize;
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
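  // The reservation above is sized at one bit per (1 << _shifter) heap
  // words, i.e. (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes plus
  // one byte for rounding; committing it in full here means the bitmap
  // consumes real memory from startup rather than on demand.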
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}

void CMBitMap::clearAll() {
  _bm.clear();
}

void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflowed the marking stack during marking.
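  // Expansion is deliberately deferred to this safepoint: the overflow
  // is only recorded during marking (see set_should_expand()), the stack
  // is drained first, and then we attempt to re-reserve at double the
  // capacity, bounded by MarkStackSizeMax.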
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
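  // ParGCRareEvent_lock serializes this bulk push against par_pop_arr(),
  // so the slots reserved by bumping _index below cannot be popped
  // before they are filled.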
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // would be a correctness issue, so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
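  // Claiming is double-checked: do a racy read of _next_survivor first,
  // and only take RootRegionScan_lock (and re-read it) when that read
  // suggests there is still a region left to claim.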
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(log2_intptr(MinObjAlignment)),
  _markBitMap2(log2_intptr(MinObjAlignment)),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = " PTR_FORMAT ", "
                           "heap end = " PTR_FORMAT,
                           p2i(_heap_start), p2i(_heap_end));
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads = 0;
    _max_parallel_marking_threads = 0;
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
        (double) os::processor_count();
      double sleep_factor =
        (1.0 - marking_task_overhead) / marking_task_overhead;

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor = sleep_factor;
      _marking_task_overhead = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
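      // scale_parallel_threads() maps n parallel GC threads to
      // MAX2((n + 2) / 4, 1U) marking threads; e.g. ParallelGCThreads
      // of 8 yields 2 concurrent marking threads.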
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
                                             _max_parallel_marking_threads,
                                             false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
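    // MarkStackSize is non-default here; only combinations the user
    // actually specified on the command line are range-checked below.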
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.
    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions
  // at the end of evacuation pauses, when tasks are inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end   = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}

void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
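  // Both invariants are re-checked as (cheaper) asserts inside the
  // chunked clearing loop below, and again as guarantees once the
  // bitmap has been fully cleared.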
  guarantee(!g1h->mark_in_progress(), "invariant");

  // clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start  = _nextMarkBitMap->startWord();
  HeapWord* end    = _nextMarkBitMap->endWord();
  HeapWord* cur    = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur,next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

bool ConcurrentMark::nextMarkBitmapIsClear() {
  return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}

void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
        gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                               "overhead %1.4lf",
                               elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                               the_task->conc_overhead(os::elapsedTime()) * 8.0);
        gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                               elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
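// Liveness is recorded at card granularity: card_bitmap_index_for(addr)
// shifts addr right by CardTableModRefBS::card_shift and subtracts
// _heap_bottom_card_num (set up in the ConcurrentMark constructor), so
// bit 0 of a card bitmap corresponds to the first card of the heap.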
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // top is not card aligned - increment to cover
        // all the cards spanned by the [ntams, top) range
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
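    // (A region whose only live data was allocated since NTAMS already
    // had its bit set in the branch above.)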
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
1602 BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); 1603 1604 bool expected = _exp_region_bm->at(index); 1605 bool actual = _region_bm->at(index); 1606 if (expected && !actual) { 1607 if (_verbose) { 1608 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: " 1609 "expected: %s, actual: %s", 1610 hr->hrs_index(), 1611 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1612 } 1613 failures += 1; 1614 } 1615 1616 // Verify that the card bit maps for the cards spanned by the current 1617 // region match. We have an error if we have a set bit in the expected 1618 // bit map and the corresponding bit in the actual bitmap is not set. 1619 1620 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1621 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1622 1623 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1624 expected = _exp_card_bm->at(i); 1625 actual = _card_bm->at(i); 1626 1627 if (expected && !actual) { 1628 if (_verbose) { 1629 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1630 "expected: %s, actual: %s", 1631 hr->hrs_index(), i, 1632 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1633 } 1634 failures += 1; 1635 } 1636 } 1637 1638 if (failures > 0 && _verbose) { 1639 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1640 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1641 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1642 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1643 } 1644 1645 _failures += failures; 1646 1647 // We could stop iteration over the heap when we 1648 // find the first violating region by returning true. 1649 return false; 1650 } 1651 }; 1652 1653 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1654 protected: 1655 G1CollectedHeap* _g1h; 1656 ConcurrentMark* _cm; 1657 BitMap* _actual_region_bm; 1658 BitMap* _actual_card_bm; 1659 1660 uint _n_workers; 1661 1662 BitMap* _expected_region_bm; 1663 BitMap* _expected_card_bm; 1664 1665 int _failures; 1666 bool _verbose; 1667 1668 public: 1669 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1670 BitMap* region_bm, BitMap* card_bm, 1671 BitMap* expected_region_bm, BitMap* expected_card_bm) 1672 : AbstractGangTask("G1 verify final counting"), 1673 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1674 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1675 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1676 _failures(0), _verbose(false), 1677 _n_workers(0) { 1678 assert(VerifyDuringGC, "don't call this otherwise"); 1679 1680 // Use the value already set as the number of active threads 1681 // in the call to run_task(). 
1682 if (G1CollectedHeap::use_parallel_gc_threads()) { 1683 assert( _g1h->workers()->active_workers() > 0, 1684 "Should have been previously set"); 1685 _n_workers = _g1h->workers()->active_workers(); 1686 } else { 1687 _n_workers = 1; 1688 } 1689 1690 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1691 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1692 1693 _verbose = _cm->verbose_medium(); 1694 } 1695 1696 void work(uint worker_id) { 1697 assert(worker_id < _n_workers, "invariant"); 1698 1699 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1700 _actual_region_bm, _actual_card_bm, 1701 _expected_region_bm, 1702 _expected_card_bm, 1703 _verbose); 1704 1705 if (G1CollectedHeap::use_parallel_gc_threads()) { 1706 _g1h->heap_region_par_iterate_chunked(&verify_cl, 1707 worker_id, 1708 _n_workers, 1709 HeapRegion::VerifyCountClaimValue); 1710 } else { 1711 _g1h->heap_region_iterate(&verify_cl); 1712 } 1713 1714 Atomic::add(verify_cl.failures(), &_failures); 1715 } 1716 1717 int failures() const { return _failures; } 1718 }; 1719 1720 // Closure that finalizes the liveness counting data. 1721 // Used during the cleanup pause. 1722 // Sets the bits corresponding to the interval [NTAMS, top) 1723 // (which contains the implicitly live objects) in the 1724 // card liveness bitmap. Also sets the bit, in the region liveness 1725 // bitmap, for each region that contains live data. 1726 1727 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1728 public: 1729 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1730 BitMap* region_bm, 1731 BitMap* card_bm) : 1732 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1733 1734 bool doHeapRegion(HeapRegion* hr) { 1735 1736 if (hr->continuesHumongous()) { 1737 // We will ignore these here and process them when their 1738 // associated "starts humongous" region is processed (see 1739 // set_bit_for_heap_region()). Note that we cannot rely on their 1740 // associated "starts humongous" region to have their bit set to 1741 // 1 since, due to the region chunking in the parallel region 1742 // iteration, a "continues humongous" region might be visited 1743 // before its associated "starts humongous". 1744 return false; 1745 } 1746 1747 HeapWord* ntams = hr->next_top_at_mark_start(); 1748 HeapWord* top = hr->top(); 1749 1750 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1751 1752 // Mark the allocated-since-marking portion... 1753 if (ntams < top) { 1754 // This definitely means the region has live objects. 1755 set_bit_for_region(hr); 1756 1757 // Now set the bits in the card bitmap for [ntams, top) 1758 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1759 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1760 1761 // Note: if we're looking at the last region in the heap - top 1762 // could actually be just beyond the end of the heap; end_idx 1763 // will then correspond to a (non-existent) card that is also 1764 // just beyond the heap.
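// A worked example of the index bumping below (illustrative numbers
// only, assuming 512-byte cards and a card-aligned heap base): for
// [ntams, top) = [0x1000, 0x1280), start_idx covers the card at 0x1000
// and end_idx the card at 0x1280; since 0x1280 is not card aligned,
// end_idx is incremented so that the half-open range
// [start_idx, end_idx) also includes the final, partially spanned card.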
1765 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1766 // end of object is not card aligned - increment to cover 1767 // all the cards spanned by the object 1768 end_idx += 1; 1769 } 1770 1771 assert(end_idx <= _card_bm->size(), 1772 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1773 end_idx, _card_bm->size())); 1774 assert(start_idx < _card_bm->size(), 1775 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1776 start_idx, _card_bm->size())); 1777 1778 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1779 } 1780 1781 // Set the bit for the region if it contains live data 1782 if (hr->next_marked_bytes() > 0) { 1783 set_bit_for_region(hr); 1784 } 1785 1786 return false; 1787 } 1788 }; 1789 1790 class G1ParFinalCountTask: public AbstractGangTask { 1791 protected: 1792 G1CollectedHeap* _g1h; 1793 ConcurrentMark* _cm; 1794 BitMap* _actual_region_bm; 1795 BitMap* _actual_card_bm; 1796 1797 uint _n_workers; 1798 1799 public: 1800 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1801 : AbstractGangTask("G1 final counting"), 1802 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1803 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1804 _n_workers(0) { 1805 // Use the value already set as the number of active threads 1806 // in the call to run_task(). 1807 if (G1CollectedHeap::use_parallel_gc_threads()) { 1808 assert( _g1h->workers()->active_workers() > 0, 1809 "Should have been previously set"); 1810 _n_workers = _g1h->workers()->active_workers(); 1811 } else { 1812 _n_workers = 1; 1813 } 1814 } 1815 1816 void work(uint worker_id) { 1817 assert(worker_id < _n_workers, "invariant"); 1818 1819 FinalCountDataUpdateClosure final_update_cl(_g1h, 1820 _actual_region_bm, 1821 _actual_card_bm); 1822 1823 if (G1CollectedHeap::use_parallel_gc_threads()) { 1824 _g1h->heap_region_par_iterate_chunked(&final_update_cl, 1825 worker_id, 1826 _n_workers, 1827 HeapRegion::FinalCountClaimValue); 1828 } else { 1829 _g1h->heap_region_iterate(&final_update_cl); 1830 } 1831 } 1832 }; 1833 1834 class G1ParNoteEndTask; 1835 1836 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1837 G1CollectedHeap* _g1; 1838 size_t _max_live_bytes; 1839 uint _regions_claimed; 1840 size_t _freed_bytes; 1841 FreeRegionList* _local_cleanup_list; 1842 HeapRegionSetCount _old_regions_removed; 1843 HeapRegionSetCount _humongous_regions_removed; 1844 HRRSCleanupTask* _hrrs_cleanup_task; 1845 double _claimed_region_time; 1846 double _max_region_time; 1847 1848 public: 1849 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1850 FreeRegionList* local_cleanup_list, 1851 HRRSCleanupTask* hrrs_cleanup_task) : 1852 _g1(g1), 1853 _max_live_bytes(0), _regions_claimed(0), 1854 _freed_bytes(0), 1855 _claimed_region_time(0.0), _max_region_time(0.0), 1856 _local_cleanup_list(local_cleanup_list), 1857 _old_regions_removed(), 1858 _humongous_regions_removed(), 1859 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1860 1861 size_t freed_bytes() { return _freed_bytes; } 1862 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } 1863 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } 1864 1865 bool doHeapRegion(HeapRegion *hr) { 1866 if (hr->continuesHumongous()) { 1867 return false; 1868 } 1869 // We use a claim value of zero here because all regions 1870 // were claimed with value 1 in the FinalCount task. 
1871 _g1->reset_gc_time_stamps(hr); 1872 double start = os::elapsedTime(); 1873 _regions_claimed++; 1874 hr->note_end_of_marking(); 1875 _max_live_bytes += hr->max_live_bytes(); 1876 1877 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1878 _freed_bytes += hr->used(); 1879 hr->set_containing_set(NULL); 1880 if (hr->isHumongous()) { 1881 assert(hr->startsHumongous(), "we should only see starts humongous"); 1882 _humongous_regions_removed.increment(1u, hr->capacity()); 1883 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1884 } else { 1885 _old_regions_removed.increment(1u, hr->capacity()); 1886 _g1->free_region(hr, _local_cleanup_list, true); 1887 } 1888 } else { 1889 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1890 } 1891 1892 double region_time = (os::elapsedTime() - start); 1893 _claimed_region_time += region_time; 1894 if (region_time > _max_region_time) { 1895 _max_region_time = region_time; 1896 } 1897 return false; 1898 } 1899 1900 size_t max_live_bytes() { return _max_live_bytes; } 1901 uint regions_claimed() { return _regions_claimed; } 1902 double claimed_region_time_sec() { return _claimed_region_time; } 1903 double max_region_time_sec() { return _max_region_time; } 1904 }; 1905 1906 class G1ParNoteEndTask: public AbstractGangTask { 1907 friend class G1NoteEndOfConcMarkClosure; 1908 1909 protected: 1910 G1CollectedHeap* _g1h; 1911 size_t _max_live_bytes; 1912 size_t _freed_bytes; 1913 FreeRegionList* _cleanup_list; 1914 1915 public: 1916 G1ParNoteEndTask(G1CollectedHeap* g1h, 1917 FreeRegionList* cleanup_list) : 1918 AbstractGangTask("G1 note end"), _g1h(g1h), 1919 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { } 1920 1921 void work(uint worker_id) { 1922 double start = os::elapsedTime(); 1923 FreeRegionList local_cleanup_list("Local Cleanup List"); 1924 HRRSCleanupTask hrrs_cleanup_task; 1925 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1926 &hrrs_cleanup_task); 1927 if (G1CollectedHeap::use_parallel_gc_threads()) { 1928 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id, 1929 _g1h->workers()->active_workers(), 1930 HeapRegion::NoteEndClaimValue); 1931 } else { 1932 _g1h->heap_region_iterate(&g1_note_end); 1933 } 1934 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1935 1936 // Now update the lists 1937 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1938 { 1939 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1940 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1941 _max_live_bytes += g1_note_end.max_live_bytes(); 1942 _freed_bytes += g1_note_end.freed_bytes(); 1943 1944 // If we iterate over the global cleanup list at the end of 1945 // cleanup to do this printing we will not guarantee to only 1946 // generate output for the newly-reclaimed regions (the list 1947 // might not be empty at the beginning of cleanup; we might 1948 // still be working on its previous contents). So we do the 1949 // printing here, before we append the new regions to the global 1950 // cleanup list. 
1951 1952 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1953 if (hr_printer->is_active()) { 1954 FreeRegionListIterator iter(&local_cleanup_list); 1955 while (iter.more_available()) { 1956 HeapRegion* hr = iter.get_next(); 1957 hr_printer->cleanup(hr); 1958 } 1959 } 1960 1961 _cleanup_list->add_ordered(&local_cleanup_list); 1962 assert(local_cleanup_list.is_empty(), "post-condition"); 1963 1964 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1965 } 1966 } 1967 size_t max_live_bytes() { return _max_live_bytes; } 1968 size_t freed_bytes() { return _freed_bytes; } 1969 }; 1970 1971 class G1ParScrubRemSetTask: public AbstractGangTask { 1972 protected: 1973 G1RemSet* _g1rs; 1974 BitMap* _region_bm; 1975 BitMap* _card_bm; 1976 public: 1977 G1ParScrubRemSetTask(G1CollectedHeap* g1h, 1978 BitMap* region_bm, BitMap* card_bm) : 1979 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), 1980 _region_bm(region_bm), _card_bm(card_bm) { } 1981 1982 void work(uint worker_id) { 1983 if (G1CollectedHeap::use_parallel_gc_threads()) { 1984 _g1rs->scrub_par(_region_bm, _card_bm, worker_id, 1985 HeapRegion::ScrubRemSetClaimValue); 1986 } else { 1987 _g1rs->scrub(_region_bm, _card_bm); 1988 } 1989 } 1990 1991 }; 1992 1993 void ConcurrentMark::cleanup() { 1994 // world is stopped at this checkpoint 1995 assert(SafepointSynchronize::is_at_safepoint(), 1996 "world should be stopped"); 1997 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1998 1999 // If a full collection has happened, we shouldn't do this. 2000 if (has_aborted()) { 2001 g1h->set_marking_complete(); // So bitmap clearing isn't confused 2002 return; 2003 } 2004 2005 g1h->verify_region_sets_optional(); 2006 2007 if (VerifyDuringGC) { 2008 HandleMark hm; // handle scope 2009 Universe::heap()->prepare_for_verify(); 2010 Universe::verify(VerifyOption_G1UsePrevMarking, 2011 " VerifyDuringGC:(before)"); 2012 } 2013 g1h->check_bitmaps("Cleanup Start"); 2014 2015 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 2016 g1p->record_concurrent_mark_cleanup_start(); 2017 2018 double start = os::elapsedTime(); 2019 2020 HeapRegionRemSet::reset_for_cleanup_tasks(); 2021 2022 uint n_workers; 2023 2024 // Do counting once more with the world stopped for good measure. 2025 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 2026 2027 if (G1CollectedHeap::use_parallel_gc_threads()) { 2028 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2029 "sanity check"); 2030 2031 g1h->set_par_threads(); 2032 n_workers = g1h->n_par_threads(); 2033 assert(g1h->n_par_threads() == n_workers, 2034 "Should not have been reset"); 2035 g1h->workers()->run_task(&g1_par_count_task); 2036 // Done with the parallel phase so reset to 0. 2037 g1h->set_par_threads(0); 2038 2039 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue), 2040 "sanity check"); 2041 } else { 2042 n_workers = 1; 2043 g1_par_count_task.work(0); 2044 } 2045 2046 if (VerifyDuringGC) { 2047 // Verify that the counting data accumulated during marking matches 2048 // that calculated by walking the marking bitmap. 
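// In outline, the verification that follows (a sketch of the code
// below, not additional behavior): allocate fresh "expected" region
// and card bitmaps, recompute them from the next mark bitmap via
// CalcLiveObjectsClosure (wrapped in VerifyLiveObjectDataHRClosure),
// and require that everything set in the expected bitmaps is also set
// in the actual ones, e.g.
//   G1ParVerifyFinalCountTask g1_par_verify_task(g1h, &_region_bm, &_card_bm,
//                                                &expected_region_bm, &expected_card_bm);
//   ...
//   guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");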
2049 2050 // Bitmaps to hold expected values 2051 BitMap expected_region_bm(_region_bm.size(), true); 2052 BitMap expected_card_bm(_card_bm.size(), true); 2053 2054 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 2055 &_region_bm, 2056 &_card_bm, 2057 &expected_region_bm, 2058 &expected_card_bm); 2059 2060 if (G1CollectedHeap::use_parallel_gc_threads()) { 2061 g1h->set_par_threads((int)n_workers); 2062 g1h->workers()->run_task(&g1_par_verify_task); 2063 // Done with the parallel phase so reset to 0. 2064 g1h->set_par_threads(0); 2065 2066 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue), 2067 "sanity check"); 2068 } else { 2069 g1_par_verify_task.work(0); 2070 } 2071 2072 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); 2073 } 2074 2075 size_t start_used_bytes = g1h->used(); 2076 g1h->set_marking_complete(); 2077 2078 double count_end = os::elapsedTime(); 2079 double this_final_counting_time = (count_end - start); 2080 _total_counting_time += this_final_counting_time; 2081 2082 if (G1PrintRegionLivenessInfo) { 2083 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); 2084 _g1h->heap_region_iterate(&cl); 2085 } 2086 2087 // Install newly created mark bitmap as "prev". 2088 swapMarkBitMaps(); 2089 2090 g1h->reset_gc_time_stamp(); 2091 2092 // Note end of marking in all heap regions. 2093 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list); 2094 if (G1CollectedHeap::use_parallel_gc_threads()) { 2095 g1h->set_par_threads((int)n_workers); 2096 g1h->workers()->run_task(&g1_par_note_end_task); 2097 g1h->set_par_threads(0); 2098 2099 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), 2100 "sanity check"); 2101 } else { 2102 g1_par_note_end_task.work(0); 2103 } 2104 g1h->check_gc_time_stamps(); 2105 2106 if (!cleanup_list_is_empty()) { 2107 // The cleanup list is not empty, so we'll have to process it 2108 // concurrently. Notify anyone else that might be wanting free 2109 // regions that there will be more free regions coming soon. 2110 g1h->set_free_regions_coming(); 2111 } 2112 2113 // Do the remembered set scrubbing before the record_concurrent_mark_cleanup_end() 2114 // call below, since it affects the metric by which we sort the heap regions. 2115 if (G1ScrubRemSets) { 2116 double rs_scrub_start = os::elapsedTime(); 2117 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); 2118 if (G1CollectedHeap::use_parallel_gc_threads()) { 2119 g1h->set_par_threads((int)n_workers); 2120 g1h->workers()->run_task(&g1_par_scrub_rs_task); 2121 g1h->set_par_threads(0); 2122 2123 assert(g1h->check_heap_region_claim_values( 2124 HeapRegion::ScrubRemSetClaimValue), 2125 "sanity check"); 2126 } else { 2127 g1_par_scrub_rs_task.work(0); 2128 } 2129 2130 double rs_scrub_end = os::elapsedTime(); 2131 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); 2132 _total_rs_scrub_time += this_rs_scrub_time; 2133 } 2134 2135 // this will also free any regions totally full of garbage objects, 2136 // and sort the regions. 2137 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers); 2138 2139 // Statistics. 2140 double end = os::elapsedTime(); 2141 _cleanup_times.add((end - start) * 1000.0); 2142 2143 if (G1Log::fine()) { 2144 g1h->print_size_transition(gclog_or_tty, 2145 start_used_bytes, 2146 g1h->used(), 2147 g1h->capacity()); 2148 } 2149 2150 // Clean up will have freed any regions completely full of garbage. 2151 // Update the soft reference policy with the new heap occupancy.
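// (Universe::update_heap_info_at_gc() refreshes the cached capacity and
// occupancy figures consulted by the soft reference clearing policy, so
// it runs after the freed regions have been accounted for above.)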
2152 Universe::update_heap_info_at_gc(); 2153 2154 if (VerifyDuringGC) { 2155 HandleMark hm; // handle scope 2156 Universe::heap()->prepare_for_verify(); 2157 Universe::verify(VerifyOption_G1UsePrevMarking, 2158 " VerifyDuringGC:(after)"); 2159 } 2160 2161 g1h->check_bitmaps("Cleanup End"); 2162 2163 g1h->verify_region_sets_optional(); 2164 2165 // We need to make this be a "collection" so any collection pause that 2166 // races with it goes around and waits for completeCleanup to finish. 2167 g1h->increment_total_collections(); 2168 2169 // Clean out dead classes and update Metaspace sizes. 2170 ClassLoaderDataGraph::purge(); 2171 MetaspaceGC::compute_new_size(); 2172 2173 // We reclaimed old regions so we should calculate the sizes to make 2174 // sure we update the old gen/space data. 2175 g1h->g1mm()->update_sizes(); 2176 2177 g1h->trace_heap_after_concurrent_cycle(); 2178 } 2179 2180 void ConcurrentMark::completeCleanup() { 2181 if (has_aborted()) return; 2182 2183 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2184 2185 _cleanup_list.verify_optional(); 2186 FreeRegionList tmp_free_list("Tmp Free List"); 2187 2188 if (G1ConcRegionFreeingVerbose) { 2189 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 2190 "cleanup list has %u entries", 2191 _cleanup_list.length()); 2192 } 2193 2194 // No one else should be accessing the _cleanup_list at this point, 2195 // so it's not necessary to take any locks. 2196 while (!_cleanup_list.is_empty()) { 2197 HeapRegion* hr = _cleanup_list.remove_head(); 2198 assert(hr != NULL, "Got NULL from a non-empty list"); 2199 hr->par_clear(); 2200 tmp_free_list.add_ordered(hr); 2201 2202 // Instead of adding one region at a time to the secondary_free_list, 2203 // we accumulate them in the local list and move them a few at a 2204 // time. This also cuts down on the number of notify_all() calls 2205 // we do during this process. We'll also append the local list when 2206 // _cleanup_list is empty (which means we just removed the last 2207 // region from the _cleanup_list). 2208 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || 2209 _cleanup_list.is_empty()) { 2210 if (G1ConcRegionFreeingVerbose) { 2211 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 2212 "appending %u entries to the secondary_free_list, " 2213 "cleanup list still has %u entries", 2214 tmp_free_list.length(), 2215 _cleanup_list.length()); 2216 } 2217 2218 { 2219 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 2220 g1h->secondary_free_list_add(&tmp_free_list); 2221 SecondaryFreeList_lock->notify_all(); 2222 } 2223 2224 if (G1StressConcRegionFreeing) { 2225 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { 2226 os::sleep(Thread::current(), (jlong) 1, false); 2227 } 2228 } 2229 } 2230 } 2231 assert(tmp_free_list.is_empty(), "post-condition"); 2232 } 2233 2234 // Supporting Object and Oop closures for reference discovery 2235 // and processing during marking. 2236 2237 bool G1CMIsAliveClosure::do_object_b(oop obj) { 2238 HeapWord* addr = (HeapWord*)obj; 2239 return addr != NULL && 2240 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); 2241 } 2242 2243 // 'Keep Alive' oop closure used by both serial and parallel reference processing. 2244 // Uses the CMTask associated with a worker thread (for serial reference 2245 // processing the CMTask for worker 0 is used) to preserve (mark) and 2246 // trace referent objects.
// 2248 // Using the CMTask and embedded local queues avoids having the worker 2249 // threads operating on the global mark stack. This reduces the risk 2250 // of overflowing the stack - which we would rather avoid at this late 2251 // stage. Using the tasks' local queues also removes the potential 2252 // for the workers to interfere with each other, which could occur if 2253 // they operated on the global stack. 2254 2255 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2256 ConcurrentMark* _cm; 2257 CMTask* _task; 2258 int _ref_counter_limit; 2259 int _ref_counter; 2260 bool _is_serial; 2261 public: 2262 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2263 _cm(cm), _task(task), _is_serial(is_serial), 2264 _ref_counter_limit(G1RefProcDrainInterval) { 2265 assert(_ref_counter_limit > 0, "sanity"); 2266 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2267 _ref_counter = _ref_counter_limit; 2268 } 2269 2270 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2271 virtual void do_oop( oop* p) { do_oop_work(p); } 2272 2273 template <class T> void do_oop_work(T* p) { 2274 if (!_cm->has_overflown()) { 2275 oop obj = oopDesc::load_decode_heap_oop(p); 2276 if (_cm->verbose_high()) { 2277 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2278 "*"PTR_FORMAT" = "PTR_FORMAT, 2279 _task->worker_id(), p2i(p), p2i((void*) obj)); 2280 } 2281 2282 _task->deal_with_reference(obj); 2283 _ref_counter--; 2284 2285 if (_ref_counter == 0) { 2286 // We have dealt with _ref_counter_limit references, pushing them 2287 // and objects reachable from them on to the local stack (and 2288 // possibly the global stack). Call CMTask::do_marking_step() to 2289 // process these entries. 2290 // 2291 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2292 // there's nothing more to do (i.e. we're done with the entries that 2293 // were pushed as a result of the CMTask::deal_with_reference() calls 2294 // above) or we overflow. 2295 // 2296 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2297 // flag while there may still be some work to do. (See the comment at 2298 // the beginning of CMTask::do_marking_step() for those conditions - 2299 // one of which is reaching the specified time target.) It is only 2300 // when CMTask::do_marking_step() returns without setting the 2301 // has_aborted() flag that the marking step has completed. 2302 do { 2303 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2304 _task->do_marking_step(mark_step_duration_ms, 2305 false /* do_termination */, 2306 _is_serial); 2307 } while (_task->has_aborted() && !_cm->has_overflown()); 2308 _ref_counter = _ref_counter_limit; 2309 } 2310 } else { 2311 if (_cm->verbose_high()) { 2312 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2313 } 2314 } 2315 } 2316 }; 2317 2318 // 'Drain' oop closure used by both serial and parallel reference processing. 2319 // Uses the CMTask associated with a given worker thread (for serial 2320 // reference processing the CMTask for worker 0 is used). Calls the 2321 // do_marking_step routine, with an unbelievably large timeout value, 2322 // to drain the marking data structures of the remaining entries 2323 // added by the 'keep alive' oop closure above.
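// As a usage sketch (mirroring weakRefsWork() later in this file), the
// keep-alive closure above and the drain closure defined next are
// handed to the reference processor together:
//   G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
//   G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
//   rp->process_discovered_references(&g1_is_alive, &g1_keep_alive,
//                                     &g1_drain_mark_stack, executor, ...);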
2324 2325 class G1CMDrainMarkingStackClosure: public VoidClosure { 2326 ConcurrentMark* _cm; 2327 CMTask* _task; 2328 bool _is_serial; 2329 public: 2330 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2331 _cm(cm), _task(task), _is_serial(is_serial) { 2332 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2333 } 2334 2335 void do_void() { 2336 do { 2337 if (_cm->verbose_high()) { 2338 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s", 2339 _task->worker_id(), BOOL_TO_STR(_is_serial)); 2340 } 2341 2342 // We call CMTask::do_marking_step() to completely drain the local 2343 // and global marking stacks of entries pushed by the 'keep alive' 2344 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). 2345 // 2346 // CMTask::do_marking_step() is called in a loop, which we'll exit 2347 // if there's nothing more to do (i.e. we've completely drained the 2348 // entries that were pushed as a result of applying the 'keep alive' 2349 // closure to the entries on the discovered ref lists) or we overflow 2350 // the global marking stack. 2351 // 2352 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2353 // flag while there may still be some work to do. (See the comment at 2354 // the beginning of CMTask::do_marking_step() for those conditions - 2355 // one of which is reaching the specified time target.) It is only 2356 // when CMTask::do_marking_step() returns without setting the 2357 // has_aborted() flag that the marking step has completed. 2358 2359 _task->do_marking_step(1000000000.0 /* something very large */, 2360 true /* do_termination */, 2361 _is_serial); 2362 } while (_task->has_aborted() && !_cm->has_overflown()); 2363 } 2364 }; 2365 2366 // Implementation of AbstractRefProcTaskExecutor for parallel 2367 // reference processing at the end of G1 concurrent marking 2368 2369 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 2370 private: 2371 G1CollectedHeap* _g1h; 2372 ConcurrentMark* _cm; 2373 WorkGang* _workers; 2374 int _active_workers; 2375 2376 public: 2377 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 2378 ConcurrentMark* cm, 2379 WorkGang* workers, 2380 int n_workers) : 2381 _g1h(g1h), _cm(cm), 2382 _workers(workers), _active_workers(n_workers) { } 2383 2384 // Executes the given task using concurrent marking worker threads.
2385 virtual void execute(ProcessTask& task); 2386 virtual void execute(EnqueueTask& task); 2387 }; 2388 2389 class G1CMRefProcTaskProxy: public AbstractGangTask { 2390 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2391 ProcessTask& _proc_task; 2392 G1CollectedHeap* _g1h; 2393 ConcurrentMark* _cm; 2394 2395 public: 2396 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2397 G1CollectedHeap* g1h, 2398 ConcurrentMark* cm) : 2399 AbstractGangTask("Process reference objects in parallel"), 2400 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2401 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2402 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2403 } 2404 2405 virtual void work(uint worker_id) { 2406 ResourceMark rm; 2407 HandleMark hm; 2408 CMTask* task = _cm->task(worker_id); 2409 G1CMIsAliveClosure g1_is_alive(_g1h); 2410 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2411 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2412 2413 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2414 } 2415 }; 2416 2417 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2418 assert(_workers != NULL, "Need parallel worker threads."); 2419 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2420 2421 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2422 2423 // We need to reset the concurrency level before each 2424 // proxy task execution, so that the termination protocol 2425 // and overflow handling in CMTask::do_marking_step() knows 2426 // how many workers to wait for. 2427 _cm->set_concurrency(_active_workers); 2428 _g1h->set_par_threads(_active_workers); 2429 _workers->run_task(&proc_task_proxy); 2430 _g1h->set_par_threads(0); 2431 } 2432 2433 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2434 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2435 EnqueueTask& _enq_task; 2436 2437 public: 2438 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2439 AbstractGangTask("Enqueue reference objects in parallel"), 2440 _enq_task(enq_task) { } 2441 2442 virtual void work(uint worker_id) { 2443 _enq_task.work(worker_id); 2444 } 2445 }; 2446 2447 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2448 assert(_workers != NULL, "Need parallel worker threads."); 2449 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2450 2451 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2452 2453 // Not strictly necessary but... 2454 // 2455 // We need to reset the concurrency level before each 2456 // proxy task execution, so that the termination protocol 2457 // and overflow handling in CMTask::do_marking_step() knows 2458 // how many workers to wait for. 2459 _cm->set_concurrency(_active_workers); 2460 _g1h->set_par_threads(_active_workers); 2461 _workers->run_task(&enq_task_proxy); 2462 _g1h->set_par_threads(0); 2463 } 2464 2465 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2466 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2467 } 2468 2469 // Helper class to get rid of some boilerplate code. 
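// It simply forwards to GCTraceTime with the concurrent-mark timer and
// GC id filled in, so remark sub-phases can be timed and logged in one
// line, e.g. (as used during unloading below):
//   G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());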
2470 class G1RemarkGCTraceTime : public GCTraceTime { 2471 static bool doit_and_prepend(bool doit) { 2472 if (doit) { 2473 gclog_or_tty->put(' '); 2474 } 2475 return doit; 2476 } 2477 2478 public: 2479 G1RemarkGCTraceTime(const char* title, bool doit) 2480 : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(), 2481 G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) { 2482 } 2483 }; 2484 2485 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2486 if (has_overflown()) { 2487 // Skip processing the discovered references if we have 2488 // overflown the global marking stack. Reference objects 2489 // only get discovered once so it is OK not to 2490 // de-populate the discovered reference lists. We could have done so, 2491 // but the only benefit would be that, when marking restarts, 2492 // fewer reference objects are discovered. 2493 return; 2494 } 2495 2496 ResourceMark rm; 2497 HandleMark hm; 2498 2499 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2500 2501 // Is alive closure. 2502 G1CMIsAliveClosure g1_is_alive(g1h); 2503 2504 // Inner scope to exclude the cleaning of the string and symbol 2505 // tables from the displayed time. 2506 { 2507 if (G1Log::finer()) { 2508 gclog_or_tty->put(' '); 2509 } 2510 GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id()); 2511 2512 ReferenceProcessor* rp = g1h->ref_processor_cm(); 2513 2514 // See the comment in G1CollectedHeap::ref_processing_init() 2515 // about how reference processing currently works in G1. 2516 2517 // Set the soft reference policy 2518 rp->setup_policy(clear_all_soft_refs); 2519 assert(_markStack.isEmpty(), "mark stack should be empty"); 2520 2521 // Instances of the 'Keep Alive' and 'Complete GC' closures used 2522 // in serial reference processing. Note these closures are also 2523 // used for serially processing (by the current thread) the 2524 // JNI references during parallel reference processing. 2525 // 2526 // These closures do not need to synchronize with the worker 2527 // threads involved in parallel reference processing as these 2528 // instances are executed serially by the current thread (i.e. 2529 // reference processing is not multi-threaded and is thus 2530 // performed by the current thread instead of a gang worker). 2531 // 2532 // The gang tasks involved in parallel reference processing create 2533 // their own instances of these closures, which do their own 2534 // synchronization among themselves. 2535 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 2536 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 2537 2538 // We need at least one active thread. If reference processing 2539 // is not multi-threaded we use the current (VMThread) thread, 2540 // otherwise we use the work gang from the G1CollectedHeap and 2541 // we utilize all the worker threads we can. 2542 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL; 2543 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U); 2544 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); 2545 2546 // Parallel processing task executor. 2547 G1CMRefProcTaskExecutor par_task_executor(g1h, this, 2548 g1h->workers(), active_workers); 2549 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 2550 2551 // Set the concurrency level. The phase was already set prior to 2552 // executing the remark task.
2553 set_concurrency(active_workers); 2554 2555 // Set the degree of MT processing here. If the discovery was done MT, 2556 // the number of threads involved during discovery could differ from 2557 // the number of active workers. This is OK as long as the discovered 2558 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 2559 rp->set_active_mt_degree(active_workers); 2560 2561 // Process the weak references. 2562 const ReferenceProcessorStats& stats = 2563 rp->process_discovered_references(&g1_is_alive, 2564 &g1_keep_alive, 2565 &g1_drain_mark_stack, 2566 executor, 2567 g1h->gc_timer_cm(), 2568 concurrent_gc_id()); 2569 g1h->gc_tracer_cm()->report_gc_reference_stats(stats); 2570 2571 // The do_oop work routines of the keep_alive and drain_marking_stack 2572 // oop closures will set the has_overflown flag if we overflow the 2573 // global marking stack. 2574 2575 assert(_markStack.overflow() || _markStack.isEmpty(), 2576 "mark stack should be empty (unless it overflowed)"); 2577 2578 if (_markStack.overflow()) { 2579 // This should have been done already when we tried to push an 2580 // entry on to the global mark stack. But let's do it again. 2581 set_has_overflown(); 2582 } 2583 2584 assert(rp->num_q() == active_workers, "why not"); 2585 2586 rp->enqueue_discovered_references(executor); 2587 2588 rp->verify_no_references_recorded(); 2589 assert(!rp->discovery_enabled(), "Post condition"); 2590 } 2591 2592 if (has_overflown()) { 2593 // We can not trust g1_is_alive if the marking stack overflowed 2594 return; 2595 } 2596 2597 assert(_markStack.isEmpty(), "Marking should have completed"); 2598 2599 // Unload Klasses, String, Symbols, Code Cache, etc. 2600 2601 G1RemarkGCTraceTime trace("Unloading", G1Log::finer()); 2602 2603 bool purged_classes; 2604 2605 { 2606 G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest()); 2607 purged_classes = SystemDictionary::do_unloading(&g1_is_alive); 2608 } 2609 2610 { 2611 G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest()); 2612 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2613 } 2614 2615 if (G1StringDedup::is_enabled()) { 2616 G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest()); 2617 G1StringDedup::unlink(&g1_is_alive); 2618 } 2619 } 2620 2621 void ConcurrentMark::swapMarkBitMaps() { 2622 CMBitMapRO* temp = _prevMarkBitMap; 2623 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2624 _nextMarkBitMap = (CMBitMap*) temp; 2625 } 2626 2627 class CMObjectClosure; 2628 2629 // Closure for iterating over objects, currently only used for 2630 // processing SATB buffers. 
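// Each drained SATB entry is handed to the owning CMTask as if it were
// a newly discovered reference, e.g. (as applied in
// G1RemarkThreadsClosure below):
//   jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);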
2631 class CMObjectClosure : public ObjectClosure { 2632 private: 2633 CMTask* _task; 2634 2635 public: 2636 void do_object(oop obj) { 2637 _task->deal_with_reference(obj); 2638 } 2639 2640 CMObjectClosure(CMTask* task) : _task(task) { } 2641 }; 2642 2643 class G1RemarkThreadsClosure : public ThreadClosure { 2644 CMObjectClosure _cm_obj; 2645 G1CMOopClosure _cm_cl; 2646 MarkingCodeBlobClosure _code_cl; 2647 int _thread_parity; 2648 bool _is_par; 2649 2650 public: 2651 G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) : 2652 _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), 2653 _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {} 2654 2655 void do_thread(Thread* thread) { 2656 if (thread->is_Java_thread()) { 2657 if (thread->claim_oops_do(_is_par, _thread_parity)) { 2658 JavaThread* jt = (JavaThread*)thread; 2659 2660 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking; 2661 // however, oops reachable from nmethods have very complex lifecycles: 2662 // * Alive if on the stack of an executing method 2663 // * Weakly reachable otherwise 2664 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be 2665 // live by the SATB invariant, but other oops recorded in nmethods may behave differently. 2666 jt->nmethods_do(&_code_cl); 2667 2668 jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj); 2669 } 2670 } else if (thread->is_VM_thread()) { 2671 if (thread->claim_oops_do(_is_par, _thread_parity)) { 2672 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj); 2673 } 2674 } 2675 } 2676 }; 2677 2678 class CMRemarkTask: public AbstractGangTask { 2679 private: 2680 ConcurrentMark* _cm; 2681 bool _is_serial; 2682 public: 2683 void work(uint worker_id) { 2684 // Since all available tasks are actually started, we should 2685 // only proceed if we're supposed to be active. 2686 if (worker_id < _cm->active_tasks()) { 2687 CMTask* task = _cm->task(worker_id); 2688 task->record_start_time(); 2689 { 2690 ResourceMark rm; 2691 HandleMark hm; 2692 2693 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial); 2694 Threads::threads_do(&threads_f); 2695 } 2696 2697 do { 2698 task->do_marking_step(1000000000.0 /* something very large */, 2699 true /* do_termination */, 2700 _is_serial); 2701 } while (task->has_aborted() && !_cm->has_overflown()); 2702 // If we overflow, then we do not want to restart. We instead 2703 // want to abort remark and do concurrent marking again.
2704 task->record_end_time(); 2705 } 2706 } 2707 2708 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) : 2709 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) { 2710 _cm->terminator()->reset_for_reuse(active_workers); 2711 } 2712 }; 2713 2714 void ConcurrentMark::checkpointRootsFinalWork() { 2715 ResourceMark rm; 2716 HandleMark hm; 2717 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2718 2719 G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer()); 2720 2721 g1h->ensure_parsability(false); 2722 2723 if (G1CollectedHeap::use_parallel_gc_threads()) { 2724 G1CollectedHeap::StrongRootsScope srs(g1h); 2725 // this is remark, so we'll use up all active threads 2726 uint active_workers = g1h->workers()->active_workers(); 2727 if (active_workers == 0) { 2728 assert(active_workers > 0, "Should have been set earlier"); 2729 active_workers = (uint) ParallelGCThreads; 2730 g1h->workers()->set_active_workers(active_workers); 2731 } 2732 set_concurrency_and_phase(active_workers, false /* concurrent */); 2733 // Leave _parallel_marking_threads at its 2734 // value originally calculated in the ConcurrentMark 2735 // constructor and pass the number of active workers 2736 // to the task through the gang. 2737 2738 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */); 2739 // We will start all available threads, even if we decide that the 2740 // active_workers will be fewer. The extra ones will just bail out 2741 // immediately. 2742 g1h->set_par_threads(active_workers); 2743 g1h->workers()->run_task(&remarkTask); 2744 g1h->set_par_threads(0); 2745 } else { 2746 G1CollectedHeap::StrongRootsScope srs(g1h); 2747 uint active_workers = 1; 2748 set_concurrency_and_phase(active_workers, false /* concurrent */); 2749 2750 // Note - if there's no work gang then the VMThread will be 2751 // the thread to execute the remark - serially. We have 2752 // to pass true for the is_serial parameter so that 2753 // CMTask::do_marking_step() doesn't enter the sync 2754 // barriers in the event of an overflow. Doing so will 2755 // cause an assert that the current thread is not a 2756 // concurrent GC thread.
2757 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/); 2758 remarkTask.work(0); 2759 } 2760 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2761 guarantee(has_overflown() || 2762 satb_mq_set.completed_buffers_num() == 0, 2763 err_msg("Invariant: has_overflown = %s, num buffers = %d", 2764 BOOL_TO_STR(has_overflown()), 2765 satb_mq_set.completed_buffers_num())); 2766 2767 print_stats(); 2768 } 2769 2770 #ifndef PRODUCT 2771 2772 class PrintReachableOopClosure: public OopClosure { 2773 private: 2774 G1CollectedHeap* _g1h; 2775 outputStream* _out; 2776 VerifyOption _vo; 2777 bool _all; 2778 2779 public: 2780 PrintReachableOopClosure(outputStream* out, 2781 VerifyOption vo, 2782 bool all) : 2783 _g1h(G1CollectedHeap::heap()), 2784 _out(out), _vo(vo), _all(all) { } 2785 2786 void do_oop(narrowOop* p) { do_oop_work(p); } 2787 void do_oop( oop* p) { do_oop_work(p); } 2788 2789 template <class T> void do_oop_work(T* p) { 2790 oop obj = oopDesc::load_decode_heap_oop(p); 2791 const char* str = NULL; 2792 const char* str2 = ""; 2793 2794 if (obj == NULL) { 2795 str = ""; 2796 } else if (!_g1h->is_in_g1_reserved(obj)) { 2797 str = " O"; 2798 } else { 2799 HeapRegion* hr = _g1h->heap_region_containing(obj); 2800 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); 2801 bool marked = _g1h->is_marked(obj, _vo); 2802 2803 if (over_tams) { 2804 str = " >"; 2805 if (marked) { 2806 str2 = " AND MARKED"; 2807 } 2808 } else if (marked) { 2809 str = " M"; 2810 } else { 2811 str = " NOT"; 2812 } 2813 } 2814 2815 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", 2816 p2i(p), p2i((void*) obj), str, str2); 2817 } 2818 }; 2819 2820 class PrintReachableObjectClosure : public ObjectClosure { 2821 private: 2822 G1CollectedHeap* _g1h; 2823 outputStream* _out; 2824 VerifyOption _vo; 2825 bool _all; 2826 HeapRegion* _hr; 2827 2828 public: 2829 PrintReachableObjectClosure(outputStream* out, 2830 VerifyOption vo, 2831 bool all, 2832 HeapRegion* hr) : 2833 _g1h(G1CollectedHeap::heap()), 2834 _out(out), _vo(vo), _all(all), _hr(hr) { } 2835 2836 void do_object(oop o) { 2837 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo); 2838 bool marked = _g1h->is_marked(o, _vo); 2839 bool print_it = _all || over_tams || marked; 2840 2841 if (print_it) { 2842 _out->print_cr(" "PTR_FORMAT"%s", 2843 p2i((void *)o), (over_tams) ? " >" : (marked) ? 
" M" : ""); 2844 PrintReachableOopClosure oopCl(_out, _vo, _all); 2845 o->oop_iterate_no_header(&oopCl); 2846 } 2847 } 2848 }; 2849 2850 class PrintReachableRegionClosure : public HeapRegionClosure { 2851 private: 2852 G1CollectedHeap* _g1h; 2853 outputStream* _out; 2854 VerifyOption _vo; 2855 bool _all; 2856 2857 public: 2858 bool doHeapRegion(HeapRegion* hr) { 2859 HeapWord* b = hr->bottom(); 2860 HeapWord* e = hr->end(); 2861 HeapWord* t = hr->top(); 2862 HeapWord* p = _g1h->top_at_mark_start(hr, _vo); 2863 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2864 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p)); 2865 _out->cr(); 2866 2867 HeapWord* from = b; 2868 HeapWord* to = t; 2869 2870 if (to > from) { 2871 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to)); 2872 _out->cr(); 2873 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2874 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2875 _out->cr(); 2876 } 2877 2878 return false; 2879 } 2880 2881 PrintReachableRegionClosure(outputStream* out, 2882 VerifyOption vo, 2883 bool all) : 2884 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } 2885 }; 2886 2887 void ConcurrentMark::print_reachable(const char* str, 2888 VerifyOption vo, 2889 bool all) { 2890 gclog_or_tty->cr(); 2891 gclog_or_tty->print_cr("== Doing heap dump... "); 2892 2893 if (G1PrintReachableBaseFile == NULL) { 2894 gclog_or_tty->print_cr(" #### error: no base file defined"); 2895 return; 2896 } 2897 2898 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2899 (JVM_MAXPATHLEN - 1)) { 2900 gclog_or_tty->print_cr(" #### error: file name too long"); 2901 return; 2902 } 2903 2904 char file_name[JVM_MAXPATHLEN]; 2905 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2906 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2907 2908 fileStream fout(file_name); 2909 if (!fout.is_open()) { 2910 gclog_or_tty->print_cr(" #### error: could not open file"); 2911 return; 2912 } 2913 2914 outputStream* out = &fout; 2915 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); 2916 out->cr(); 2917 2918 out->print_cr("--- ITERATING OVER REGIONS"); 2919 out->cr(); 2920 PrintReachableRegionClosure rcl(out, vo, all); 2921 _g1h->heap_region_iterate(&rcl); 2922 out->cr(); 2923 2924 gclog_or_tty->print_cr(" done"); 2925 gclog_or_tty->flush(); 2926 } 2927 2928 #endif // PRODUCT 2929 2930 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2931 // Note we are overriding the read-only view of the prev map here, via 2932 // the cast. 2933 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2934 } 2935 2936 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2937 _nextMarkBitMap->clearRange(mr); 2938 } 2939 2940 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) { 2941 clearRangePrevBitmap(mr); 2942 clearRangeNextBitmap(mr); 2943 } 2944 2945 HeapRegion* 2946 ConcurrentMark::claim_region(uint worker_id) { 2947 // "checkpoint" the finger 2948 HeapWord* finger = _finger; 2949 2950 // _heap_end will not change underneath our feet; it only changes at 2951 // yield points. 2952 while (finger < _heap_end) { 2953 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2954 2955 // Note on how this code handles humongous regions. In the 2956 // normal case the finger will reach the start of a "starts 2957 // humongous" (SH) region. 
Its end will either be the end of the 2958 // last "continues humongous" (CH) region in the sequence, or the 2959 // standard end of the SH region (if the SH is the only region in 2960 // the sequence). That way claim_region() will skip over the CH 2961 // regions. However, there is a subtle race between a CM thread 2962 // executing this method and a mutator thread doing a humongous 2963 // object allocation. The two are not mutually exclusive as the CM 2964 // thread does not need to hold the Heap_lock when it gets 2965 // here. So there is a chance that claim_region() will come across 2966 // a free region that's in the process of becoming a SH or a CH 2967 // region. In the former case, it will either 2968 // a) Miss the update to the region's end, in which case it will 2969 // visit every subsequent CH region, will find their bitmaps 2970 // empty, and do nothing, or 2971 // b) Observe the update of the region's end (in which case 2972 // it will skip the subsequent CH regions). 2973 // If it comes across a region that suddenly becomes CH, the 2974 // scenario will be similar to b). So, the race between 2975 // claim_region() and a humongous object allocation might force us 2976 // to do a bit of unnecessary work (due to some unnecessary bitmap 2977 // iterations) but it should not introduce any correctness issues. 2978 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); 2979 HeapWord* bottom = curr_region->bottom(); 2980 HeapWord* end = curr_region->end(); 2981 HeapWord* limit = curr_region->next_top_at_mark_start(); 2982 2983 if (verbose_low()) { 2984 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " 2985 "["PTR_FORMAT", "PTR_FORMAT"), " 2986 "limit = "PTR_FORMAT, 2987 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit)); 2988 } 2989 2990 // Is the gap between reading the finger and doing the CAS too long? 2991 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); 2992 if (res == finger) { 2993 // we succeeded 2994 2995 // notice that _finger == end cannot be guaranteed here since, 2996 // someone else might have moved the finger even further 2997 assert(_finger >= end, "the finger should have moved forward"); 2998 2999 if (verbose_low()) { 3000 gclog_or_tty->print_cr("[%u] we were successful with region = " 3001 PTR_FORMAT, worker_id, p2i(curr_region)); 3002 } 3003 3004 if (limit > bottom) { 3005 if (verbose_low()) { 3006 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, " 3007 "returning it ", worker_id, p2i(curr_region)); 3008 } 3009 return curr_region; 3010 } else { 3011 assert(limit == bottom, 3012 "the region limit should be at bottom"); 3013 if (verbose_low()) { 3014 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, " 3015 "returning NULL", worker_id, p2i(curr_region)); 3016 } 3017 // we return NULL and the caller should try calling 3018 // claim_region() again.
3019 return NULL; 3020 } 3021 } else { 3022 assert(_finger > finger, "the finger should have moved forward"); 3023 if (verbose_low()) { 3024 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 3025 "global finger = "PTR_FORMAT", " 3026 "our finger = "PTR_FORMAT, 3027 worker_id, p2i(_finger), p2i(finger)); 3028 } 3029 3030 // read it again 3031 finger = _finger; 3032 } 3033 } 3034 3035 return NULL; 3036 } 3037 3038 #ifndef PRODUCT 3039 enum VerifyNoCSetOopsPhase { 3040 VerifyNoCSetOopsStack, 3041 VerifyNoCSetOopsQueues, 3042 VerifyNoCSetOopsSATBCompleted, 3043 VerifyNoCSetOopsSATBThread 3044 }; 3045 3046 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 3047 private: 3048 G1CollectedHeap* _g1h; 3049 VerifyNoCSetOopsPhase _phase; 3050 int _info; 3051 3052 const char* phase_str() { 3053 switch (_phase) { 3054 case VerifyNoCSetOopsStack: return "Stack"; 3055 case VerifyNoCSetOopsQueues: return "Queue"; 3056 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 3057 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 3058 default: ShouldNotReachHere(); 3059 } 3060 return NULL; 3061 } 3062 3063 void do_object_work(oop obj) { 3064 guarantee(!_g1h->obj_in_cs(obj), 3065 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 3066 p2i((void*) obj), phase_str(), _info)); 3067 } 3068 3069 public: 3070 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 3071 3072 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 3073 _phase = phase; 3074 _info = info; 3075 } 3076 3077 virtual void do_oop(oop* p) { 3078 oop obj = oopDesc::load_decode_heap_oop(p); 3079 do_object_work(obj); 3080 } 3081 3082 virtual void do_oop(narrowOop* p) { 3083 // We should not come across narrow oops while scanning marking 3084 // stacks and SATB buffers. 3085 ShouldNotReachHere(); 3086 } 3087 3088 virtual void do_object(oop obj) { 3089 do_object_work(obj); 3090 } 3091 }; 3092 3093 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 3094 bool verify_enqueued_buffers, 3095 bool verify_thread_buffers, 3096 bool verify_fingers) { 3097 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 3098 if (!G1CollectedHeap::heap()->mark_in_progress()) { 3099 return; 3100 } 3101 3102 VerifyNoCSetOopsClosure cl; 3103 3104 if (verify_stacks) { 3105 // Verify entries on the global mark stack 3106 cl.set_phase(VerifyNoCSetOopsStack); 3107 _markStack.oops_do(&cl); 3108 3109 // Verify entries on the task queues 3110 for (uint i = 0; i < _max_worker_id; i += 1) { 3111 cl.set_phase(VerifyNoCSetOopsQueues, i); 3112 CMTaskQueue* queue = _task_queues->queue(i); 3113 queue->oops_do(&cl); 3114 } 3115 } 3116 3117 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 3118 3119 // Verify entries on the enqueued SATB buffers 3120 if (verify_enqueued_buffers) { 3121 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 3122 satb_qs.iterate_completed_buffers_read_only(&cl); 3123 } 3124 3125 // Verify entries on the per-thread SATB buffers 3126 if (verify_thread_buffers) { 3127 cl.set_phase(VerifyNoCSetOopsSATBThread); 3128 satb_qs.iterate_thread_buffers_read_only(&cl); 3129 } 3130 3131 if (verify_fingers) { 3132 // Verify the global finger 3133 HeapWord* global_finger = finger(); 3134 if (global_finger != NULL && global_finger < _heap_end) { 3135 // The global finger always points to a heap region boundary. 
We 3136 // use heap_region_containing_raw() to get the containing region 3137 // given that the global finger could be pointing to a free region 3138 // which subsequently becomes continues humongous. If that 3139 // happens, heap_region_containing() will return the bottom of the 3140 // corresponding starts humongous region and the check below will 3141 // not hold any more. 3142 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 3143 guarantee(global_finger == global_hr->bottom(), 3144 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 3145 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 3146 } 3147 3148 // Verify the task fingers 3149 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 3150 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 3151 CMTask* task = _tasks[i]; 3152 HeapWord* task_finger = task->finger(); 3153 if (task_finger != NULL && task_finger < _heap_end) { 3154 // See above note on the global finger verification. 3155 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 3156 guarantee(task_finger == task_hr->bottom() || 3157 !task_hr->in_collection_set(), 3158 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 3159 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 3160 } 3161 } 3162 } 3163 } 3164 #endif // PRODUCT 3165 3166 // Aggregate the counting data that was constructed concurrently 3167 // with marking. 3168 class AggregateCountDataHRClosure: public HeapRegionClosure { 3169 G1CollectedHeap* _g1h; 3170 ConcurrentMark* _cm; 3171 CardTableModRefBS* _ct_bs; 3172 BitMap* _cm_card_bm; 3173 uint _max_worker_id; 3174 3175 public: 3176 AggregateCountDataHRClosure(G1CollectedHeap* g1h, 3177 BitMap* cm_card_bm, 3178 uint max_worker_id) : 3179 _g1h(g1h), _cm(g1h->concurrent_mark()), 3180 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())), 3181 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { } 3182 3183 bool doHeapRegion(HeapRegion* hr) { 3184 if (hr->continuesHumongous()) { 3185 // We will ignore these here and process them when their 3186 // associated "starts humongous" region is processed. 3187 // Note that we cannot rely on their associated 3188 // "starts humongous" region to have their bit set to 1 3189 // since, due to the region chunking in the parallel region 3190 // iteration, a "continues humongous" region might be visited 3191 // before its associated "starts humongous". 3192 return false; 3193 } 3194 3195 HeapWord* start = hr->bottom(); 3196 HeapWord* limit = hr->next_top_at_mark_start(); 3197 HeapWord* end = hr->end(); 3198 3199 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(), 3200 err_msg("Preconditions not met - " 3201 "start: "PTR_FORMAT", limit: "PTR_FORMAT", " 3202 "top: "PTR_FORMAT", end: "PTR_FORMAT, 3203 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()))); 3204 3205 assert(hr->next_marked_bytes() == 0, "Precondition"); 3206 3207 if (start == limit) { 3208 // NTAMS of this region has not been set so nothing to do. 3209 return false; 3210 } 3211 3212 // 'start' should be in the heap. 
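// ('start' is hr->bottom(), and region boundaries are card aligned, so
// the alignment half of the assert below is expected to hold as well.)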
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start),
           "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end),
           "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could actually be just beyond the end of the heap;
    // limit_idx will then correspond to a (non-existent) card
    // that is also outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
    uint hrs_index = hr->hrs_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrs_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
      BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);

      while (scan_idx < limit_idx) {
        assert(task_card_bm->at(scan_idx) == true, "should be");
        _cm_card_bm->set_bit(scan_idx);
        assert(_cm_card_bm->at(scan_idx) == true, "should be");

        // BitMap::get_next_one_offset() can handle the case when
        // its left_offset parameter is greater than its right_offset
        // parameter. It does, however, have an early exit if
        // left_offset == right_offset. So let's limit the value
        // passed in for left offset here.
        BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
        scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
      }
    }

    // Update the marked bytes for this region.
    hr->add_to_marked_bytes(marked_bytes);

    // Next heap region
    return false;
  }
};
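// Worked example for the index arithmetic above (illustrative figures,
// assuming 512-byte cards): for a region [0x40000000, 0x40100000) with
// ntams at 0x400002a0, start_idx maps to the card starting at 0x40000000
// and limit_idx to the card starting at 0x40000200. Because 0x400002a0 is
// not card aligned, the object straddling ntams also spans that last card,
// so limit_idx is bumped by one to make the half-open range
// [start_idx, limit_idx) cover it.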
class G1AggregateCountDataTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _cm_card_bm;
  uint _max_worker_id;
  int _active_workers;

public:
  G1AggregateCountDataTask(G1CollectedHeap* g1h,
                           ConcurrentMark* cm,
                           BitMap* cm_card_bm,
                           uint max_worker_id,
                           int n_workers) :
    AbstractGangTask("Count Aggregation"),
    _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
    _max_worker_id(max_worker_id),
    _active_workers(n_workers) { }

  void work(uint worker_id) {
    AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);

    if (G1CollectedHeap::use_parallel_gc_threads()) {
      _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
                                            _active_workers,
                                            HeapRegion::AggregateCountClaimValue);
    } else {
      _g1h->heap_region_iterate(&cl);
    }
  }
};


void ConcurrentMark::aggregate_count_data() {
  int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                        _g1h->workers()->active_workers() :
                        1);

  G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
                                           _max_worker_id, n_workers);

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
    _g1h->set_par_threads(n_workers);
    _g1h->workers()->run_task(&g1_par_agg_task);
    _g1h->set_par_threads(0);

    assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
           "sanity check");
    _g1h->reset_heap_region_claim_values();
  } else {
    g1_par_agg_task.work(0);
  }
}

// Clear the per-worker arrays used to store the per-region counting data
void ConcurrentMark::clear_all_count_data() {
  // Clear the global card bitmap - it will be filled during
  // liveness count aggregation (during remark) and the
  // final counting task.
  _card_bm.clear();

  // Clear the global region bitmap - it will be filled as part
  // of the final counting task.
  _region_bm.clear();

  uint max_regions = _g1h->max_regions();
  assert(_max_worker_id > 0, "uninitialized");

  for (uint i = 0; i < _max_worker_id; i += 1) {
    BitMap* task_card_bm = count_card_bitmap_for(i);
    size_t* marked_bytes_array = count_marked_bytes_array_for(i);

    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    assert(marked_bytes_array != NULL, "uninitialized");

    memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
    task_card_bm->clear();
  }
}

void ConcurrentMark::print_stats() {
  if (verbose_stats()) {
    gclog_or_tty->print_cr("---------------------------------------------------------------------");
    for (size_t i = 0; i < _active_tasks; ++i) {
      _tasks[i]->print_stats();
      gclog_or_tty->print_cr("---------------------------------------------------------------------");
    }
  }
}

// abandon current marking iteration due to a Full GC
void ConcurrentMark::abort() {
  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  _nextMarkBitMap->clearAll();

  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Clear the liveness counting data
  clear_all_count_data();
  // Empty mark stack
  reset_marking_state();
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
  if (!gc_id.is_undefined()) {
    // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
    // to detect that it was aborted. Only keep track of the first GC id that we aborted.
    _aborted_gc_id = gc_id;
  }
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
                                 false, /* new active value */
                                 satb_mq_set.is_active() /* expected_active */);

  _g1h->trace_heap_after_concurrent_cycle();
  _g1h->register_concurrent_cycle_end();
}

const GCId& ConcurrentMark::concurrent_gc_id() {
  if (has_aborted()) {
    return _aborted_gc_id;
  }
  return _g1h->gc_tracer_cm()->gc_id();
}

static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

void ConcurrentMark::print_summary_info() {
  gclog_or_tty->print_cr(" Concurrent marking:");
  print_ms_time_info("  ", "init marks", _init_times);
  print_ms_time_info("  ", "remarks", _remark_times);
  {
    print_ms_time_info("     ", "final marks", _remark_mark_times);
    print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);

  }
  print_ms_time_info("  ", "cleanups", _cleanup_times);
  gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
                         _total_counting_time,
                         (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
                          (double)_cleanup_times.num()
                         : 0.0));
  if (G1ScrubRemSets) {
    gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
                           _total_rs_scrub_time,
                           (_cleanup_times.num() > 0 ?
                            _total_rs_scrub_time * 1000.0 /
                            (double)_cleanup_times.num()
                           : 0.0));
  }
  gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
                         (_init_times.sum() + _remark_times.sum() +
                          _cleanup_times.sum())/1000.0);
  gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
                         "(%8.2f s marking).",
                         cmThread()->vtime_accum(),
                         cmThread()->vtime_mark_accum());
}

void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  if (use_parallel_marking_threads()) {
    _parallel_workers->print_worker_threads_on(st);
  }
}

void ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
}

// We take a break if someone is trying to stop the world.
bool ConcurrentMark::do_yield_check(uint worker_id) {
  if (SuspendibleThreadSet::should_yield()) {
    if (worker_id == 0) {
      _g1h->g1_policy()->record_concurrent_pause();
    }
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

bool ConcurrentMark::containing_card_is_marked(void* p) {
  size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
  return _card_bm.at(offset >> CardTableModRefBS::card_shift);
}

bool ConcurrentMark::containing_cards_are_marked(void* start,
                                                 void* last) {
  return containing_card_is_marked(start) &&
         containing_card_is_marked(last);
}

#ifndef PRODUCT
// for debugging purposes
void ConcurrentMark::print_finger() {
  gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
                         p2i(_heap_start), p2i(_heap_end), p2i(_finger));
  for (uint i = 0; i < _max_worker_id; ++i) {
    gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
  }
  gclog_or_tty->cr();
}
#endif

void CMTask::scan_object(oop obj) {
  assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");

  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
                           _worker_id, p2i((void*) obj));
  }

  size_t obj_size = obj->size();
  _words_scanned += obj_size;

  obj->oop_iterate(_cm_oop_closure);
  statsOnly( ++_objs_scanned );
  check_limits();
}

// Closure for iteration over bitmaps
class CMBitMapClosure : public BitMapClosure {
private:
  // the bitmap that is being iterated over
  CMBitMap*                   _nextMarkBitMap;
  ConcurrentMark*             _cm;
  CMTask*                     _task;

public:
  CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
    _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }

  bool do_bit(size_t offset) {
    HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
    assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert( addr < _cm->finger(), "invariant");

    statsOnly( _task->increase_objs_found_on_bitmap() );
    assert(addr >= _task->finger(), "invariant");

    // We move that task's local finger along.
    _task->move_finger_to(addr);

    _task->scan_object(oop(addr));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);

    // if the has_aborted flag has been raised, we need to bail out of
    // the iteration
    return !_task->has_aborted();
  }
};

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : _g1h(g1h), _cm(cm), _task(task) {
  assert(_ref_processor == NULL, "should be initialized to NULL");

  if (G1UseConcMarkReferenceProcessing) {
    _ref_processor = g1h->ref_processor_cm();
    assert(_ref_processor != NULL, "should not be NULL");
  }
}

void CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  assert(!hr->continuesHumongous(),
         "claim_region() should have filtered out continues humongous regions");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
                           _worker_id, p2i(hr));
  }

  _curr_region = hr;
  _finger      = hr->bottom();
  update_region_limit();
}

void CMTask::update_region_limit() {
  HeapRegion* hr  = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit  = hr->next_top_at_mark_start();

  if (limit == bottom) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] found an empty region "
                             "["PTR_FORMAT", "PTR_FORMAT")",
                             _worker_id, p2i(bottom), p2i(limit));
    }
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}

void CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
                           _worker_id, p2i(_curr_region));
  }
  clear_region_fields();
}

void CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
  _curr_region  = NULL;
  _finger       = NULL;
  _region_limit = NULL;
}

void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  if (cm_oop_closure == NULL) {
    assert(_cm_oop_closure != NULL, "invariant");
  } else {
    assert(_cm_oop_closure == NULL, "invariant");
  }
  _cm_oop_closure = cm_oop_closure;
}

void CMTask::reset(CMBitMap* nextMarkBitMap) {
  guarantee(nextMarkBitMap != NULL, "invariant");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] resetting", _worker_id);
  }

  _nextMarkBitMap            = nextMarkBitMap;
  clear_region_fields();

  _calls                     = 0;
  _elapsed_time_ms           = 0.0;
  _termination_time_ms       = 0.0;
  _termination_start_time_ms = 0.0;

#if _MARKING_STATS_
  _local_pushes              = 0;
  _local_pops                = 0;
  _local_max_size            = 0;
  _objs_scanned              = 0;
  _global_pushes             = 0;
  _global_pops               = 0;
  _global_max_size           = 0;
  _global_transfers_to       = 0;
  _global_transfers_from     = 0;
  _regions_claimed           = 0;
  _objs_found_on_bitmap      = 0;
  _satb_buffers_processed    = 0;
  _steal_attempts            = 0;
  _steals                    = 0;
  _aborted                   = 0;
  _aborted_overflow          = 0;
  _aborted_cm_aborted        = 0;
  _aborted_yield             = 0;
  _aborted_timed_out         = 0;
  _aborted_satb              = 0;
  _aborted_termination       = 0;
#endif // _MARKING_STATS_
}

bool CMTask::should_exit_termination() {
  regular_clock_call();
  // This is called when we are in the termination protocol. We should
  // quit if, for some reason, this task wants to abort or the global
  // stack is not empty (this means that we can get work from it).
  return !_cm->mark_stack_empty() || has_aborted();
}

void CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit ,
         "shouldn't have been called otherwise");
  regular_clock_call();
}

void CMTask::regular_clock_call() {
  if (has_aborted()) return;

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following:

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    set_has_aborted();
    return;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!concurrent()) return;

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    set_has_aborted();
    statsOnly( ++_aborted_cm_aborted );
    return;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (3) If marking stats are enabled, then we update the step history.
#if _MARKING_STATS_
  if (_words_scanned >= _words_scanned_limit) {
    ++_clock_due_to_scanning;
  }
  if (_refs_reached >= _refs_reached_limit) {
    ++_clock_due_to_marking;
  }

  double last_interval_ms = curr_time_ms - _interval_start_time_ms;
  _interval_start_time_ms = curr_time_ms;
  _all_clock_intervals_ms.add(last_interval_ms);

  if (_cm->verbose_medium()) {
    gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
                           "scanned = %d%s, refs reached = %d%s",
                           _worker_id, last_interval_ms,
                           _words_scanned,
                           (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
                           _refs_reached,
                           (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
  }
#endif // _MARKING_STATS_

  // (4) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    statsOnly( ++_aborted_yield );
    return;
  }

  // (5) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    statsOnly( ++_aborted_timed_out );
    return;
  }

  // (6) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
                             _worker_id);
    }
    // we do need to process SATB buffers, so we'll abort and restart
    // the marking task to do so
    set_has_aborted();
    statsOnly( ++_aborted_satb );
    return;
  }
}

void CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit      = _real_words_scanned_limit;

  _real_refs_reached_limit  = _refs_reached + refs_reached_period;
  _refs_reached_limit       = _real_refs_reached_limit;
}
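// Illustrative sketch of how the limits recomputed above drive the
// work-based clock (simplified; the real bookkeeping is spread across
// scan_object() and check_limits()): counters are bumped as marking makes
// progress and, once either limit is crossed, reached_limit() funnels into
// regular_clock_call():
//
//   _words_scanned += obj_size;   // for every object scanned
//   _refs_reached  += 1;          // for every reference visited
//   if (_words_scanned >= _words_scanned_limit ||
//       _refs_reached  >= _refs_reached_limit) {
//     reached_limit();            // -> regular_clock_call()
//   }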
void CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.

  if (_cm->verbose_medium()) {
    gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
  }

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit  = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}

void CMTask::move_entries_to_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the local queue
  oop buffer[global_stack_transfer_size];

  int n = 0;
  oop obj;
  while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
    buffer[n] = obj;
    ++n;
  }

  if (n > 0) {
    // we popped at least one entry from the local queue

    statsOnly( ++_global_transfers_to; _local_pops += n );

    if (!_cm->mark_stack_push(buffer, n)) {
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
                               _worker_id);
      }
      set_has_aborted();
    } else {
      // the transfer was successful

      if (_cm->verbose_medium()) {
        gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
                               _worker_id, n);
      }
      statsOnly( int tmp_size = _cm->mark_stack_size();
                 if (tmp_size > _global_max_size) {
                   _global_max_size = tmp_size;
                 }
                 _global_pushes += n );
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void CMTask::get_entries_from_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the global stack.
  oop buffer[global_stack_transfer_size];
  int n;
  _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  assert(n <= global_stack_transfer_size,
         "we should not pop more than the given limit");
  if (n > 0) {
    // yes, we did actually pop at least one entry

    statsOnly( ++_global_transfers_from; _global_pops += n );
    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
                             _worker_id, n);
    }
    for (int i = 0; i < n; ++i) {
      bool success = _task_queue->push(buffer[i]);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }

    statsOnly( int tmp_size = _task_queue->size();
               if (tmp_size > _local_max_size) {
                 _local_max_size = tmp_size;
               }
               _local_pushes += n );
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) return;

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
                             _worker_id, target_size);
    }

    oop obj;
    bool ret = _task_queue->pop_local(obj);
    while (ret) {
      statsOnly( ++_local_pops );

      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
                               p2i((void*) obj));
      }

      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
      assert(!_g1h->is_on_master_free_list(
                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");

      scan_object(obj);

      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(obj);
      }
    }

    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
                             _worker_id, _task_queue->size());
    }
  }
}

void CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end). Notice that,
  // because we move entries from the global stack in chunks or
  // because another task might be doing the same, we might in fact
  // drop below the target. But this is not a problem.
  size_t target_size;
  if (partially) {
    target_size = _cm->partial_mark_stack_size_target();
  } else {
    target_size = 0;
  }

  if (_cm->mark_stack_size() > target_size) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
                             _worker_id, target_size);
    }

    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      get_entries_from_global_stack();
      drain_local_queue(partially);
    }

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
                             _worker_id, _cm->mark_stack_size());
    }
  }
}
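// Worked example for the drain targets above (illustrative, assuming the
// default GCDrainStackTargetSize of 64): a partial drain of a local queue
// with max_elems() == 16384 uses a target of MIN2(16384 / 3, 64) = 64, so
// the queue is drained down to 64 entries but not emptied and other tasks
// can still steal from it. A total drain (partially == false) uses a
// target of 0 and empties the queue completely.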
// The SATB queue set has several assumptions on whether to call the par or
// non-par versions of the methods. This is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
void CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  CMObjectClosure oc(this);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    satb_mq_set.set_par_closure(_worker_id, &oc);
  } else {
    satb_mq_set.set_closure(&oc);
  }

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    while (!has_aborted() &&
           satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
      if (_cm->verbose_medium()) {
        gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
      }
      statsOnly( ++_satb_buffers_processed );
      regular_clock_call();
    }
  } else {
    while (!has_aborted() &&
           satb_mq_set.apply_closure_to_completed_buffer()) {
      if (_cm->verbose_medium()) {
        gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
      }
      statsOnly( ++_satb_buffers_processed );
      regular_clock_call();
    }
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    satb_mq_set.set_par_closure(_worker_id, NULL);
  } else {
    satb_mq_set.set_closure(NULL);
  }

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}

void CMTask::print_stats() {
  gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
                         _worker_id, _calls);
  gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                         _elapsed_time_ms, _termination_time_ms);
  gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                         _step_times_ms.num(), _step_times_ms.avg(),
                         _step_times_ms.sd());
  gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
                         _step_times_ms.maximum(), _step_times_ms.sum());

#if _MARKING_STATS_
  gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                         _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
                         _all_clock_intervals_ms.sd());
  gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
                         _all_clock_intervals_ms.maximum(),
                         _all_clock_intervals_ms.sum());
  gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
                         _clock_due_to_scanning, _clock_due_to_marking);
  gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
                         _objs_scanned, _objs_found_on_bitmap);
  gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
                         _local_pushes, _local_pops, _local_max_size);
  gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
                         _global_pushes, _global_pops, _global_max_size);
  gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
                         _global_transfers_to,_global_transfers_from);
  gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
  gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
  gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
                         _steal_attempts, _steals);
  gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
  gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
                         _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
  gclog_or_tty->print_cr("      time out: %d, SATB: %d, termination: %d",
                         _aborted_timed_out, _aborted_satb, _aborted_termination);
#endif // _MARKING_STATS_
}

/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework. It can be called in parallel
    with other invocations of do_marking_step() on different tasks
    (but only one per task, obviously) and concurrently with the
    mutator threads, or during remark, hence it eliminates the need
    for two versions of the code. When called during remark, it will
    pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
    it needs to yield.

    The data structures that it uses to do marking work are the
    following:

      (1) Marking Bitmap. If there are gray objects that appear only
      on the bitmap (this happens either when dealing with an overflow
      or when the initial marking phase has simply marked the roots
      and didn't push them on the stack), then tasks claim heap
      regions whose bitmap they then scan to find gray objects. A
      global finger indicates where the end of the last claimed region
      is. A local finger indicates how far into the region a task has
      scanned. The two fingers are used to determine how to gray an
      object (i.e. whether simply marking it is OK, as it will be
      visited by a task in the future, or whether it needs to be also
      pushed on a stack).

      (2) Local Queue. The local queue of the task which is accessed
      reasonably efficiently by the task. Other tasks can steal from
      it when they run out of work. Throughout the marking phase, a
      task attempts to keep its local queue short but not totally
      empty, so that entries are available for stealing by other
      tasks. Only when there is no more work, a task will totally
      drain its local queue.

      (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and more fine-grain
      interaction with it which might cause contention. If it
      overflows, then the marking phase should restart and iterate
      over the bitmap to identify gray objects. Throughout the marking
      phase, tasks attempt to keep the global mark stack at a small
      length but not totally empty, so that entries are available for
      popping by other tasks. Only when there is no more work, tasks
      will totally drain the global mark stack.

      (4) SATB Buffer Queue. This is where completed SATB buffers are
      made available. Buffers are regularly removed from this queue
      and scanned for roots, so that the queue doesn't get too
      long. During remark, all completed buffers are processed, as
      well as the filled in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

      (1) When the marking phase has been aborted (after a Full GC).

      (2) When a global overflow (on the global stack) has been
      triggered.
      Before the task aborts, it will actually sync up with
      the other tasks to ensure that all the marking data structures
      (local queues, stacks, fingers etc.) are re-initialized so that
      when do_marking_step() completes, the marking phase can
      immediately restart.

      (3) When enough completed SATB buffers are available. The
      do_marking_step() method only tries to drain SATB buffers right
      at the beginning. So, if enough buffers are available, the
      marking step aborts and the SATB buffers are processed at
      the beginning of the next invocation.

      (4) To yield. When we have to yield, we abort and yield
      right at the end of do_marking_step(). This saves us from a lot
      of hassle as, by yielding, we might allow a Full GC. If this
      happens then objects will be compacted underneath our feet, the
      heap might shrink, etc. We save checking for this by just
      aborting and doing the yield right at the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (in
    sub ms intervals) throughout marking. It is this clock method that
    checks all the abort conditions which were mentioned above and
    decides when the task should abort. A work-based scheme is used to
    trigger this clock method: when the number of object words the
    marking phase has scanned or the number of references the marking
    phase has visited reach a given limit. Additional invocations of
    the clock method have been planted in a few other strategic places
    too. The initial reason for the clock method was to avoid calling
    vtime too regularly, as it is quite expensive. So, once it was in
    place, it was natural to piggy-back all the other conditions on it
    too and not constantly check them throughout the code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.

 *****************************************************************************/
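/*
   Illustrative sketch of the invocation loop described above (simplified
   from the concurrent marking caller; the 10.0ms time slice is an example
   value, not the tuned one):

     do {
       task->do_marking_step(10.0,
                             true,    // do_termination
                             false);  // is_serial
       // on abort: yield, or sync up for the overflow protocol, then
       // loop around and invoke do_marking_step() again
     } while (task->has_aborted() && !cm->has_aborted());
*/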
void CMTask::do_marking_step(double time_target_ms,
                             bool do_termination,
                             bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it is
  // possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;
  statsOnly( _interval_start_time_ms = _start_time_ms );

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms =
    g1_policy->get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached  = 0;
  recalculate_limits();

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
                           "target = %1.2lfms >>>>>>>>>>",
                           _worker_id, _calls, _time_target_ms);
  }

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet.
      // Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] we're scanning part "
                               "["PTR_FORMAT", "PTR_FORMAT") "
                               "of region "HR_FORMAT,
                               _worker_id, p2i(_finger), p2i(_region_limit),
                               HR_FORMAT_PARAMS(_curr_region));
      }

      assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region  == NULL, "invariant");
      assert(_finger       == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
      }
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        statsOnly( ++_regions_claimed );

        if (_cm->verbose_low()) {
          gclog_or_tty->print_cr("[%u] we successfully claimed "
                                 "region "PTR_FORMAT,
                                 _worker_id, p2i(claimed_region));
        }

        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while ( _curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
    }

    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other task's queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
    }

    while (!has_aborted()) {
      oop obj;
      statsOnly( ++_steal_attempts );

      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        if (_cm->verbose_medium()) {
          gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
                                 _worker_id, p2i((void*) obj));
        }

        statsOnly( ++_steals );

        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // If we are about to wrap up and go into termination, check if we
  // should raise the overflow flag.
  if (do_termination && !has_aborted()) {
    if (_cm->force_overflow()->should_force()) {
      _cm->set_has_overflown();
      regular_clock_call();
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
    }

    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
      }
    } else {
      // Apparently there's more work to do. Let's abort this task. The
      // caller will restart it and we can hopefully find more things to do.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] apparently there is more work to do",
                               _worker_id);
      }

      set_has_aborted();
      statsOnly( ++_aborted_termination );
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.

    statsOnly( ++_aborted );

    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
      }

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      statsOnly( ++_aborted_overflow );

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
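    // Outline of the two-barrier restart protocol used above (restating
    // the steps, not new logic): the first barrier guarantees that no
    // task is still marking while the data structures are being
    // re-initialized; the second guarantees that no task restarts before
    // the re-initialization is complete.
    //
    //   enter_first_sync_barrier(id);   // everyone has stopped marking
    //   clear_region_fields();          // task 0 also resets global state
    //   enter_second_sync_barrier(id);  // everyone is re-initialized
    //   // ...safe to restart marking...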

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _worker_id, _time_target_ms, elapsed_time_ms);
      if (_cm->has_aborted()) {
        gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
                               _worker_id);
      }
    }
  } else {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _worker_id, _time_target_ms, elapsed_time_ms);
    }
  }

  _claimed = false;
}

CMTask::CMTask(uint worker_id,
               ConcurrentMark* cm,
               size_t* marked_bytes,
               BitMap* card_bm,
               CMTaskQueue* task_queue,
               CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL),
    _marked_bytes_array(marked_bytes),
    _card_bm(card_bm) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  statsOnly( _clock_due_to_scanning = 0;
             _clock_due_to_marking  = 0 );

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX            "###"

#define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT            "   %-4s"
#define G1PPRL_TYPE_H_FORMAT          "   %4s"
#define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT          "  %9s"
#define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT        "  %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
  : _out(out),
    _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _hum_used_bytes(0), _hum_capacity_bytes(0),
    _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_committed = g1h->g1_committed();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  _out->cr();
  _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
                 G1PPRL_SUM_ADDR_FORMAT("committed")
                 G1PPRL_SUM_ADDR_FORMAT("reserved")
                 G1PPRL_SUM_BYTE_FORMAT("region-size"),
                 p2i(g1_committed.start()), p2i(g1_committed.end()),
                 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                 HeapRegion::GrainBytes);
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "type", "address-range",
                 "used", "prev-live", "next-live", "gc-eff",
                 "remset", "code-roots");
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "", "",
                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                 "(bytes)", "(bytes)");
}

// It takes as a parameter a reference to one of the _hum_* fields, deduces
// the corresponding value for a region in a humongous region
// series (either the region size, or what's left if the _hum_* field
// is < the region size), and updates the _hum_* field accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  size_t bytes = 0;
  // The > 0 check is to deal with the prev and next live bytes which
  // could be 0.
  if (*hum_bytes > 0) {
    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
    *hum_bytes -= bytes;
  }
  return bytes;
}
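// Worked example (illustrative, with 1MB regions): a humongous series
// recorded with _hum_used_bytes == 2.5MB is printed as one "starts
// humongous" row plus two "continues humongous" rows. Each call above
// hands out MIN2(GrainBytes, *hum_bytes): 1MB for the HUMS row, 1MB for
// the first HUMC row and the remaining 0.5MB for the last one, leaving
// the _hum_* field at zero afterwards.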
// Deduces the values for a region in a humongous region series from
// the _hum_* fields and updates those accordingly. It assumes that
// the _hum_* fields have already been set up from the "starts
// humongous" region and that we visit the regions in address order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
                                                     size_t* capacity_bytes,
                                                     size_t* prev_live_bytes,
                                                     size_t* next_live_bytes) {
  assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  *used_bytes      = get_hum_bytes(&_hum_used_bytes);
  *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
  *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type       = "";
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  if (r->used() == 0) {
    type = "FREE";
  } else if (r->is_survivor()) {
    type = "SURV";
  } else if (r->is_young()) {
    type = "EDEN";
  } else if (r->startsHumongous()) {
    type = "HUMS";

    assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
           _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
           "they should have been zeroed after the last time we used them");
    // Set up the _hum_* fields.
    _hum_capacity_bytes  = capacity_bytes;
    _hum_used_bytes      = used_bytes;
    _hum_prev_live_bytes = prev_live_bytes;
    _hum_next_live_bytes = next_live_bytes;
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    end = bottom + HeapRegion::GrainWords;
  } else if (r->continuesHumongous()) {
    type = "HUMC";
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    assert(end == bottom + HeapRegion::GrainWords, "invariant");
  } else {
    type = "OLD";
  }

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_FORMAT
                 G1PPRL_ADDR_BASE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_DOUBLE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT,
                 type, p2i(bottom), p2i(end),
                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                 remset_bytes, strong_code_roots_bytes);

  return false;
}
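// Note on the summary helpers used by the destructor below:
// bytes_to_mb() and perc() are small static helpers declared alongside
// this closure in its header. Judging from their use with the
// "%1.2f MB" and "%1.2f %%" formats, perc(val, total) reports val as a
// percentage of total (and is expected to yield 0.0 when total is 0,
// so the summary stays well-defined for an empty heap). This note is
// an aid to reading, not a quotation from the header.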
G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // Add the static memory usage to the remembered set sizes.
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() +
                         HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 " SUMMARY"
                 G1PPRL_SUM_MB_FORMAT("capacity")
                 G1PPRL_SUM_MB_PERC_FORMAT("used")
                 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                 G1PPRL_SUM_MB_FORMAT("remset")
                 G1PPRL_SUM_MB_FORMAT("code-roots"),
                 bytes_to_mb(_total_capacity_bytes),
                 bytes_to_mb(_total_used_bytes),
                 perc(_total_used_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_prev_live_bytes),
                 perc(_total_prev_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_next_live_bytes),
                 perc(_total_next_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_remset_bytes),
                 bytes_to_mb(_total_strong_code_roots_bytes));
  _out->cr();
}
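// Minimal usage sketch (illustrative; the real call sites live
// elsewhere in the G1 code, and the phase name below is an example):
// the closure is driven by a full region iteration, and doHeapRegion()
// returning false means "continue with the next region".
//
//   if (G1PrintRegionLivenessInfo) {
//     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//     G1CollectedHeap::heap()->heap_region_iterate(&cl);
//   }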