/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize  = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
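  // Each bit of the map covers 2^_shifter heap words, i.e. one bit per
  // possible object start. For example (assuming _shifter == 0 and
  // 8-byte HeapWords), the rounding below aligns addr up to the next
  // 8-byte boundary before converting it to a bit offset.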
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
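  // For example (assuming 8-byte HeapWords and _shifter == 0, i.e. one
  // bit per heap word): a 1 GB heap is 128M words, so each marking bit
  // map needs 128M bits, or 16 MB of backing store.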
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((uintptr_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}

void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // Note: intersection() returns the clipped region; it does not
  // modify mr in place.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflowed the marking stack during marking.
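  // Expansion doubles the current capacity, capped at MarkStackSizeMax,
  // and is only attempted here, while the world is stopped and the
  // stack is empty.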
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity; continue with the existing stack.
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
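  // We hold ParGCRareEvent_lock here, so unlike the CAS loop in
  // par_push() above we can publish the new index with a plain store
  // and then fill in the slots.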
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
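  // The first read of _next_survivor below is a lock-free fast-path
  // check; the actual claim is re-checked and published while holding
  // RootRegionScan_lock.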
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(log2_intptr(MinObjAlignment)),
  _markBitMap2(log2_intptr(MinObjAlignment)),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = "PTR_FORMAT, _heap_start, _heap_end);
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
            "than ParallelGCThreads (" UINT32_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads     = 0;
    _max_parallel_marking_threads = 0;
    _sleep_factor                 = 0.0;
    _marking_task_overhead        = 1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor          = 0.0;
      _marking_task_overhead = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
                                              (double) os::processor_count();
      double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor          = sleep_factor;
      _marking_task_overhead = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
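      // A worked example (assuming default flags): with
      // ParallelGCThreads == 8, scale_parallel_threads() returns
      // MAX2((8 + 2) / 4, 1U) == 2 concurrent marking threads.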
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor          = 0.0;
      _marking_task_overhead = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
                     (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
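    // Two cases below: if only MarkStackSize was set on the command
    // line, it is checked against the default MarkStackSizeMax; if both
    // were set on the command line, they are checked against each other.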
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.
    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions
  // at the end of evacuation pauses, when tasks are inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end   = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}

void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(_finger == _heap_end,
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   _finger, _heap_end));
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start  = _nextMarkBitMap->startWord();
  HeapWord* end    = _nextMarkBitMap->endWord();
  HeapWord* cur    = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur,next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}

void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC or for an
 * evacuation pause to occur. This is actually safe, since entering
 * the sync barrier is one of the last things do_marking_step() does,
 * and it doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _first_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->date_stamp(PrintGCDateStamps);
        gclog_or_tty->stamp(PrintGCTimeStamps);
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _second_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    ConcurrentGCThread::stsJoin();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          ConcurrentGCThread::stsLeave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          ConcurrentGCThread::stsJoin();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
        gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                               "overhead %1.4lf",
                               elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                               the_task->conc_overhead(os::elapsedTime()) * 8.0);
        gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                               elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() &&
               the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    ConcurrentGCThread::stsLeave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
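  // Root regions are the survivor regions recorded at the initial-mark
  // pause; they must be completely scanned before the next evacuation
  // pause can start, which is why claim_next() parcels them out to
  // workers one region at a time.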
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_strong_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
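// It independently re-walks the next marking bitmap and recomputes the
// expected region/card liveness bits and marked-byte counts, which the
// verification closure below compares against the aggregated values.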
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   start, ntams, hr->end()));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
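    // (set_bit_for_region() may already have been called above for the
    // allocated-since-marking portion; setting the same bits again is
    // harmless.)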
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrs_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        if (_verbose) {
          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                 "expected: %s, actual: %s",
                                 hr->hrs_index(), i,
                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
        }
        failures += 1;
      }
    }

    if (failures > 0 && _verbose) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
                             HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};

class G1ParVerifyFinalCountTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint    _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int  _failures;
  bool _verbose;

public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0), _verbose(false),
      _n_workers(0) {
    assert(VerifyDuringGC, "don't call this otherwise");

    // Use the value already set as the number of active threads
    // in the call to run_task().
1651     if (G1CollectedHeap::use_parallel_gc_threads()) {
1652       assert( _g1h->workers()->active_workers() > 0,
1653         "Should have been previously set");
1654       _n_workers = _g1h->workers()->active_workers();
1655     } else {
1656       _n_workers = 1;
1657     }
1658
1659     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1660     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1661
1662     _verbose = _cm->verbose_medium();
1663   }
1664
1665   void work(uint worker_id) {
1666     assert(worker_id < _n_workers, "invariant");
1667
1668     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1669                                             _actual_region_bm, _actual_card_bm,
1670                                             _expected_region_bm,
1671                                             _expected_card_bm,
1672                                             _verbose);
1673
1674     if (G1CollectedHeap::use_parallel_gc_threads()) {
1675       _g1h->heap_region_par_iterate_chunked(&verify_cl,
1676                                             worker_id,
1677                                             _n_workers,
1678                                             HeapRegion::VerifyCountClaimValue);
1679     } else {
1680       _g1h->heap_region_iterate(&verify_cl);
1681     }
1682
1683     Atomic::add(verify_cl.failures(), &_failures);
1684   }
1685
1686   int failures() const { return _failures; }
1687 };
1688
1689 // Closure that finalizes the liveness counting data.
1690 // Used during the cleanup pause.
1691 // Sets the bits corresponding to the interval [NTAMS, top]
1692 // (which contains the implicitly live objects) in the
1693 // card liveness bitmap. Also sets the bit for each region
1694 // containing live data in the region liveness bitmap.
1695
1696 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1697  public:
1698   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1699                               BitMap* region_bm,
1700                               BitMap* card_bm) :
1701     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1702
1703   bool doHeapRegion(HeapRegion* hr) {
1704
1705     if (hr->continuesHumongous()) {
1706       // We will ignore these here and process them when their
1707       // associated "starts humongous" region is processed (see
1708       // set_bit_for_heap_region()). Note that we cannot rely on their
1709       // associated "starts humongous" region to have their bit set to
1710       // 1 since, due to the region chunking in the parallel region
1711       // iteration, a "continues humongous" region might be visited
1712       // before its associated "starts humongous".
1713       return false;
1714     }
1715
1716     HeapWord* ntams = hr->next_top_at_mark_start();
1717     HeapWord* top   = hr->top();
1718
1719     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1720
1721     // Mark the allocated-since-marking portion...
1722     if (ntams < top) {
1723       // This definitely means the region has live objects.
1724       set_bit_for_region(hr);
1725
1726       // Now set the bits in the card bitmap for [ntams, top)
1727       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1728       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1729
1730       // Note: if we're looking at the last region in the heap, top
1731       // could actually be just beyond the end of the heap; end_idx
1732       // will then correspond to a (non-existent) card that is also
1733       // just beyond the heap.
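      // Illustrative example: if ntams falls at the start of card 40 and
      // top lands part-way into card 42, then start_idx == 40 and
      // end_idx == 42; without the adjustment below,
      // set_card_bitmap_range() would mark only cards [40, 42) and miss
      // card 42, which the last object partially covers.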
1734 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1735 // end of object is not card aligned - increment to cover 1736 // all the cards spanned by the object 1737 end_idx += 1; 1738 } 1739 1740 assert(end_idx <= _card_bm->size(), 1741 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1742 end_idx, _card_bm->size())); 1743 assert(start_idx < _card_bm->size(), 1744 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1745 start_idx, _card_bm->size())); 1746 1747 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1748 } 1749 1750 // Set the bit for the region if it contains live data 1751 if (hr->next_marked_bytes() > 0) { 1752 set_bit_for_region(hr); 1753 } 1754 1755 return false; 1756 } 1757 }; 1758 1759 class G1ParFinalCountTask: public AbstractGangTask { 1760 protected: 1761 G1CollectedHeap* _g1h; 1762 ConcurrentMark* _cm; 1763 BitMap* _actual_region_bm; 1764 BitMap* _actual_card_bm; 1765 1766 uint _n_workers; 1767 1768 public: 1769 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1770 : AbstractGangTask("G1 final counting"), 1771 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1772 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1773 _n_workers(0) { 1774 // Use the value already set as the number of active threads 1775 // in the call to run_task(). 1776 if (G1CollectedHeap::use_parallel_gc_threads()) { 1777 assert( _g1h->workers()->active_workers() > 0, 1778 "Should have been previously set"); 1779 _n_workers = _g1h->workers()->active_workers(); 1780 } else { 1781 _n_workers = 1; 1782 } 1783 } 1784 1785 void work(uint worker_id) { 1786 assert(worker_id < _n_workers, "invariant"); 1787 1788 FinalCountDataUpdateClosure final_update_cl(_g1h, 1789 _actual_region_bm, 1790 _actual_card_bm); 1791 1792 if (G1CollectedHeap::use_parallel_gc_threads()) { 1793 _g1h->heap_region_par_iterate_chunked(&final_update_cl, 1794 worker_id, 1795 _n_workers, 1796 HeapRegion::FinalCountClaimValue); 1797 } else { 1798 _g1h->heap_region_iterate(&final_update_cl); 1799 } 1800 } 1801 }; 1802 1803 class G1ParNoteEndTask; 1804 1805 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1806 G1CollectedHeap* _g1; 1807 int _worker_num; 1808 size_t _max_live_bytes; 1809 uint _regions_claimed; 1810 size_t _freed_bytes; 1811 FreeRegionList* _local_cleanup_list; 1812 OldRegionSet* _old_proxy_set; 1813 HumongousRegionSet* _humongous_proxy_set; 1814 HRRSCleanupTask* _hrrs_cleanup_task; 1815 double _claimed_region_time; 1816 double _max_region_time; 1817 1818 public: 1819 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1820 int worker_num, 1821 FreeRegionList* local_cleanup_list, 1822 OldRegionSet* old_proxy_set, 1823 HumongousRegionSet* humongous_proxy_set, 1824 HRRSCleanupTask* hrrs_cleanup_task) : 1825 _g1(g1), _worker_num(worker_num), 1826 _max_live_bytes(0), _regions_claimed(0), 1827 _freed_bytes(0), 1828 _claimed_region_time(0.0), _max_region_time(0.0), 1829 _local_cleanup_list(local_cleanup_list), 1830 _old_proxy_set(old_proxy_set), 1831 _humongous_proxy_set(humongous_proxy_set), 1832 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1833 1834 size_t freed_bytes() { return _freed_bytes; } 1835 1836 bool doHeapRegion(HeapRegion *hr) { 1837 if (hr->continuesHumongous()) { 1838 return false; 1839 } 1840 // We use a claim value of zero here because all regions 1841 // were claimed with value 1 in the FinalCount task. 
1842 _g1->reset_gc_time_stamps(hr); 1843 double start = os::elapsedTime(); 1844 _regions_claimed++; 1845 hr->note_end_of_marking(); 1846 _max_live_bytes += hr->max_live_bytes(); 1847 _g1->free_region_if_empty(hr, 1848 &_freed_bytes, 1849 _local_cleanup_list, 1850 _old_proxy_set, 1851 _humongous_proxy_set, 1852 _hrrs_cleanup_task, 1853 true /* par */); 1854 double region_time = (os::elapsedTime() - start); 1855 _claimed_region_time += region_time; 1856 if (region_time > _max_region_time) { 1857 _max_region_time = region_time; 1858 } 1859 return false; 1860 } 1861 1862 size_t max_live_bytes() { return _max_live_bytes; } 1863 uint regions_claimed() { return _regions_claimed; } 1864 double claimed_region_time_sec() { return _claimed_region_time; } 1865 double max_region_time_sec() { return _max_region_time; } 1866 }; 1867 1868 class G1ParNoteEndTask: public AbstractGangTask { 1869 friend class G1NoteEndOfConcMarkClosure; 1870 1871 protected: 1872 G1CollectedHeap* _g1h; 1873 size_t _max_live_bytes; 1874 size_t _freed_bytes; 1875 FreeRegionList* _cleanup_list; 1876 1877 public: 1878 G1ParNoteEndTask(G1CollectedHeap* g1h, 1879 FreeRegionList* cleanup_list) : 1880 AbstractGangTask("G1 note end"), _g1h(g1h), 1881 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { } 1882 1883 void work(uint worker_id) { 1884 double start = os::elapsedTime(); 1885 FreeRegionList local_cleanup_list("Local Cleanup List"); 1886 OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set"); 1887 HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set"); 1888 HRRSCleanupTask hrrs_cleanup_task; 1889 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list, 1890 &old_proxy_set, 1891 &humongous_proxy_set, 1892 &hrrs_cleanup_task); 1893 if (G1CollectedHeap::use_parallel_gc_threads()) { 1894 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id, 1895 _g1h->workers()->active_workers(), 1896 HeapRegion::NoteEndClaimValue); 1897 } else { 1898 _g1h->heap_region_iterate(&g1_note_end); 1899 } 1900 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1901 1902 // Now update the lists 1903 _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(), 1904 NULL /* free_list */, 1905 &old_proxy_set, 1906 &humongous_proxy_set, 1907 true /* par */); 1908 { 1909 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1910 _max_live_bytes += g1_note_end.max_live_bytes(); 1911 _freed_bytes += g1_note_end.freed_bytes(); 1912 1913 // If we iterate over the global cleanup list at the end of 1914 // cleanup to do this printing we will not guarantee to only 1915 // generate output for the newly-reclaimed regions (the list 1916 // might not be empty at the beginning of cleanup; we might 1917 // still be working on its previous contents). So we do the 1918 // printing here, before we append the new regions to the global 1919 // cleanup list. 
1920 1921 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1922 if (hr_printer->is_active()) { 1923 HeapRegionLinkedListIterator iter(&local_cleanup_list); 1924 while (iter.more_available()) { 1925 HeapRegion* hr = iter.get_next(); 1926 hr_printer->cleanup(hr); 1927 } 1928 } 1929 1930 _cleanup_list->add_as_tail(&local_cleanup_list); 1931 assert(local_cleanup_list.is_empty(), "post-condition"); 1932 1933 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1934 } 1935 } 1936 size_t max_live_bytes() { return _max_live_bytes; } 1937 size_t freed_bytes() { return _freed_bytes; } 1938 }; 1939 1940 class G1ParScrubRemSetTask: public AbstractGangTask { 1941 protected: 1942 G1RemSet* _g1rs; 1943 BitMap* _region_bm; 1944 BitMap* _card_bm; 1945 public: 1946 G1ParScrubRemSetTask(G1CollectedHeap* g1h, 1947 BitMap* region_bm, BitMap* card_bm) : 1948 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), 1949 _region_bm(region_bm), _card_bm(card_bm) { } 1950 1951 void work(uint worker_id) { 1952 if (G1CollectedHeap::use_parallel_gc_threads()) { 1953 _g1rs->scrub_par(_region_bm, _card_bm, worker_id, 1954 HeapRegion::ScrubRemSetClaimValue); 1955 } else { 1956 _g1rs->scrub(_region_bm, _card_bm); 1957 } 1958 } 1959 1960 }; 1961 1962 void ConcurrentMark::cleanup() { 1963 // world is stopped at this checkpoint 1964 assert(SafepointSynchronize::is_at_safepoint(), 1965 "world should be stopped"); 1966 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1967 1968 // If a full collection has happened, we shouldn't do this. 1969 if (has_aborted()) { 1970 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1971 return; 1972 } 1973 1974 HRSPhaseSetter x(HRSPhaseCleanup); 1975 g1h->verify_region_sets_optional(); 1976 1977 if (VerifyDuringGC) { 1978 HandleMark hm; // handle scope 1979 Universe::heap()->prepare_for_verify(); 1980 Universe::verify(VerifyOption_G1UsePrevMarking, 1981 " VerifyDuringGC:(before)"); 1982 } 1983 1984 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 1985 g1p->record_concurrent_mark_cleanup_start(); 1986 1987 double start = os::elapsedTime(); 1988 1989 HeapRegionRemSet::reset_for_cleanup_tasks(); 1990 1991 uint n_workers; 1992 1993 // Do counting once more with the world stopped for good measure. 1994 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 1995 1996 if (G1CollectedHeap::use_parallel_gc_threads()) { 1997 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 1998 "sanity check"); 1999 2000 g1h->set_par_threads(); 2001 n_workers = g1h->n_par_threads(); 2002 assert(g1h->n_par_threads() == n_workers, 2003 "Should not have been reset"); 2004 g1h->workers()->run_task(&g1_par_count_task); 2005 // Done with the parallel phase so reset to 0. 2006 g1h->set_par_threads(0); 2007 2008 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue), 2009 "sanity check"); 2010 } else { 2011 n_workers = 1; 2012 g1_par_count_task.work(0); 2013 } 2014 2015 if (VerifyDuringGC) { 2016 // Verify that the counting data accumulated during marking matches 2017 // that calculated by walking the marking bitmap. 
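    // The expected values are recomputed from scratch here: the
    // verification task below re-walks the (now stable) marking bitmap
    // into the two temporary bitmaps and then compares them, bit for
    // bit, against the data that was accumulated while marking ran.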
2018
2019     // Bitmaps to hold expected values
2020     BitMap expected_region_bm(_region_bm.size(), false);
2021     BitMap expected_card_bm(_card_bm.size(), false);
2022
2023     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2024                                                  &_region_bm,
2025                                                  &_card_bm,
2026                                                  &expected_region_bm,
2027                                                  &expected_card_bm);
2028
2029     if (G1CollectedHeap::use_parallel_gc_threads()) {
2030       g1h->set_par_threads((int)n_workers);
2031       g1h->workers()->run_task(&g1_par_verify_task);
2032       // Done with the parallel phase so reset to 0.
2033       g1h->set_par_threads(0);
2034
2035       assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2036              "sanity check");
2037     } else {
2038       g1_par_verify_task.work(0);
2039     }
2040
2041     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2042   }
2043
2044   size_t start_used_bytes = g1h->used();
2045   g1h->set_marking_complete();
2046
2047   double count_end = os::elapsedTime();
2048   double this_final_counting_time = (count_end - start);
2049   _total_counting_time += this_final_counting_time;
2050
2051   if (G1PrintRegionLivenessInfo) {
2052     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2053     _g1h->heap_region_iterate(&cl);
2054   }
2055
2056   // Install newly created mark bitmap as "prev".
2057   swapMarkBitMaps();
2058
2059   g1h->reset_gc_time_stamp();
2060
2061   // Note end of marking in all heap regions.
2062   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2063   if (G1CollectedHeap::use_parallel_gc_threads()) {
2064     g1h->set_par_threads((int)n_workers);
2065     g1h->workers()->run_task(&g1_par_note_end_task);
2066     g1h->set_par_threads(0);
2067
2068     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2069            "sanity check");
2070   } else {
2071     g1_par_note_end_task.work(0);
2072   }
2073   g1h->check_gc_time_stamps();
2074
2075   if (!cleanup_list_is_empty()) {
2076     // The cleanup list is not empty, so we'll have to process it
2077     // concurrently. Notify anyone else that might be wanting free
2078     // regions that there will be more free regions coming soon.
2079     g1h->set_free_regions_coming();
2080   }
2081
2082   // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
2083   // call below, since it affects the metric by which we sort the heap regions.
2084   if (G1ScrubRemSets) {
2085     double rs_scrub_start = os::elapsedTime();
2086     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2087     if (G1CollectedHeap::use_parallel_gc_threads()) {
2088       g1h->set_par_threads((int)n_workers);
2089       g1h->workers()->run_task(&g1_par_scrub_rs_task);
2090       g1h->set_par_threads(0);
2091
2092       assert(g1h->check_heap_region_claim_values(
2093                                             HeapRegion::ScrubRemSetClaimValue),
2094              "sanity check");
2095     } else {
2096       g1_par_scrub_rs_task.work(0);
2097     }
2098
2099     double rs_scrub_end = os::elapsedTime();
2100     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2101     _total_rs_scrub_time += this_rs_scrub_time;
2102   }
2103
2104   // This will also free any regions totally full of garbage objects,
2105   // and sort the regions.
2106   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2107
2108   // Statistics.
2109   double end = os::elapsedTime();
2110   _cleanup_times.add((end - start) * 1000.0);
2111
2112   if (G1Log::fine()) {
2113     g1h->print_size_transition(gclog_or_tty,
2114                                start_used_bytes,
2115                                g1h->used(),
2116                                g1h->capacity());
2117   }
2118
2119   // Clean up will have freed any regions completely full of garbage.
2120   // Update the soft reference policy with the new heap occupancy.
2121   Universe::update_heap_info_at_gc();
2122
2123   // We need to make this a "collection" so any collection pause that
2124   // races with it goes around and waits for completeCleanup to finish.
2125   g1h->increment_total_collections();
2126
2127   // We reclaimed old regions so we should calculate the sizes to make
2128   // sure we update the old gen/space data.
2129   g1h->g1mm()->update_sizes();
2130
2131   if (VerifyDuringGC) {
2132     HandleMark hm;  // handle scope
2133     Universe::heap()->prepare_for_verify();
2134     Universe::verify(VerifyOption_G1UsePrevMarking,
2135                      " VerifyDuringGC:(after)");
2136   }
2137
2138   g1h->verify_region_sets_optional();
2139   g1h->trace_heap_after_concurrent_cycle();
2140 }
2141
2142 void ConcurrentMark::completeCleanup() {
2143   if (has_aborted()) return;
2144
2145   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2146
2147   _cleanup_list.verify_optional();
2148   FreeRegionList tmp_free_list("Tmp Free List");
2149
2150   if (G1ConcRegionFreeingVerbose) {
2151     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2152                            "cleanup list has %u entries",
2153                            _cleanup_list.length());
2154   }
2155
2156   // No one else should be accessing the _cleanup_list at this point,
2157   // so it's not necessary to take any locks.
2158   while (!_cleanup_list.is_empty()) {
2159     HeapRegion* hr = _cleanup_list.remove_head();
2160     assert(hr != NULL, "the list was not empty");
2161     hr->par_clear();
2162     tmp_free_list.add_as_tail(hr);
2163
2164     // Instead of adding one region at a time to the secondary_free_list,
2165     // we accumulate them in the local list and move them a few at a
2166     // time. This also cuts down on the number of notify_all() calls
2167     // we do during this process. We'll also append the local list when
2168     // _cleanup_list is empty (which means we just removed the last
2169     // region from the _cleanup_list).
2170     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2171         _cleanup_list.is_empty()) {
2172       if (G1ConcRegionFreeingVerbose) {
2173         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2174                                "appending %u entries to the secondary_free_list, "
2175                                "cleanup list still has %u entries",
2176                                tmp_free_list.length(),
2177                                _cleanup_list.length());
2178       }
2179
2180       {
2181         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2182         g1h->secondary_free_list_add_as_tail(&tmp_free_list);
2183         SecondaryFreeList_lock->notify_all();
2184       }
2185
2186       if (G1StressConcRegionFreeing) {
2187         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2188           os::sleep(Thread::current(), (jlong) 1, false);
2189         }
2190       }
2191     }
2192   }
2193   assert(tmp_free_list.is_empty(), "post-condition");
2194 }
2195
2196 // Supporting Object and Oop closures for reference discovery
2197 // and processing during marking
2198
2199 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2200   HeapWord* addr = (HeapWord*)obj;
2201   return addr != NULL &&
2202          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2203 }
2204
2205 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2206 // Uses the CMTask associated with a worker thread (for serial reference
2207 // processing the CMTask for worker 0 is used) to preserve (mark) and
2208 // trace referent objects.
2209 //
2210 // Using the CMTask and embedded local queues avoids having the worker
2211 // threads operating on the global mark stack. This reduces the risk
2212 // of overflowing the stack - which we would rather avoid at this late
2213 // stage.
Also using the tasks' local queues removes the potential 2214 // of the workers interfering with each other that could occur if 2215 // operating on the global stack. 2216 2217 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2218 ConcurrentMark* _cm; 2219 CMTask* _task; 2220 int _ref_counter_limit; 2221 int _ref_counter; 2222 bool _is_serial; 2223 public: 2224 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2225 _cm(cm), _task(task), _is_serial(is_serial), 2226 _ref_counter_limit(G1RefProcDrainInterval) { 2227 assert(_ref_counter_limit > 0, "sanity"); 2228 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2229 _ref_counter = _ref_counter_limit; 2230 } 2231 2232 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2233 virtual void do_oop( oop* p) { do_oop_work(p); } 2234 2235 template <class T> void do_oop_work(T* p) { 2236 if (!_cm->has_overflown()) { 2237 oop obj = oopDesc::load_decode_heap_oop(p); 2238 if (_cm->verbose_high()) { 2239 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2240 "*"PTR_FORMAT" = "PTR_FORMAT, 2241 _task->worker_id(), p, (void*) obj); 2242 } 2243 2244 _task->deal_with_reference(obj); 2245 _ref_counter--; 2246 2247 if (_ref_counter == 0) { 2248 // We have dealt with _ref_counter_limit references, pushing them 2249 // and objects reachable from them on to the local stack (and 2250 // possibly the global stack). Call CMTask::do_marking_step() to 2251 // process these entries. 2252 // 2253 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2254 // there's nothing more to do (i.e. we're done with the entries that 2255 // were pushed as a result of the CMTask::deal_with_reference() calls 2256 // above) or we overflow. 2257 // 2258 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2259 // flag while there may still be some work to do. (See the comment at 2260 // the beginning of CMTask::do_marking_step() for those conditions - 2261 // one of which is reaching the specified time target.) It is only 2262 // when CMTask::do_marking_step() returns without setting the 2263 // has_aborted() flag that the marking step has completed. 2264 do { 2265 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2266 _task->do_marking_step(mark_step_duration_ms, 2267 false /* do_termination */, 2268 _is_serial); 2269 } while (_task->has_aborted() && !_cm->has_overflown()); 2270 _ref_counter = _ref_counter_limit; 2271 } 2272 } else { 2273 if (_cm->verbose_high()) { 2274 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2275 } 2276 } 2277 } 2278 }; 2279 2280 // 'Drain' oop closure used by both serial and parallel reference processing. 2281 // Uses the CMTask associated with a given worker thread (for serial 2282 // reference processing the CMtask for worker 0 is used). Calls the 2283 // do_marking_step routine, with an unbelievably large timeout value, 2284 // to drain the marking data structures of the remaining entries 2285 // added by the 'keep alive' oop closure above. 
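// Both closures are handed to ReferenceProcessor::process_discovered_references()
// in weakRefsWork() below: the 'keep alive' closure as its keep_alive
// argument, and this one as its complete_gc argument.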
2286
2287 class G1CMDrainMarkingStackClosure: public VoidClosure {
2288   ConcurrentMark* _cm;
2289   CMTask*         _task;
2290   bool            _is_serial;
2291  public:
2292   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2293     _cm(cm), _task(task), _is_serial(is_serial) {
2294     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2295   }
2296
2297   void do_void() {
2298     do {
2299       if (_cm->verbose_high()) {
2300         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2301                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2302       }
2303
2304       // We call CMTask::do_marking_step() to completely drain the local
2305       // and global marking stacks of entries pushed by the 'keep alive'
2306       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2307       //
2308       // CMTask::do_marking_step() is called in a loop, which we'll exit
2309       // if there's nothing more to do (i.e. we've completely drained the
2310       // entries that were pushed as a result of applying the 'keep alive'
2311       // closure to the entries on the discovered ref lists) or we overflow
2312       // the global marking stack.
2313       //
2314       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2315       // flag while there may still be some work to do. (See the comment at
2316       // the beginning of CMTask::do_marking_step() for those conditions -
2317       // one of which is reaching the specified time target.) It is only
2318       // when CMTask::do_marking_step() returns without setting the
2319       // has_aborted() flag that the marking step has completed.
2320
2321       _task->do_marking_step(1000000000.0 /* something very large */,
2322                              true         /* do_termination */,
2323                              _is_serial);
2324     } while (_task->has_aborted() && !_cm->has_overflown());
2325   }
2326 };
2327
2328 // Implementation of AbstractRefProcTaskExecutor for parallel
2329 // reference processing at the end of G1 concurrent marking
2330
2331 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2332  private:
2333   G1CollectedHeap* _g1h;
2334   ConcurrentMark*  _cm;
2335   WorkGang*        _workers;
2336   int              _active_workers;
2337
2338  public:
2339   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2340                           ConcurrentMark* cm,
2341                           WorkGang* workers,
2342                           int n_workers) :
2343     _g1h(g1h), _cm(cm),
2344     _workers(workers), _active_workers(n_workers) { }
2345
2346   // Executes the given task using concurrent marking worker threads.
2347   virtual void execute(ProcessTask& task);
2348   virtual void execute(EnqueueTask& task);
2349 };
2350
2351 class G1CMRefProcTaskProxy: public AbstractGangTask {
2352   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2353   ProcessTask&     _proc_task;
2354   G1CollectedHeap* _g1h;
2355   ConcurrentMark*  _cm;
2356
2357  public:
2358   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2359                        G1CollectedHeap* g1h,
2360                        ConcurrentMark* cm) :
2361     AbstractGangTask("Process reference objects in parallel"),
2362     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2363     ReferenceProcessor* rp = _g1h->ref_processor_cm();
2364     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
2365   }
2366
2367   virtual void work(uint worker_id) {
2368     CMTask* task = _cm->task(worker_id);
2369     G1CMIsAliveClosure g1_is_alive(_g1h);
2370     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2371     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2372
2373     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2374   }
2375 };
2376
2377 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2378   assert(_workers != NULL, "Need parallel worker threads.");
2379   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2380
2381   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2382
2383   // We need to reset the concurrency level before each
2384   // proxy task execution, so that the termination protocol
2385   // and overflow handling in CMTask::do_marking_step() know
2386   // how many workers to wait for.
2387   _cm->set_concurrency(_active_workers);
2388   _g1h->set_par_threads(_active_workers);
2389   _workers->run_task(&proc_task_proxy);
2390   _g1h->set_par_threads(0);
2391 }
2392
2393 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2394   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2395   EnqueueTask& _enq_task;
2396
2397  public:
2398   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2399     AbstractGangTask("Enqueue reference objects in parallel"),
2400     _enq_task(enq_task) { }
2401
2402   virtual void work(uint worker_id) {
2403     _enq_task.work(worker_id);
2404   }
2405 };
2406
2407 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2408   assert(_workers != NULL, "Need parallel worker threads.");
2409   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2410
2411   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2412
2413   // Not strictly necessary but...
2414   //
2415   // We need to reset the concurrency level before each
2416   // proxy task execution, so that the termination protocol
2417   // and overflow handling in CMTask::do_marking_step() know
2418   // how many workers to wait for.
2419   _cm->set_concurrency(_active_workers);
2420   _g1h->set_par_threads(_active_workers);
2421   _workers->run_task(&enq_task_proxy);
2422   _g1h->set_par_threads(0);
2423 }
2424
2425 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2426   if (has_overflown()) {
2427     // Skip processing the discovered references if we have
2428     // overflown the global marking stack. Reference objects
2429     // only get discovered once so it is OK to not
2430     // de-populate the discovered reference lists. We could have,
2431     // but the only benefit would be that, when marking restarts,
2432     // fewer reference objects are discovered.
2433     return;
2434   }
2435
2436   ResourceMark rm;
2437   HandleMark   hm;
2438
2439   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2440
2441   // Is alive closure.
2442   G1CMIsAliveClosure g1_is_alive(g1h);
2443
2444   // Inner scope to exclude the cleaning of the string and symbol
2445   // tables from the displayed time.
2446   {
2447     if (G1Log::finer()) {
2448       gclog_or_tty->put(' ');
2449     }
2450     GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
2451
2452     ReferenceProcessor* rp = g1h->ref_processor_cm();
2453
2454     // See the comment in G1CollectedHeap::ref_processing_init()
2455     // about how reference processing currently works in G1.
2456
2457     // Set the soft reference policy
2458     rp->setup_policy(clear_all_soft_refs);
2459     assert(_markStack.isEmpty(), "mark stack should be empty");
2460
2461     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2462     // in serial reference processing. Note these closures are also
2463     // used for serially processing (by the current thread) the
2464     // JNI references during parallel reference processing.
2465     //
2466     // These closures do not need to synchronize with the worker
2467     // threads involved in parallel reference processing as these
2468     // instances are executed serially by the current thread (e.g.
2469     // reference processing is not multi-threaded and is thus
2470     // performed by the current thread instead of a gang worker).
2471     //
2472     // The gang tasks involved in parallel reference processing create
2473     // their own instances of these closures, which do their own
2474     // synchronization among themselves.
2475     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2476     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2477
2478     // We need at least one active thread. If reference processing
2479     // is not multi-threaded we use the current (VMThread) thread,
2480     // otherwise we use the work gang from the G1CollectedHeap and
2481     // we utilize all the worker threads we can.
2482     bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2483     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2484     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2485
2486     // Parallel processing task executor.
2487     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2488                                               g1h->workers(), active_workers);
2489     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2490
2491     // Set the concurrency level. The phase was already set prior to
2492     // executing the remark task.
2493     set_concurrency(active_workers);
2494
2495     // Set the degree of MT processing here. If the discovery was done MT,
2496     // the number of threads involved during discovery could differ from
2497     // the number of active workers. This is OK as long as the discovered
2498     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2499     rp->set_active_mt_degree(active_workers);
2500
2501     // Process the weak references.
2502     const ReferenceProcessorStats& stats =
2503         rp->process_discovered_references(&g1_is_alive,
2504                                           &g1_keep_alive,
2505                                           &g1_drain_mark_stack,
2506                                           executor,
2507                                           g1h->gc_timer_cm());
2508     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2509
2510     // The do_oop work routines of the keep_alive and drain_marking_stack
2511     // oop closures will set the has_overflown flag if we overflow the
2512     // global marking stack.
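    // The assert below checks exactly that: at this point the stack has
    // either been drained completely or flagged as overflowed.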
2513
2514     assert(_markStack.overflow() || _markStack.isEmpty(),
2515            "mark stack should be empty (unless it overflowed)");
2516
2517     if (_markStack.overflow()) {
2518       // This should have been done already when we tried to push an
2519       // entry on to the global mark stack. But let's do it again.
2520       set_has_overflown();
2521     }
2522
2523     assert(rp->num_q() == active_workers, "why not");
2524
2525     rp->enqueue_discovered_references(executor);
2526
2527     rp->verify_no_references_recorded();
2528     assert(!rp->discovery_enabled(), "Post condition");
2529   }
2530
2531   g1h->unlink_string_and_symbol_table(&g1_is_alive,
2532                                       /* process_strings */ false, // currently strings are always roots
2533                                       /* process_symbols */ true);
2534 }
2535
2536 void ConcurrentMark::swapMarkBitMaps() {
2537   CMBitMapRO* temp = _prevMarkBitMap;
2538   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2539   _nextMarkBitMap  = (CMBitMap*) temp;
2540 }
2541
2542 class CMRemarkTask: public AbstractGangTask {
2543  private:
2544   ConcurrentMark* _cm;
2545   bool            _is_serial;
2546  public:
2547   void work(uint worker_id) {
2548     // Since all available tasks are actually started, we should
2549     // only proceed if we're supposed to be active.
2550     if (worker_id < _cm->active_tasks()) {
2551       CMTask* task = _cm->task(worker_id);
2552       task->record_start_time();
2553       do {
2554         task->do_marking_step(1000000000.0 /* something very large */,
2555                               true         /* do_termination */,
2556                               _is_serial);
2557       } while (task->has_aborted() && !_cm->has_overflown());
2558       // If we overflow, then we do not want to restart. We instead
2559       // want to abort remark and do concurrent marking again.
2560       task->record_end_time();
2561     }
2562   }
2563
2564   CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2565     AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2566     _cm->terminator()->reset_for_reuse(active_workers);
2567   }
2568 };
2569
2570 void ConcurrentMark::checkpointRootsFinalWork() {
2571   ResourceMark rm;
2572   HandleMark   hm;
2573   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2574
2575   g1h->ensure_parsability(false);
2576
2577   if (G1CollectedHeap::use_parallel_gc_threads()) {
2578     G1CollectedHeap::StrongRootsScope srs(g1h);
2579     // this is remark, so we'll use up all active threads
2580     uint active_workers = g1h->workers()->active_workers();
2581     if (active_workers == 0) {
2582       assert(active_workers > 0, "Should have been set earlier");
2583       active_workers = (uint) ParallelGCThreads;
2584       g1h->workers()->set_active_workers(active_workers);
2585     }
2586     set_concurrency_and_phase(active_workers, false /* concurrent */);
2587     // Leave _parallel_marking_threads at its
2588     // value originally calculated in the ConcurrentMark
2589     // constructor and pass values of the active workers
2590     // through the gang in the task.
2591
2592     CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2593     // We will start all available threads, even if we decide that the
2594     // active_workers will be fewer. The extra ones will just bail out
2595     // immediately.
2596     g1h->set_par_threads(active_workers);
2597     g1h->workers()->run_task(&remarkTask);
2598     g1h->set_par_threads(0);
2599   } else {
2600     G1CollectedHeap::StrongRootsScope srs(g1h);
2601     uint active_workers = 1;
2602     set_concurrency_and_phase(active_workers, false /* concurrent */);
2603
2604     // Note - if there's no work gang then the VMThread will be
2605     // the thread to execute the remark - serially.
We have 2606 // to pass true for the is_serial parameter so that 2607 // CMTask::do_marking_step() doesn't enter the sync 2608 // barriers in the event of an overflow. Doing so will 2609 // cause an assert that the current thread is not a 2610 // concurrent GC thread. 2611 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/); 2612 remarkTask.work(0); 2613 } 2614 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2615 guarantee(has_overflown() || 2616 satb_mq_set.completed_buffers_num() == 0, 2617 err_msg("Invariant: has_overflown = %s, num buffers = %d", 2618 BOOL_TO_STR(has_overflown()), 2619 satb_mq_set.completed_buffers_num())); 2620 2621 print_stats(); 2622 } 2623 2624 #ifndef PRODUCT 2625 2626 class PrintReachableOopClosure: public OopClosure { 2627 private: 2628 G1CollectedHeap* _g1h; 2629 outputStream* _out; 2630 VerifyOption _vo; 2631 bool _all; 2632 2633 public: 2634 PrintReachableOopClosure(outputStream* out, 2635 VerifyOption vo, 2636 bool all) : 2637 _g1h(G1CollectedHeap::heap()), 2638 _out(out), _vo(vo), _all(all) { } 2639 2640 void do_oop(narrowOop* p) { do_oop_work(p); } 2641 void do_oop( oop* p) { do_oop_work(p); } 2642 2643 template <class T> void do_oop_work(T* p) { 2644 oop obj = oopDesc::load_decode_heap_oop(p); 2645 const char* str = NULL; 2646 const char* str2 = ""; 2647 2648 if (obj == NULL) { 2649 str = ""; 2650 } else if (!_g1h->is_in_g1_reserved(obj)) { 2651 str = " O"; 2652 } else { 2653 HeapRegion* hr = _g1h->heap_region_containing(obj); 2654 guarantee(hr != NULL, "invariant"); 2655 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); 2656 bool marked = _g1h->is_marked(obj, _vo); 2657 2658 if (over_tams) { 2659 str = " >"; 2660 if (marked) { 2661 str2 = " AND MARKED"; 2662 } 2663 } else if (marked) { 2664 str = " M"; 2665 } else { 2666 str = " NOT"; 2667 } 2668 } 2669 2670 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", 2671 p, (void*) obj, str, str2); 2672 } 2673 }; 2674 2675 class PrintReachableObjectClosure : public ObjectClosure { 2676 private: 2677 G1CollectedHeap* _g1h; 2678 outputStream* _out; 2679 VerifyOption _vo; 2680 bool _all; 2681 HeapRegion* _hr; 2682 2683 public: 2684 PrintReachableObjectClosure(outputStream* out, 2685 VerifyOption vo, 2686 bool all, 2687 HeapRegion* hr) : 2688 _g1h(G1CollectedHeap::heap()), 2689 _out(out), _vo(vo), _all(all), _hr(hr) { } 2690 2691 void do_object(oop o) { 2692 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo); 2693 bool marked = _g1h->is_marked(o, _vo); 2694 bool print_it = _all || over_tams || marked; 2695 2696 if (print_it) { 2697 _out->print_cr(" "PTR_FORMAT"%s", 2698 (void *)o, (over_tams) ? " >" : (marked) ? 
" M" : ""); 2699 PrintReachableOopClosure oopCl(_out, _vo, _all); 2700 o->oop_iterate_no_header(&oopCl); 2701 } 2702 } 2703 }; 2704 2705 class PrintReachableRegionClosure : public HeapRegionClosure { 2706 private: 2707 G1CollectedHeap* _g1h; 2708 outputStream* _out; 2709 VerifyOption _vo; 2710 bool _all; 2711 2712 public: 2713 bool doHeapRegion(HeapRegion* hr) { 2714 HeapWord* b = hr->bottom(); 2715 HeapWord* e = hr->end(); 2716 HeapWord* t = hr->top(); 2717 HeapWord* p = _g1h->top_at_mark_start(hr, _vo); 2718 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2719 "TAMS: "PTR_FORMAT, b, e, t, p); 2720 _out->cr(); 2721 2722 HeapWord* from = b; 2723 HeapWord* to = t; 2724 2725 if (to > from) { 2726 _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to); 2727 _out->cr(); 2728 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2729 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2730 _out->cr(); 2731 } 2732 2733 return false; 2734 } 2735 2736 PrintReachableRegionClosure(outputStream* out, 2737 VerifyOption vo, 2738 bool all) : 2739 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } 2740 }; 2741 2742 void ConcurrentMark::print_reachable(const char* str, 2743 VerifyOption vo, 2744 bool all) { 2745 gclog_or_tty->cr(); 2746 gclog_or_tty->print_cr("== Doing heap dump... "); 2747 2748 if (G1PrintReachableBaseFile == NULL) { 2749 gclog_or_tty->print_cr(" #### error: no base file defined"); 2750 return; 2751 } 2752 2753 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2754 (JVM_MAXPATHLEN - 1)) { 2755 gclog_or_tty->print_cr(" #### error: file name too long"); 2756 return; 2757 } 2758 2759 char file_name[JVM_MAXPATHLEN]; 2760 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2761 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2762 2763 fileStream fout(file_name); 2764 if (!fout.is_open()) { 2765 gclog_or_tty->print_cr(" #### error: could not open file"); 2766 return; 2767 } 2768 2769 outputStream* out = &fout; 2770 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); 2771 out->cr(); 2772 2773 out->print_cr("--- ITERATING OVER REGIONS"); 2774 out->cr(); 2775 PrintReachableRegionClosure rcl(out, vo, all); 2776 _g1h->heap_region_iterate(&rcl); 2777 out->cr(); 2778 2779 gclog_or_tty->print_cr(" done"); 2780 gclog_or_tty->flush(); 2781 } 2782 2783 #endif // PRODUCT 2784 2785 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2786 // Note we are overriding the read-only view of the prev map here, via 2787 // the cast. 2788 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2789 } 2790 2791 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2792 _nextMarkBitMap->clearRange(mr); 2793 } 2794 2795 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) { 2796 clearRangePrevBitmap(mr); 2797 clearRangeNextBitmap(mr); 2798 } 2799 2800 HeapRegion* 2801 ConcurrentMark::claim_region(uint worker_id) { 2802 // "checkpoint" the finger 2803 HeapWord* finger = _finger; 2804 2805 // _heap_end will not change underneath our feet; it only changes at 2806 // yield points. 2807 while (finger < _heap_end) { 2808 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2809 2810 // Note on how this code handles humongous regions. In the 2811 // normal case the finger will reach the start of a "starts 2812 // humongous" (SH) region. 
Its end will either be the end of the
2813     // last "continues humongous" (CH) region in the sequence, or the
2814     // standard end of the SH region (if the SH is the only region in
2815     // the sequence). That way claim_region() will skip over the CH
2816     // regions. However, there is a subtle race between a CM thread
2817     // executing this method and a mutator thread doing a humongous
2818     // object allocation. The two are not mutually exclusive as the CM
2819     // thread does not need to hold the Heap_lock when it gets
2820     // here. So there is a chance that claim_region() will come across
2821     // a free region that's in the process of becoming a SH or a CH
2822     // region. In the former case, it will either
2823     //   a) Miss the update to the region's end, in which case it will
2824     //      visit every subsequent CH region, will find their bitmaps
2825     //      empty, and do nothing, or
2826     //   b) Observe the update of the region's end (in which case it
2827     //      will skip the subsequent CH regions).
2828     // If it comes across a region that suddenly becomes CH, the
2829     // scenario will be similar to b). So, the race between
2830     // claim_region() and a humongous object allocation might force us
2831     // to do a bit of unnecessary work (due to some unnecessary bitmap
2832     // iterations) but it should not introduce any correctness issues.
2833     HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
2834     HeapWord*   bottom        = curr_region->bottom();
2835     HeapWord*   end           = curr_region->end();
2836     HeapWord*   limit         = curr_region->next_top_at_mark_start();
2837
2838     if (verbose_low()) {
2839       gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2840                              "["PTR_FORMAT", "PTR_FORMAT"), "
2841                              "limit = "PTR_FORMAT,
2842                              worker_id, curr_region, bottom, end, limit);
2843     }
2844
2845     // Is the gap between reading the finger and doing the CAS too long?
2846     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2847     if (res == finger) {
2848       // we succeeded
2849
2850       // notice that _finger == end cannot be guaranteed here since
2851       // someone else might have moved the finger even further
2852       assert(_finger >= end, "the finger should have moved forward");
2853
2854       if (verbose_low()) {
2855         gclog_or_tty->print_cr("[%u] we were successful with region = "
2856                                PTR_FORMAT, worker_id, curr_region);
2857       }
2858
2859       if (limit > bottom) {
2860         if (verbose_low()) {
2861           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2862                                  "returning it ", worker_id, curr_region);
2863         }
2864         return curr_region;
2865       } else {
2866         assert(limit == bottom,
2867                "the region limit should be at bottom");
2868         if (verbose_low()) {
2869           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2870                                  "returning NULL", worker_id, curr_region);
2871         }
2872         // we return NULL and the caller should try calling
2873         // claim_region() again.
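        // (The CAS above already advanced the global finger past this
        // empty region, so the retry will look at the next region.)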
2874 return NULL; 2875 } 2876 } else { 2877 assert(_finger > finger, "the finger should have moved forward"); 2878 if (verbose_low()) { 2879 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2880 "global finger = "PTR_FORMAT", " 2881 "our finger = "PTR_FORMAT, 2882 worker_id, _finger, finger); 2883 } 2884 2885 // read it again 2886 finger = _finger; 2887 } 2888 } 2889 2890 return NULL; 2891 } 2892 2893 #ifndef PRODUCT 2894 enum VerifyNoCSetOopsPhase { 2895 VerifyNoCSetOopsStack, 2896 VerifyNoCSetOopsQueues, 2897 VerifyNoCSetOopsSATBCompleted, 2898 VerifyNoCSetOopsSATBThread 2899 }; 2900 2901 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2902 private: 2903 G1CollectedHeap* _g1h; 2904 VerifyNoCSetOopsPhase _phase; 2905 int _info; 2906 2907 const char* phase_str() { 2908 switch (_phase) { 2909 case VerifyNoCSetOopsStack: return "Stack"; 2910 case VerifyNoCSetOopsQueues: return "Queue"; 2911 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 2912 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 2913 default: ShouldNotReachHere(); 2914 } 2915 return NULL; 2916 } 2917 2918 void do_object_work(oop obj) { 2919 guarantee(!_g1h->obj_in_cs(obj), 2920 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2921 (void*) obj, phase_str(), _info)); 2922 } 2923 2924 public: 2925 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2926 2927 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2928 _phase = phase; 2929 _info = info; 2930 } 2931 2932 virtual void do_oop(oop* p) { 2933 oop obj = oopDesc::load_decode_heap_oop(p); 2934 do_object_work(obj); 2935 } 2936 2937 virtual void do_oop(narrowOop* p) { 2938 // We should not come across narrow oops while scanning marking 2939 // stacks and SATB buffers. 2940 ShouldNotReachHere(); 2941 } 2942 2943 virtual void do_object(oop obj) { 2944 do_object_work(obj); 2945 } 2946 }; 2947 2948 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 2949 bool verify_enqueued_buffers, 2950 bool verify_thread_buffers, 2951 bool verify_fingers) { 2952 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2953 if (!G1CollectedHeap::heap()->mark_in_progress()) { 2954 return; 2955 } 2956 2957 VerifyNoCSetOopsClosure cl; 2958 2959 if (verify_stacks) { 2960 // Verify entries on the global mark stack 2961 cl.set_phase(VerifyNoCSetOopsStack); 2962 _markStack.oops_do(&cl); 2963 2964 // Verify entries on the task queues 2965 for (uint i = 0; i < _max_worker_id; i += 1) { 2966 cl.set_phase(VerifyNoCSetOopsQueues, i); 2967 CMTaskQueue* queue = _task_queues->queue(i); 2968 queue->oops_do(&cl); 2969 } 2970 } 2971 2972 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 2973 2974 // Verify entries on the enqueued SATB buffers 2975 if (verify_enqueued_buffers) { 2976 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 2977 satb_qs.iterate_completed_buffers_read_only(&cl); 2978 } 2979 2980 // Verify entries on the per-thread SATB buffers 2981 if (verify_thread_buffers) { 2982 cl.set_phase(VerifyNoCSetOopsSATBThread); 2983 satb_qs.iterate_thread_buffers_read_only(&cl); 2984 } 2985 2986 if (verify_fingers) { 2987 // Verify the global finger 2988 HeapWord* global_finger = finger(); 2989 if (global_finger != NULL && global_finger < _heap_end) { 2990 // The global finger always points to a heap region boundary. 
We 2991 // use heap_region_containing_raw() to get the containing region 2992 // given that the global finger could be pointing to a free region 2993 // which subsequently becomes continues humongous. If that 2994 // happens, heap_region_containing() will return the bottom of the 2995 // corresponding starts humongous region and the check below will 2996 // not hold any more. 2997 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 2998 guarantee(global_finger == global_hr->bottom(), 2999 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 3000 global_finger, HR_FORMAT_PARAMS(global_hr))); 3001 } 3002 3003 // Verify the task fingers 3004 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 3005 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 3006 CMTask* task = _tasks[i]; 3007 HeapWord* task_finger = task->finger(); 3008 if (task_finger != NULL && task_finger < _heap_end) { 3009 // See above note on the global finger verification. 3010 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 3011 guarantee(task_finger == task_hr->bottom() || 3012 !task_hr->in_collection_set(), 3013 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 3014 task_finger, HR_FORMAT_PARAMS(task_hr))); 3015 } 3016 } 3017 } 3018 } 3019 #endif // PRODUCT 3020 3021 // Aggregate the counting data that was constructed concurrently 3022 // with marking. 3023 class AggregateCountDataHRClosure: public HeapRegionClosure { 3024 G1CollectedHeap* _g1h; 3025 ConcurrentMark* _cm; 3026 CardTableModRefBS* _ct_bs; 3027 BitMap* _cm_card_bm; 3028 uint _max_worker_id; 3029 3030 public: 3031 AggregateCountDataHRClosure(G1CollectedHeap* g1h, 3032 BitMap* cm_card_bm, 3033 uint max_worker_id) : 3034 _g1h(g1h), _cm(g1h->concurrent_mark()), 3035 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())), 3036 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { } 3037 3038 bool doHeapRegion(HeapRegion* hr) { 3039 if (hr->continuesHumongous()) { 3040 // We will ignore these here and process them when their 3041 // associated "starts humongous" region is processed. 3042 // Note that we cannot rely on their associated 3043 // "starts humongous" region to have their bit set to 1 3044 // since, due to the region chunking in the parallel region 3045 // iteration, a "continues humongous" region might be visited 3046 // before its associated "starts humongous". 3047 return false; 3048 } 3049 3050 HeapWord* start = hr->bottom(); 3051 HeapWord* limit = hr->next_top_at_mark_start(); 3052 HeapWord* end = hr->end(); 3053 3054 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(), 3055 err_msg("Preconditions not met - " 3056 "start: "PTR_FORMAT", limit: "PTR_FORMAT", " 3057 "top: "PTR_FORMAT", end: "PTR_FORMAT, 3058 start, limit, hr->top(), hr->end())); 3059 3060 assert(hr->next_marked_bytes() == 0, "Precondition"); 3061 3062 if (start == limit) { 3063 // NTAMS of this region has not been set so nothing to do. 3064 return false; 3065 } 3066 3067 // 'start' should be in the heap. 
3068     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3069     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3070     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3071
3072     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3073     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3074     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3075
3076     // If ntams is not card aligned then we bump the card bitmap index
3077     // for limit so that we get all the cards spanned by
3078     // the object ending at ntams.
3079     // Note: if this is the last region in the heap then ntams
3080     // could actually be just beyond the end of the heap;
3081     // limit_idx will then correspond to a (non-existent) card
3082     // that is also outside the heap.
3083     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3084       limit_idx += 1;
3085     }
3086
3087     assert(limit_idx <= end_idx, "or else use atomics");
3088
3089     // Aggregate the "stripe" in the count data associated with hr.
3090     uint hrs_index = hr->hrs_index();
3091     size_t marked_bytes = 0;
3092
3093     for (uint i = 0; i < _max_worker_id; i += 1) {
3094       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3095       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3096
3097       // Fetch the marked_bytes in this region for task i and
3098       // add it to the running total for this region.
3099       marked_bytes += marked_bytes_array[hrs_index];
3100
3101       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3102       // into the global card bitmap.
3103       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
3104
3105       while (scan_idx < limit_idx) {
3106         assert(task_card_bm->at(scan_idx) == true, "should be");
3107         _cm_card_bm->set_bit(scan_idx);
3108         assert(_cm_card_bm->at(scan_idx) == true, "should be");
3109
3110         // BitMap::get_next_one_offset() can handle the case when
3111         // its left_offset parameter is greater than its right_offset
3112         // parameter. It does, however, have an early exit if
3113         // left_offset == right_offset. So let's limit the value
3114         // passed in for left offset here.
3115         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3116         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3117       }
3118     }
3119
3120     // Update the marked bytes for this region.
3121 hr->add_to_marked_bytes(marked_bytes); 3122 3123 // Next heap region 3124 return false; 3125 } 3126 }; 3127 3128 class G1AggregateCountDataTask: public AbstractGangTask { 3129 protected: 3130 G1CollectedHeap* _g1h; 3131 ConcurrentMark* _cm; 3132 BitMap* _cm_card_bm; 3133 uint _max_worker_id; 3134 int _active_workers; 3135 3136 public: 3137 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3138 ConcurrentMark* cm, 3139 BitMap* cm_card_bm, 3140 uint max_worker_id, 3141 int n_workers) : 3142 AbstractGangTask("Count Aggregation"), 3143 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3144 _max_worker_id(max_worker_id), 3145 _active_workers(n_workers) { } 3146 3147 void work(uint worker_id) { 3148 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3149 3150 if (G1CollectedHeap::use_parallel_gc_threads()) { 3151 _g1h->heap_region_par_iterate_chunked(&cl, worker_id, 3152 _active_workers, 3153 HeapRegion::AggregateCountClaimValue); 3154 } else { 3155 _g1h->heap_region_iterate(&cl); 3156 } 3157 } 3158 }; 3159 3160 3161 void ConcurrentMark::aggregate_count_data() { 3162 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? 3163 _g1h->workers()->active_workers() : 3164 1); 3165 3166 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3167 _max_worker_id, n_workers); 3168 3169 if (G1CollectedHeap::use_parallel_gc_threads()) { 3170 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 3171 "sanity check"); 3172 _g1h->set_par_threads(n_workers); 3173 _g1h->workers()->run_task(&g1_par_agg_task); 3174 _g1h->set_par_threads(0); 3175 3176 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue), 3177 "sanity check"); 3178 _g1h->reset_heap_region_claim_values(); 3179 } else { 3180 g1_par_agg_task.work(0); 3181 } 3182 } 3183 3184 // Clear the per-worker arrays used to store the per-region counting data 3185 void ConcurrentMark::clear_all_count_data() { 3186 // Clear the global card bitmap - it will be filled during 3187 // liveness count aggregation (during remark) and the 3188 // final counting task. 3189 _card_bm.clear(); 3190 3191 // Clear the global region bitmap - it will be filled as part 3192 // of the final counting task. 
3193 _region_bm.clear(); 3194 3195 uint max_regions = _g1h->max_regions(); 3196 assert(_max_worker_id > 0, "uninitialized"); 3197 3198 for (uint i = 0; i < _max_worker_id; i += 1) { 3199 BitMap* task_card_bm = count_card_bitmap_for(i); 3200 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3201 3202 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3203 assert(marked_bytes_array != NULL, "uninitialized"); 3204 3205 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3206 task_card_bm->clear(); 3207 } 3208 } 3209 3210 void ConcurrentMark::print_stats() { 3211 if (verbose_stats()) { 3212 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3213 for (size_t i = 0; i < _active_tasks; ++i) { 3214 _tasks[i]->print_stats(); 3215 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3216 } 3217 } 3218 } 3219 3220 // abandon current marking iteration due to a Full GC 3221 void ConcurrentMark::abort() { 3222 // Clear all marks to force marking thread to do nothing 3223 _nextMarkBitMap->clearAll(); 3224 // Clear the liveness counting data 3225 clear_all_count_data(); 3226 // Empty mark stack 3227 reset_marking_state(); 3228 for (uint i = 0; i < _max_worker_id; ++i) { 3229 _tasks[i]->clear_region_fields(); 3230 } 3231 _has_aborted = true; 3232 3233 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3234 satb_mq_set.abandon_partial_marking(); 3235 // This can be called either during or outside marking, we'll read 3236 // the expected_active value from the SATB queue set. 3237 satb_mq_set.set_active_all_threads( 3238 false, /* new active value */ 3239 satb_mq_set.is_active() /* expected_active */); 3240 3241 _g1h->trace_heap_after_concurrent_cycle(); 3242 _g1h->register_concurrent_cycle_end(); 3243 } 3244 3245 static void print_ms_time_info(const char* prefix, const char* name, 3246 NumberSeq& ns) { 3247 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3248 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3249 if (ns.num() > 0) { 3250 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", 3251 prefix, ns.sd(), ns.maximum()); 3252 } 3253 } 3254 3255 void ConcurrentMark::print_summary_info() { 3256 gclog_or_tty->print_cr(" Concurrent marking:"); 3257 print_ms_time_info(" ", "init marks", _init_times); 3258 print_ms_time_info(" ", "remarks", _remark_times); 3259 { 3260 print_ms_time_info(" ", "final marks", _remark_mark_times); 3261 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3262 3263 } 3264 print_ms_time_info(" ", "cleanups", _cleanup_times); 3265 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3266 _total_counting_time, 3267 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3268 (double)_cleanup_times.num() 3269 : 0.0)); 3270 if (G1ScrubRemSets) { 3271 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3272 _total_rs_scrub_time, 3273 (_cleanup_times.num() > 0 ? 
_total_rs_scrub_time * 1000.0 / 3274 (double)_cleanup_times.num() 3275 : 0.0)); 3276 } 3277 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3278 (_init_times.sum() + _remark_times.sum() + 3279 _cleanup_times.sum())/1000.0); 3280 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3281 "(%8.2f s marking).", 3282 cmThread()->vtime_accum(), 3283 cmThread()->vtime_mark_accum()); 3284 } 3285 3286 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3287 if (use_parallel_marking_threads()) { 3288 _parallel_workers->print_worker_threads_on(st); 3289 } 3290 } 3291 3292 void ConcurrentMark::print_on_error(outputStream* st) const { 3293 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3294 _prevMarkBitMap, _nextMarkBitMap); 3295 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3296 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3297 } 3298 3299 // We take a break if someone is trying to stop the world. 3300 bool ConcurrentMark::do_yield_check(uint worker_id) { 3301 if (should_yield()) { 3302 if (worker_id == 0) { 3303 _g1h->g1_policy()->record_concurrent_pause(); 3304 } 3305 cmThread()->yield(); 3306 return true; 3307 } else { 3308 return false; 3309 } 3310 } 3311 3312 bool ConcurrentMark::should_yield() { 3313 return cmThread()->should_yield(); 3314 } 3315 3316 bool ConcurrentMark::containing_card_is_marked(void* p) { 3317 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1); 3318 return _card_bm.at(offset >> CardTableModRefBS::card_shift); 3319 } 3320 3321 bool ConcurrentMark::containing_cards_are_marked(void* start, 3322 void* last) { 3323 return containing_card_is_marked(start) && 3324 containing_card_is_marked(last); 3325 } 3326 3327 #ifndef PRODUCT 3328 // for debugging purposes 3329 void ConcurrentMark::print_finger() { 3330 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3331 _heap_start, _heap_end, _finger); 3332 for (uint i = 0; i < _max_worker_id; ++i) { 3333 gclog_or_tty->print(" %u: "PTR_FORMAT, i, _tasks[i]->finger()); 3334 } 3335 gclog_or_tty->print_cr(""); 3336 } 3337 #endif 3338 3339 void CMTask::scan_object(oop obj) { 3340 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3341 3342 if (_cm->verbose_high()) { 3343 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, 3344 _worker_id, (void*) obj); 3345 } 3346 3347 size_t obj_size = obj->size(); 3348 _words_scanned += obj_size; 3349 3350 obj->oop_iterate(_cm_oop_closure); 3351 statsOnly( ++_objs_scanned ); 3352 check_limits(); 3353 } 3354 3355 // Closure for iteration over bitmaps 3356 class CMBitMapClosure : public BitMapClosure { 3357 private: 3358 // the bitmap that is being iterated over 3359 CMBitMap* _nextMarkBitMap; 3360 ConcurrentMark* _cm; 3361 CMTask* _task; 3362 3363 public: 3364 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3365 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3366 3367 bool do_bit(size_t offset) { 3368 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3369 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3370 assert( addr < _cm->finger(), "invariant"); 3371 3372 statsOnly( _task->increase_objs_found_on_bitmap() ); 3373 assert(addr >= _task->finger(), "invariant"); 3374 3375 // We move that task's local finger along. 
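    // (If the iteration later aborts, do_marking_step() resumes from just
    // past this finger value, so the object is not rescanned; see the
    // nextObject() adjustment there.)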
3376 _task->move_finger_to(addr); 3377 3378 _task->scan_object(oop(addr)); 3379 // we only partially drain the local queue and global stack 3380 _task->drain_local_queue(true); 3381 _task->drain_global_stack(true); 3382 3383 // if the has_aborted flag has been raised, we need to bail out of 3384 // the iteration 3385 return !_task->has_aborted(); 3386 } 3387 }; 3388 3389 // Closure for iterating over objects, currently only used for 3390 // processing SATB buffers. 3391 class CMObjectClosure : public ObjectClosure { 3392 private: 3393 CMTask* _task; 3394 3395 public: 3396 void do_object(oop obj) { 3397 _task->deal_with_reference(obj); 3398 } 3399 3400 CMObjectClosure(CMTask* task) : _task(task) { } 3401 }; 3402 3403 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3404 ConcurrentMark* cm, 3405 CMTask* task) 3406 : _g1h(g1h), _cm(cm), _task(task) { 3407 assert(_ref_processor == NULL, "should be initialized to NULL"); 3408 3409 if (G1UseConcMarkReferenceProcessing) { 3410 _ref_processor = g1h->ref_processor_cm(); 3411 assert(_ref_processor != NULL, "should not be NULL"); 3412 } 3413 } 3414 3415 void CMTask::setup_for_region(HeapRegion* hr) { 3416 // Separated the asserts so that we know which one fires. 3417 assert(hr != NULL, 3418 "claim_region() should have filtered out continues humongous regions"); 3419 assert(!hr->continuesHumongous(), 3420 "claim_region() should have filtered out continues humongous regions"); 3421 3422 if (_cm->verbose_low()) { 3423 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3424 _worker_id, hr); 3425 } 3426 3427 _curr_region = hr; 3428 _finger = hr->bottom(); 3429 update_region_limit(); 3430 } 3431 3432 void CMTask::update_region_limit() { 3433 HeapRegion* hr = _curr_region; 3434 HeapWord* bottom = hr->bottom(); 3435 HeapWord* limit = hr->next_top_at_mark_start(); 3436 3437 if (limit == bottom) { 3438 if (_cm->verbose_low()) { 3439 gclog_or_tty->print_cr("[%u] found an empty region " 3440 "["PTR_FORMAT", "PTR_FORMAT")", 3441 _worker_id, bottom, limit); 3442 } 3443 // The region was collected underneath our feet. 3444 // We set the finger to bottom to ensure that the bitmap 3445 // iteration that will follow this will not do anything. 3446 // (this is not a condition that holds when we set the region up, 3447 // as the region is not supposed to be empty in the first place) 3448 _finger = bottom; 3449 } else if (limit >= _region_limit) { 3450 assert(limit >= _finger, "peace of mind"); 3451 } else { 3452 assert(limit < _region_limit, "only way to get here"); 3453 // This can happen under some pretty unusual circumstances. An 3454 // evacuation pause empties the region underneath our feet (NTAMS 3455 // at bottom). We then do some allocation in the region (NTAMS 3456 // stays at bottom), followed by the region being used as a GC 3457 // alloc region (NTAMS will move to top() and the objects 3458 // originally below it will be grayed). All objects now marked in 3459 // the region are explicitly grayed, if below the global finger, 3460 // and we do not need in fact to scan anything else. So, we simply 3461 // set _finger to be limit to ensure that the bitmap iteration 3462 // doesn't do anything. 
3463 _finger = limit; 3464 } 3465 3466 _region_limit = limit; 3467 } 3468 3469 void CMTask::giveup_current_region() { 3470 assert(_curr_region != NULL, "invariant"); 3471 if (_cm->verbose_low()) { 3472 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, 3473 _worker_id, _curr_region); 3474 } 3475 clear_region_fields(); 3476 } 3477 3478 void CMTask::clear_region_fields() { 3479 // Values for these three fields that indicate that we're not 3480 // holding on to a region. 3481 _curr_region = NULL; 3482 _finger = NULL; 3483 _region_limit = NULL; 3484 } 3485 3486 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3487 if (cm_oop_closure == NULL) { 3488 assert(_cm_oop_closure != NULL, "invariant"); 3489 } else { 3490 assert(_cm_oop_closure == NULL, "invariant"); 3491 } 3492 _cm_oop_closure = cm_oop_closure; 3493 } 3494 3495 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3496 guarantee(nextMarkBitMap != NULL, "invariant"); 3497 3498 if (_cm->verbose_low()) { 3499 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3500 } 3501 3502 _nextMarkBitMap = nextMarkBitMap; 3503 clear_region_fields(); 3504 3505 _calls = 0; 3506 _elapsed_time_ms = 0.0; 3507 _termination_time_ms = 0.0; 3508 _termination_start_time_ms = 0.0; 3509 3510 #if _MARKING_STATS_ 3511 _local_pushes = 0; 3512 _local_pops = 0; 3513 _local_max_size = 0; 3514 _objs_scanned = 0; 3515 _global_pushes = 0; 3516 _global_pops = 0; 3517 _global_max_size = 0; 3518 _global_transfers_to = 0; 3519 _global_transfers_from = 0; 3520 _regions_claimed = 0; 3521 _objs_found_on_bitmap = 0; 3522 _satb_buffers_processed = 0; 3523 _steal_attempts = 0; 3524 _steals = 0; 3525 _aborted = 0; 3526 _aborted_overflow = 0; 3527 _aborted_cm_aborted = 0; 3528 _aborted_yield = 0; 3529 _aborted_timed_out = 0; 3530 _aborted_satb = 0; 3531 _aborted_termination = 0; 3532 #endif // _MARKING_STATS_ 3533 } 3534 3535 bool CMTask::should_exit_termination() { 3536 regular_clock_call(); 3537 // This is called when we are in the termination protocol. We should 3538 // quit if, for some reason, this task wants to abort or the global 3539 // stack is not empty (this means that we can get work from it). 3540 return !_cm->mark_stack_empty() || has_aborted(); 3541 } 3542 3543 void CMTask::reached_limit() { 3544 assert(_words_scanned >= _words_scanned_limit || 3545 _refs_reached >= _refs_reached_limit , 3546 "shouldn't have been called otherwise"); 3547 regular_clock_call(); 3548 } 3549 3550 void CMTask::regular_clock_call() { 3551 if (has_aborted()) return; 3552 3553 // First, we need to recalculate the words scanned and refs reached 3554 // limits for the next clock call. 3555 recalculate_limits(); 3556 3557 // During the regular clock call we do the following 3558 3559 // (1) If an overflow has been flagged, then we abort. 3560 if (_cm->has_overflown()) { 3561 set_has_aborted(); 3562 return; 3563 } 3564 3565 // If we are not concurrent (i.e. we're doing remark) we don't need 3566 // to check anything else. The other steps are only needed during 3567 // the concurrent marking phase. 3568 if (!concurrent()) return; 3569 3570 // (2) If marking has been aborted for Full GC, then we also abort. 3571 if (_cm->has_aborted()) { 3572 set_has_aborted(); 3573 statsOnly( ++_aborted_cm_aborted ); 3574 return; 3575 } 3576 3577 double curr_time_ms = os::elapsedVTime() * 1000.0; 3578 3579 // (3) If marking stats are enabled, then we update the step history. 
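  // (Note: statsOnly() and the block below expand to nothing unless
  // _MARKING_STATS_ is defined, so the statistics are compiled out in
  // normal builds.)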
3580 #if _MARKING_STATS_
3581   if (_words_scanned >= _words_scanned_limit) {
3582     ++_clock_due_to_scanning;
3583   }
3584   if (_refs_reached >= _refs_reached_limit) {
3585     ++_clock_due_to_marking;
3586   }
3587
3588   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3589   _interval_start_time_ms = curr_time_ms;
3590   _all_clock_intervals_ms.add(last_interval_ms);
3591
3592   if (_cm->verbose_medium()) {
3593     gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3594                            "scanned = %d%s, refs reached = %d%s",
3595                            _worker_id, last_interval_ms,
3596                            _words_scanned,
3597                            (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3598                            _refs_reached,
3599                            (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3600   }
3601 #endif // _MARKING_STATS_
3602
3603   // (4) We check whether we should yield. If we have to, then we abort.
3604   if (_cm->should_yield()) {
3605     // We should yield. To do this we abort the task. The caller is
3606     // responsible for yielding.
3607     set_has_aborted();
3608     statsOnly( ++_aborted_yield );
3609     return;
3610   }
3611
3612   // (5) We check whether we've reached our time quota. If we have,
3613   // then we abort.
3614   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3615   if (elapsed_time_ms > _time_target_ms) {
3616     set_has_aborted();
3617     _has_timed_out = true;
3618     statsOnly( ++_aborted_timed_out );
3619     return;
3620   }
3621
3622   // (6) Finally, we check whether there are enough completed SATB
3623   // buffers available for processing. If there are, we abort.
3624   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3625   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3626     if (_cm->verbose_low()) {
3627       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3628                              _worker_id);
3629     }
3630     // We do need to process SATB buffers, so we'll abort and restart
3631     // the marking task to do so.
3632     set_has_aborted();
3633     statsOnly( ++_aborted_satb );
3634     return;
3635   }
3636 }
3637
3638 void CMTask::recalculate_limits() {
3639   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3640   _words_scanned_limit      = _real_words_scanned_limit;
3641
3642   _real_refs_reached_limit  = _refs_reached + refs_reached_period;
3643   _refs_reached_limit       = _real_refs_reached_limit;
3644 }
3645
3646 void CMTask::decrease_limits() {
3647   // This is called when we believe that we're going to do an infrequent
3648   // operation which will increase the per-byte scanned cost (i.e. move
3649   // entries to/from the global stack). It basically tries to decrease the
3650   // scanning limit so that the clock is called earlier.
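  // (For illustration: with the 3/4 reduction below, the next clock call
  // comes after at most a quarter of words_scanned_period (respectively
  // refs_reached_period) beyond the point where the limits were last
  // recalculated.)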
3651
3652   if (_cm->verbose_medium()) {
3653     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3654   }
3655
3656   _words_scanned_limit = _real_words_scanned_limit -
3657     3 * words_scanned_period / 4;
3658   _refs_reached_limit  = _real_refs_reached_limit -
3659     3 * refs_reached_period / 4;
3660 }
3661
3662 void CMTask::move_entries_to_global_stack() {
3663   // Local array where we'll store the entries that will be popped
3664   // from the local queue.
3665   oop buffer[global_stack_transfer_size];
3666
3667   int n = 0;
3668   oop obj;
3669   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3670     buffer[n] = obj;
3671     ++n;
3672   }
3673
3674   if (n > 0) {
3675     // We popped at least one entry from the local queue.
3676
3677     statsOnly( ++_global_transfers_to; _local_pops += n );
3678
3679     if (!_cm->mark_stack_push(buffer, n)) {
3680       if (_cm->verbose_low()) {
3681         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3682                                _worker_id);
3683       }
3684       set_has_aborted();
3685     } else {
3686       // The transfer was successful.
3687
3688       if (_cm->verbose_medium()) {
3689         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3690                                _worker_id, n);
3691       }
3692       statsOnly( int tmp_size = _cm->mark_stack_size();
3693                  if (tmp_size > _global_max_size) {
3694                    _global_max_size = tmp_size;
3695                  }
3696                  _global_pushes += n );
3697     }
3698   }
3699
3700   // This operation was quite expensive, so decrease the limits.
3701   decrease_limits();
3702 }
3703
3704 void CMTask::get_entries_from_global_stack() {
3705   // Local array where we'll store the entries that will be popped
3706   // from the global stack.
3707   oop buffer[global_stack_transfer_size];
3708   int n;
3709   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3710   assert(n <= global_stack_transfer_size,
3711          "we should not pop more than the given limit");
3712   if (n > 0) {
3713     // Yes, we did actually pop at least one entry.
3714
3715     statsOnly( ++_global_transfers_from; _global_pops += n );
3716     if (_cm->verbose_medium()) {
3717       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3718                              _worker_id, n);
3719     }
3720     for (int i = 0; i < n; ++i) {
3721       bool success = _task_queue->push(buffer[i]);
3722       // We only call this when the local queue is empty or under a
3723       // given target limit. So, we do not expect this push to fail.
3724       assert(success, "invariant");
3725     }
3726
3727     statsOnly( int tmp_size = _task_queue->size();
3728                if (tmp_size > _local_max_size) {
3729                  _local_max_size = tmp_size;
3730                }
3731                _local_pushes += n );
3732   }
3733
3734   // This operation was quite expensive, so decrease the limits.
3735   decrease_limits();
3736 }
3737
3738 void CMTask::drain_local_queue(bool partially) {
3739   if (has_aborted()) return;
3740
3741   // Decide what the target size is, depending on whether we're going to
3742   // drain it partially (so that other tasks can steal if they run out
3743   // of things to do) or totally (at the very end).
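  // (A partial drain leaves up to min(max_elems/3, GCDrainStackTargetSize)
  // entries behind for other tasks to steal, per the computation below;
  // a total drain goes all the way down to zero.)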
3744   size_t target_size;
3745   if (partially) {
3746     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3747   } else {
3748     target_size = 0;
3749   }
3750
3751   if (_task_queue->size() > target_size) {
3752     if (_cm->verbose_high()) {
3753       gclog_or_tty->print_cr("[%u] draining local queue, target size = %d",
3754                              _worker_id, target_size);
3755     }
3756
3757     oop obj;
3758     bool ret = _task_queue->pop_local(obj);
3759     while (ret) {
3760       statsOnly( ++_local_pops );
3761
3762       if (_cm->verbose_high()) {
3763         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3764                                (void*) obj);
3765       }
3766
3767       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3768       assert(!_g1h->is_on_master_free_list(
3769                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3770
3771       scan_object(obj);
3772
3773       if (_task_queue->size() <= target_size || has_aborted()) {
3774         ret = false;
3775       } else {
3776         ret = _task_queue->pop_local(obj);
3777       }
3778     }
3779
3780     if (_cm->verbose_high()) {
3781       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3782                              _worker_id, _task_queue->size());
3783     }
3784   }
3785 }
3786
3787 void CMTask::drain_global_stack(bool partially) {
3788   if (has_aborted()) return;
3789
3790   // We have a policy to drain the local queue before we attempt to
3791   // drain the global stack.
3792   assert(partially || _task_queue->size() == 0, "invariant");
3793
3794   // Decide what the target size is, depending on whether we're going to
3795   // drain it partially (so that other tasks can steal if they run out
3796   // of things to do) or totally (at the very end). Notice that,
3797   // because we move entries from the global stack in chunks or
3798   // because another task might be doing the same, we might in fact
3799   // drop below the target. But, this is not a problem.
3800   size_t target_size;
3801   if (partially) {
3802     target_size = _cm->partial_mark_stack_size_target();
3803   } else {
3804     target_size = 0;
3805   }
3806
3807   if (_cm->mark_stack_size() > target_size) {
3808     if (_cm->verbose_low()) {
3809       gclog_or_tty->print_cr("[%u] draining global stack, target size %d",
3810                              _worker_id, target_size);
3811     }
3812
3813     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3814       get_entries_from_global_stack();
3815       drain_local_queue(partially);
3816     }
3817
3818     if (_cm->verbose_low()) {
3819       gclog_or_tty->print_cr("[%u] drained global stack, size = %d",
3820                              _worker_id, _cm->mark_stack_size());
3821     }
3822   }
3823 }
3824
3825 // The SATB queue set makes several assumptions about whether the par
3826 // or non-par versions of its methods should be called; this is why
3827 // some of the code below is replicated. We should really get rid of
3828 // the single-threaded version of the code to simplify things.
3829 void CMTask::drain_satb_buffers() {
3830   if (has_aborted()) return;
3831
3832   // We set this so that the regular clock knows that we're in the
3833   // middle of draining buffers and doesn't set the abort flag when it
3834   // notices that SATB buffers are available for draining. It'd be
3835   // very counterproductive if it did that. :-)
3836   _draining_satb_buffers = true;
3837
3838   CMObjectClosure oc(this);
3839   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3840   if (G1CollectedHeap::use_parallel_gc_threads()) {
3841     satb_mq_set.set_par_closure(_worker_id, &oc);
3842   } else {
3843     satb_mq_set.set_closure(&oc);
3844   }
3845
3846   // This keeps claiming and applying the closure to completed buffers
3847   // until we run out of buffers or we need to abort.
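  // (The parallel and serial branches below are structurally identical;
  // they differ only in which SATBMarkQueueSet entry points they use.)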
3848 if (G1CollectedHeap::use_parallel_gc_threads()) { 3849 while (!has_aborted() && 3850 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) { 3851 if (_cm->verbose_medium()) { 3852 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3853 } 3854 statsOnly( ++_satb_buffers_processed ); 3855 regular_clock_call(); 3856 } 3857 } else { 3858 while (!has_aborted() && 3859 satb_mq_set.apply_closure_to_completed_buffer()) { 3860 if (_cm->verbose_medium()) { 3861 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3862 } 3863 statsOnly( ++_satb_buffers_processed ); 3864 regular_clock_call(); 3865 } 3866 } 3867 3868 if (!concurrent() && !has_aborted()) { 3869 // We should only do this during remark. 3870 if (G1CollectedHeap::use_parallel_gc_threads()) { 3871 satb_mq_set.par_iterate_closure_all_threads(_worker_id); 3872 } else { 3873 satb_mq_set.iterate_closure_all_threads(); 3874 } 3875 } 3876 3877 _draining_satb_buffers = false; 3878 3879 assert(has_aborted() || 3880 concurrent() || 3881 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3882 3883 if (G1CollectedHeap::use_parallel_gc_threads()) { 3884 satb_mq_set.set_par_closure(_worker_id, NULL); 3885 } else { 3886 satb_mq_set.set_closure(NULL); 3887 } 3888 3889 // again, this was a potentially expensive operation, decrease the 3890 // limits to get the regular clock call early 3891 decrease_limits(); 3892 } 3893 3894 void CMTask::print_stats() { 3895 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3896 _worker_id, _calls); 3897 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3898 _elapsed_time_ms, _termination_time_ms); 3899 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3900 _step_times_ms.num(), _step_times_ms.avg(), 3901 _step_times_ms.sd()); 3902 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3903 _step_times_ms.maximum(), _step_times_ms.sum()); 3904 3905 #if _MARKING_STATS_ 3906 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3907 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3908 _all_clock_intervals_ms.sd()); 3909 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3910 _all_clock_intervals_ms.maximum(), 3911 _all_clock_intervals_ms.sum()); 3912 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", 3913 _clock_due_to_scanning, _clock_due_to_marking); 3914 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", 3915 _objs_scanned, _objs_found_on_bitmap); 3916 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", 3917 _local_pushes, _local_pops, _local_max_size); 3918 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", 3919 _global_pushes, _global_pops, _global_max_size); 3920 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", 3921 _global_transfers_to,_global_transfers_from); 3922 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed); 3923 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); 3924 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", 3925 _steal_attempts, _steals); 3926 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); 3927 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", 3928 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3929 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", 3930 _aborted_timed_out, 
_aborted_satb, _aborted_termination);
3931 #endif // _MARKING_STATS_
3932 }
3933
3934 /*****************************************************************************
3935
3936     The do_marking_step(time_target_ms, ...) method is the building
3937     block of the parallel marking framework. It can be called in parallel
3938     with other invocations of do_marking_step() on different tasks
3939     (but only one per task, obviously) and concurrently with the
3940     mutator threads, or during remark, hence it eliminates the need
3941     for two versions of the code. When called during remark, it will
3942     pick up from where the task left off during the concurrent marking
3943     phase. Interestingly, tasks are claimable during evacuation pauses
3944     too, since do_marking_step() ensures that it aborts before it
3945     needs to yield.
3946
3947     The data structures that it uses to do marking work are the
3948     following:
3949
3950       (1) Marking Bitmap. If there are gray objects that appear only
3951       on the bitmap (this happens either when dealing with an overflow
3952       or when the initial marking phase has simply marked the roots
3953       and didn't push them on the stack), then tasks claim heap
3954       regions whose bitmap they then scan to find gray objects. A
3955       global finger indicates where the end of the last claimed region
3956       is. A local finger indicates how far into the region a task has
3957       scanned. The two fingers are used to determine how to gray an
3958       object (i.e. whether simply marking it is OK, as it will be
3959       visited by a task in the future, or whether it also needs to be
3960       pushed on a stack).
3961
3962       (2) Local Queue. The task's local queue, which it can access
3963       reasonably efficiently. Other tasks can steal from it when they
3964       run out of work. Throughout the marking phase, a
3965       task attempts to keep its local queue short but not totally
3966       empty, so that entries are available for stealing by other
3967       tasks. Only when there is no more work does a task totally
3968       drain its local queue.
3969
3970       (3) Global Mark Stack. This handles local queue overflow. During
3971       marking only sets of entries are moved between it and the local
3972       queues, as access to it requires a mutex and more fine-grained
3973       interaction with it might cause contention. If it
3974       overflows, then the marking phase should restart and iterate
3975       over the bitmap to identify gray objects. Throughout the marking
3976       phase, tasks attempt to keep the global mark stack at a small
3977       length but not totally empty, so that entries are available for
3978       popping by other tasks. Only when there is no more work do tasks
3979       totally drain the global mark stack.
3980
3981       (4) SATB Buffer Queue. This is where completed SATB buffers are
3982       made available. Buffers are regularly removed from this queue
3983       and scanned for roots, so that the queue doesn't get too
3984       long. During remark, all completed buffers are processed, as
3985       well as the filled-in parts of any uncompleted buffers.
3986
3987     The do_marking_step() method tries to abort when the time target
3988     has been reached. There are a few other cases when the
3989     do_marking_step() method also aborts:
3990
3991       (1) When the marking phase has been aborted (after a Full GC).
3992
3993       (2) When a global overflow (on the global stack) has been
3994       triggered. Before the task aborts, it will actually sync up with
3995       the other tasks to ensure that all the marking data structures
3996       (local queues, stacks, fingers etc.) are re-initialized so that
3997       when do_marking_step() completes, the marking phase can
3998       immediately restart.
3999
4000       (3) When enough completed SATB buffers are available. The
4001       do_marking_step() method only tries to drain SATB buffers right
4002       at the beginning. So, if enough buffers are available, the
4003       marking step aborts and the SATB buffers are processed at
4004       the beginning of the next invocation.
4005
4006       (4) To yield. When we have to yield, we abort and yield
4007       right at the end of do_marking_step(). This saves us a lot
4008       of hassle as, by yielding, we might allow a Full GC. If this
4009       happens then objects will be compacted underneath our feet, the
4010       heap might shrink, etc. We save checking for this by just
4011       aborting and doing the yield right at the end.
4012
4013     From the above it follows that the do_marking_step() method should
4014     be called in a loop (or, otherwise, regularly) until it completes.
4015
4016     If a marking step completes without its has_aborted() flag being
4017     true, it means it has completed the current marking phase (and
4018     also all other marking tasks have done so and have all synced up).
4019
4020     A method called regular_clock_call() is invoked "regularly" (in
4021     sub-ms intervals) throughout marking. It is this clock method that
4022     checks all the abort conditions which were mentioned above and
4023     decides when the task should abort. A work-based scheme is used to
4024     trigger this clock method: when the number of object words the
4025     marking phase has scanned or the number of references the marking
4026     phase has visited reaches a given limit. Additional invocations of
4027     the clock method have been planted in a few other strategic places
4028     too. The initial reason for the clock method was to avoid calling
4029     vtime too regularly, as it is quite expensive. So, once it was in
4030     place, it was natural to piggy-back all the other conditions on it
4031     too and not constantly check them throughout the code.
4032
4033     If do_termination is true then do_marking_step will enter its
4034     termination protocol.
4035
4036     The value of is_serial must be true when do_marking_step is being
4037     called serially (i.e. by the VMThread) and do_marking_step should
4038     skip any synchronization in the termination and overflow code.
4039     Examples include the serial remark code and the serial reference
4040     processing closures.
4041
4042     The value of is_serial must be false when do_marking_step is
4043     being called by any of the worker threads in a work gang.
4044     Examples include the concurrent marking code (CMMarkingTask),
4045     the MT remark code, and the MT reference processing closures.
4046
4047  *****************************************************************************/
4048
4049 void CMTask::do_marking_step(double time_target_ms,
4050                              bool do_termination,
4051                              bool is_serial) {
4052   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4053   assert(concurrent() == _cm->concurrent(), "they should be the same");
4054
4055   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4056   assert(_task_queues != NULL, "invariant");
4057   assert(_task_queue != NULL, "invariant");
4058   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4059
4060   assert(!_claimed,
4061          "only one thread should claim this task at any one time");
4062
4063   // OK, this doesn't safeguard against all possible scenarios, as it is
4064   // possible for two threads to set the _claimed flag at the same
4065   // time.
But it is only for debugging purposes anyway and it will 4066 // catch most problems. 4067 _claimed = true; 4068 4069 _start_time_ms = os::elapsedVTime() * 1000.0; 4070 statsOnly( _interval_start_time_ms = _start_time_ms ); 4071 4072 // If do_stealing is true then do_marking_step will attempt to 4073 // steal work from the other CMTasks. It only makes sense to 4074 // enable stealing when the termination protocol is enabled 4075 // and do_marking_step() is not being called serially. 4076 bool do_stealing = do_termination && !is_serial; 4077 4078 double diff_prediction_ms = 4079 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 4080 _time_target_ms = time_target_ms - diff_prediction_ms; 4081 4082 // set up the variables that are used in the work-based scheme to 4083 // call the regular clock method 4084 _words_scanned = 0; 4085 _refs_reached = 0; 4086 recalculate_limits(); 4087 4088 // clear all flags 4089 clear_has_aborted(); 4090 _has_timed_out = false; 4091 _draining_satb_buffers = false; 4092 4093 ++_calls; 4094 4095 if (_cm->verbose_low()) { 4096 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 4097 "target = %1.2lfms >>>>>>>>>>", 4098 _worker_id, _calls, _time_target_ms); 4099 } 4100 4101 // Set up the bitmap and oop closures. Anything that uses them is 4102 // eventually called from this method, so it is OK to allocate these 4103 // statically. 4104 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 4105 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 4106 set_cm_oop_closure(&cm_oop_closure); 4107 4108 if (_cm->has_overflown()) { 4109 // This can happen if the mark stack overflows during a GC pause 4110 // and this task, after a yield point, restarts. We have to abort 4111 // as we need to get into the overflow protocol which happens 4112 // right at the end of this task. 4113 set_has_aborted(); 4114 } 4115 4116 // First drain any available SATB buffers. After this, we will not 4117 // look at SATB buffers before the next invocation of this method. 4118 // If enough completed SATB buffers are queued up, the regular clock 4119 // will abort this task so that it restarts. 4120 drain_satb_buffers(); 4121 // ...then partially drain the local queue and the global stack 4122 drain_local_queue(true); 4123 drain_global_stack(true); 4124 4125 do { 4126 if (!has_aborted() && _curr_region != NULL) { 4127 // This means that we're already holding on to a region. 4128 assert(_finger != NULL, "if region is not NULL, then the finger " 4129 "should not be NULL either"); 4130 4131 // We might have restarted this task after an evacuation pause 4132 // which might have evacuated the region we're holding on to 4133 // underneath our feet. Let's read its limit again to make sure 4134 // that we do not iterate over a region of the heap that 4135 // contains garbage (update_region_limit() will also move 4136 // _finger to the start of the region if it is found empty). 4137 update_region_limit(); 4138 // We will start from _finger not from the start of the region, 4139 // as we might be restarting this task after aborting half-way 4140 // through scanning this region. In this case, _finger points to 4141 // the address where we last found a marked object. If this is a 4142 // fresh region, _finger points to start(). 
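      // (The region constructed below is therefore [_finger, _region_limit),
      // i.e. only the not-yet-scanned tail of the current region.)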
4143 MemRegion mr = MemRegion(_finger, _region_limit); 4144 4145 if (_cm->verbose_low()) { 4146 gclog_or_tty->print_cr("[%u] we're scanning part " 4147 "["PTR_FORMAT", "PTR_FORMAT") " 4148 "of region "HR_FORMAT, 4149 _worker_id, _finger, _region_limit, 4150 HR_FORMAT_PARAMS(_curr_region)); 4151 } 4152 4153 assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(), 4154 "humongous regions should go around loop once only"); 4155 4156 // Some special cases: 4157 // If the memory region is empty, we can just give up the region. 4158 // If the current region is humongous then we only need to check 4159 // the bitmap for the bit associated with the start of the object, 4160 // scan the object if it's live, and give up the region. 4161 // Otherwise, let's iterate over the bitmap of the part of the region 4162 // that is left. 4163 // If the iteration is successful, give up the region. 4164 if (mr.is_empty()) { 4165 giveup_current_region(); 4166 regular_clock_call(); 4167 } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) { 4168 if (_nextMarkBitMap->isMarked(mr.start())) { 4169 // The object is marked - apply the closure 4170 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4171 bitmap_closure.do_bit(offset); 4172 } 4173 // Even if this task aborted while scanning the humongous object 4174 // we can (and should) give up the current region. 4175 giveup_current_region(); 4176 regular_clock_call(); 4177 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4178 giveup_current_region(); 4179 regular_clock_call(); 4180 } else { 4181 assert(has_aborted(), "currently the only way to do so"); 4182 // The only way to abort the bitmap iteration is to return 4183 // false from the do_bit() method. However, inside the 4184 // do_bit() method we move the _finger to point to the 4185 // object currently being looked at. So, if we bail out, we 4186 // have definitely set _finger to something non-null. 4187 assert(_finger != NULL, "invariant"); 4188 4189 // Region iteration was actually aborted. So now _finger 4190 // points to the address of the object we last scanned. If we 4191 // leave it there, when we restart this task, we will rescan 4192 // the object. It is easy to avoid this. We move the finger by 4193 // enough to point to the next possible object header (the 4194 // bitmap knows by how much we need to move it as it knows its 4195 // granularity). 4196 assert(_finger < _region_limit, "invariant"); 4197 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4198 // Check if bitmap iteration was aborted while scanning the last object 4199 if (new_finger >= _region_limit) { 4200 giveup_current_region(); 4201 } else { 4202 move_finger_to(new_finger); 4203 } 4204 } 4205 } 4206 // At this point we have either completed iterating over the 4207 // region we were holding on to, or we have aborted. 4208 4209 // We then partially drain the local queue and the global stack. 4210 // (Do we really need this?) 4211 drain_local_queue(true); 4212 drain_global_stack(true); 4213 4214 // Read the note on the claim_region() method on why it might 4215 // return NULL with potentially more regions available for 4216 // claiming and why we have to check out_of_regions() to determine 4217 // whether we're done or not. 4218 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4219 // We are going to try to claim a new region. We should have 4220 // given up on the previous one. 
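        // (claim_region() advances the global finger atomically, so each
        // region is handed out to at most one task.)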
4221       // Separated the asserts so that we know which one fires.
4222       assert(_curr_region == NULL, "invariant");
4223       assert(_finger == NULL, "invariant");
4224       assert(_region_limit == NULL, "invariant");
4225       if (_cm->verbose_low()) {
4226         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4227       }
4228       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4229       if (claimed_region != NULL) {
4230         // Yes, we managed to claim one.
4231         statsOnly( ++_regions_claimed );
4232
4233         if (_cm->verbose_low()) {
4234           gclog_or_tty->print_cr("[%u] we successfully claimed "
4235                                  "region "PTR_FORMAT,
4236                                  _worker_id, claimed_region);
4237         }
4238
4239         setup_for_region(claimed_region);
4240         assert(_curr_region == claimed_region, "invariant");
4241       }
4242       // It is important to call the regular clock here. It might take
4243       // a while to claim a region if, for example, we hit a large
4244       // block of empty regions. So we need to call the regular clock
4245       // method once round the loop to make sure it's called
4246       // frequently enough.
4247       regular_clock_call();
4248     }
4249
4250     if (!has_aborted() && _curr_region == NULL) {
4251       assert(_cm->out_of_regions(),
4252              "at this point we should be out of regions");
4253     }
4254   } while ( _curr_region != NULL && !has_aborted());
4255
4256   if (!has_aborted()) {
4257     // We cannot check whether the global stack is empty, since other
4258     // tasks might be pushing objects to it concurrently.
4259     assert(_cm->out_of_regions(),
4260            "at this point we should be out of regions");
4261
4262     if (_cm->verbose_low()) {
4263       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4264     }
4265
4266     // Try to reduce the number of available SATB buffers so that
4267     // remark has less work to do.
4268     drain_satb_buffers();
4269   }
4270
4271   // Since we've done everything else, we can now totally drain the
4272   // local queue and global stack.
4273   drain_local_queue(false);
4274   drain_global_stack(false);
4275
4276   // Attempt at work stealing from other tasks' queues.
4277   if (do_stealing && !has_aborted()) {
4278     // We have not aborted. This means that we have finished all that
4279     // we could. Let's try to do some stealing...
4280
4281     // We cannot check whether the global stack is empty, since other
4282     // tasks might be pushing objects to it concurrently.
4283     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4284            "only way to reach here");
4285
4286     if (_cm->verbose_low()) {
4287       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4288     }
4289
4290     while (!has_aborted()) {
4291       oop obj;
4292       statsOnly( ++_steal_attempts );
4293
4294       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4295         if (_cm->verbose_medium()) {
4296           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4297                                  _worker_id, (void*) obj);
4298         }
4299
4300         statsOnly( ++_steals );
4301
4302         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4303                "any stolen object should be marked");
4304         scan_object(obj);
4305
4306         // And since we're towards the end, let's totally drain the
4307         // local queue and global stack.
4308         drain_local_queue(false);
4309         drain_global_stack(false);
4310       } else {
4311         break;
4312       }
4313     }
4314   }
4315
4316   // If we are about to wrap up and go into termination, check if we
4317   // should raise the overflow flag.
4318   if (do_termination && !has_aborted()) {
4319     if (_cm->force_overflow()->should_force()) {
4320       _cm->set_has_overflown();
4321       regular_clock_call();
4322     }
4323   }
4324
4325   // We still haven't aborted.
Now, let's try to get into the 4326 // termination protocol. 4327 if (do_termination && !has_aborted()) { 4328 // We cannot check whether the global stack is empty, since other 4329 // tasks might be concurrently pushing objects on it. 4330 // Separated the asserts so that we know which one fires. 4331 assert(_cm->out_of_regions(), "only way to reach here"); 4332 assert(_task_queue->size() == 0, "only way to reach here"); 4333 4334 if (_cm->verbose_low()) { 4335 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); 4336 } 4337 4338 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 4339 4340 // The CMTask class also extends the TerminatorTerminator class, 4341 // hence its should_exit_termination() method will also decide 4342 // whether to exit the termination protocol or not. 4343 bool finished = (is_serial || 4344 _cm->terminator()->offer_termination(this)); 4345 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 4346 _termination_time_ms += 4347 termination_end_time_ms - _termination_start_time_ms; 4348 4349 if (finished) { 4350 // We're all done. 4351 4352 if (_worker_id == 0) { 4353 // let's allow task 0 to do this 4354 if (concurrent()) { 4355 assert(_cm->concurrent_marking_in_progress(), "invariant"); 4356 // we need to set this to false before the next 4357 // safepoint. This way we ensure that the marking phase 4358 // doesn't observe any more heap expansions. 4359 _cm->clear_concurrent_marking_in_progress(); 4360 } 4361 } 4362 4363 // We can now guarantee that the global stack is empty, since 4364 // all other tasks have finished. We separated the guarantees so 4365 // that, if a condition is false, we can immediately find out 4366 // which one. 4367 guarantee(_cm->out_of_regions(), "only way to reach here"); 4368 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 4369 guarantee(_task_queue->size() == 0, "only way to reach here"); 4370 guarantee(!_cm->has_overflown(), "only way to reach here"); 4371 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 4372 4373 if (_cm->verbose_low()) { 4374 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); 4375 } 4376 } else { 4377 // Apparently there's more work to do. Let's abort this task. It 4378 // will restart it and we can hopefully find more things to do. 4379 4380 if (_cm->verbose_low()) { 4381 gclog_or_tty->print_cr("[%u] apparently there is more work to do", 4382 _worker_id); 4383 } 4384 4385 set_has_aborted(); 4386 statsOnly( ++_aborted_termination ); 4387 } 4388 } 4389 4390 // Mainly for debugging purposes to make sure that a pointer to the 4391 // closure which was statically allocated in this frame doesn't 4392 // escape it by accident. 4393 set_cm_oop_closure(NULL); 4394 double end_time_ms = os::elapsedVTime() * 1000.0; 4395 double elapsed_time_ms = end_time_ms - _start_time_ms; 4396 // Update the step history. 4397 _step_times_ms.add(elapsed_time_ms); 4398 4399 if (has_aborted()) { 4400 // The task was aborted for some reason. 4401 4402 statsOnly( ++_aborted ); 4403 4404 if (_has_timed_out) { 4405 double diff_ms = elapsed_time_ms - _time_target_ms; 4406 // Keep statistics of how well we did with respect to hitting 4407 // our target only if we actually timed out (if we aborted for 4408 // other reasons, then the results might get skewed). 4409 _marking_step_diffs_ms.add(diff_ms); 4410 } 4411 4412 if (_cm->has_overflown()) { 4413 // This is the interesting one. We aborted because a global 4414 // overflow was raised. 
This means we have to restart the 4415 // marking phase and start iterating over regions. However, in 4416 // order to do this we have to make sure that all tasks stop 4417 // what they are doing and re-initialize in a safe manner. We 4418 // will achieve this with the use of two barrier sync points. 4419 4420 if (_cm->verbose_low()) { 4421 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4422 } 4423 4424 if (!is_serial) { 4425 // We only need to enter the sync barrier if being called 4426 // from a parallel context 4427 _cm->enter_first_sync_barrier(_worker_id); 4428 4429 // When we exit this sync barrier we know that all tasks have 4430 // stopped doing marking work. So, it's now safe to 4431 // re-initialize our data structures. At the end of this method, 4432 // task 0 will clear the global data structures. 4433 } 4434 4435 statsOnly( ++_aborted_overflow ); 4436 4437 // We clear the local state of this task... 4438 clear_region_fields(); 4439 4440 if (!is_serial) { 4441 // ...and enter the second barrier. 4442 _cm->enter_second_sync_barrier(_worker_id); 4443 } 4444 // At this point, if we're during the concurrent phase of 4445 // marking, everything has been re-initialized and we're 4446 // ready to restart. 4447 } 4448 4449 if (_cm->verbose_low()) { 4450 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4451 "elapsed = %1.2lfms <<<<<<<<<<", 4452 _worker_id, _time_target_ms, elapsed_time_ms); 4453 if (_cm->has_aborted()) { 4454 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4455 _worker_id); 4456 } 4457 } 4458 } else { 4459 if (_cm->verbose_low()) { 4460 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4461 "elapsed = %1.2lfms <<<<<<<<<<", 4462 _worker_id, _time_target_ms, elapsed_time_ms); 4463 } 4464 } 4465 4466 _claimed = false; 4467 } 4468 4469 CMTask::CMTask(uint worker_id, 4470 ConcurrentMark* cm, 4471 size_t* marked_bytes, 4472 BitMap* card_bm, 4473 CMTaskQueue* task_queue, 4474 CMTaskQueueSet* task_queues) 4475 : _g1h(G1CollectedHeap::heap()), 4476 _worker_id(worker_id), _cm(cm), 4477 _claimed(false), 4478 _nextMarkBitMap(NULL), _hash_seed(17), 4479 _task_queue(task_queue), 4480 _task_queues(task_queues), 4481 _cm_oop_closure(NULL), 4482 _marked_bytes_array(marked_bytes), 4483 _card_bm(card_bm) { 4484 guarantee(task_queue != NULL, "invariant"); 4485 guarantee(task_queues != NULL, "invariant"); 4486 4487 statsOnly( _clock_due_to_scanning = 0; 4488 _clock_due_to_marking = 0 ); 4489 4490 _marking_step_diffs_ms.add(0.5); 4491 } 4492 4493 // These are formatting macros that are used below to ensure 4494 // consistent formatting. The *_H_* versions are used to format the 4495 // header for a particular value and they should be kept consistent 4496 // with the corresponding macro. Also note that most of the macros add 4497 // the necessary white space (as a prefix) which makes them a bit 4498 // easier to compose. 4499 4500 // All the output lines are prefixed with this string to be able to 4501 // identify them easily in a large log file. 
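// For example, with the formats defined below, the phase header comes out
// as "### PHASE Post-Marking @ 1.234" and each per-region line starts
// with "### OLD", "### HUMS", etc. (values illustrative only).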
4502 #define G1PPRL_LINE_PREFIX "###" 4503 4504 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT 4505 #ifdef _LP64 4506 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 4507 #else // _LP64 4508 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 4509 #endif // _LP64 4510 4511 // For per-region info 4512 #define G1PPRL_TYPE_FORMAT " %-4s" 4513 #define G1PPRL_TYPE_H_FORMAT " %4s" 4514 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) 4515 #define G1PPRL_BYTE_H_FORMAT " %9s" 4516 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 4517 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 4518 4519 // For summary info 4520 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT 4521 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT 4522 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" 4523 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" 4524 4525 G1PrintRegionLivenessInfoClosure:: 4526 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) 4527 : _out(out), 4528 _total_used_bytes(0), _total_capacity_bytes(0), 4529 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4530 _hum_used_bytes(0), _hum_capacity_bytes(0), 4531 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 4532 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 4533 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4534 MemRegion g1_committed = g1h->g1_committed(); 4535 MemRegion g1_reserved = g1h->g1_reserved(); 4536 double now = os::elapsedTime(); 4537 4538 // Print the header of the output. 4539 _out->cr(); 4540 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 4541 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" 4542 G1PPRL_SUM_ADDR_FORMAT("committed") 4543 G1PPRL_SUM_ADDR_FORMAT("reserved") 4544 G1PPRL_SUM_BYTE_FORMAT("region-size"), 4545 g1_committed.start(), g1_committed.end(), 4546 g1_reserved.start(), g1_reserved.end(), 4547 HeapRegion::GrainBytes); 4548 _out->print_cr(G1PPRL_LINE_PREFIX); 4549 _out->print_cr(G1PPRL_LINE_PREFIX 4550 G1PPRL_TYPE_H_FORMAT 4551 G1PPRL_ADDR_BASE_H_FORMAT 4552 G1PPRL_BYTE_H_FORMAT 4553 G1PPRL_BYTE_H_FORMAT 4554 G1PPRL_BYTE_H_FORMAT 4555 G1PPRL_DOUBLE_H_FORMAT 4556 G1PPRL_BYTE_H_FORMAT 4557 G1PPRL_BYTE_H_FORMAT, 4558 "type", "address-range", 4559 "used", "prev-live", "next-live", "gc-eff", 4560 "remset", "code-roots"); 4561 _out->print_cr(G1PPRL_LINE_PREFIX 4562 G1PPRL_TYPE_H_FORMAT 4563 G1PPRL_ADDR_BASE_H_FORMAT 4564 G1PPRL_BYTE_H_FORMAT 4565 G1PPRL_BYTE_H_FORMAT 4566 G1PPRL_BYTE_H_FORMAT 4567 G1PPRL_DOUBLE_H_FORMAT 4568 G1PPRL_BYTE_H_FORMAT 4569 G1PPRL_BYTE_H_FORMAT, 4570 "", "", 4571 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 4572 "(bytes)", "(bytes)"); 4573 } 4574 4575 // It takes as a parameter a reference to one of the _hum_* fields, it 4576 // deduces the corresponding value for a region in a humongous region 4577 // series (either the region size, or what's left if the _hum_* field 4578 // is < the region size), and updates the _hum_* field accordingly. 4579 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { 4580 size_t bytes = 0; 4581 // The > 0 check is to deal with the prev and next live bytes which 4582 // could be 0. 4583 if (*hum_bytes > 0) { 4584 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); 4585 *hum_bytes -= bytes; 4586 } 4587 return bytes; 4588 } 4589 4590 // It deduces the values for a region in a humongous region series 4591 // from the _hum_* fields and updates those accordingly. 
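// (Illustrative example: for a 2.5 MB humongous series with 1 MB regions,
// successive calls return 1 MB, 1 MB, and 0.5 MB, after which the
// corresponding _hum_* field is back to zero.)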
It assumes
4592 // that the _hum_* fields have already been set up from the "starts
4593 // humongous" region and that we visit the regions in address order.
4594 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4595                                                      size_t* capacity_bytes,
4596                                                      size_t* prev_live_bytes,
4597                                                      size_t* next_live_bytes) {
4598   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4599   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
4600   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
4601   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4602   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4603 }
4604
4605 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4606   const char* type       = "";
4607   HeapWord* bottom       = r->bottom();
4608   HeapWord* end          = r->end();
4609   size_t capacity_bytes  = r->capacity();
4610   size_t used_bytes      = r->used();
4611   size_t prev_live_bytes = r->live_bytes();
4612   size_t next_live_bytes = r->next_live_bytes();
4613   double gc_eff          = r->gc_efficiency();
4614   size_t remset_bytes    = r->rem_set()->mem_size();
4615   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4616
4617   if (r->used() == 0) {
4618     type = "FREE";
4619   } else if (r->is_survivor()) {
4620     type = "SURV";
4621   } else if (r->is_young()) {
4622     type = "EDEN";
4623   } else if (r->startsHumongous()) {
4624     type = "HUMS";
4625
4626     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4627            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4628            "they should have been zeroed after the last time we used them");
4629     // Set up the _hum_* fields.
4630     _hum_capacity_bytes  = capacity_bytes;
4631     _hum_used_bytes      = used_bytes;
4632     _hum_prev_live_bytes = prev_live_bytes;
4633     _hum_next_live_bytes = next_live_bytes;
4634     get_hum_bytes(&used_bytes, &capacity_bytes,
4635                   &prev_live_bytes, &next_live_bytes);
4636     end = bottom + HeapRegion::GrainWords;
4637   } else if (r->continuesHumongous()) {
4638     type = "HUMC";
4639     get_hum_bytes(&used_bytes, &capacity_bytes,
4640                   &prev_live_bytes, &next_live_bytes);
4641     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4642   } else {
4643     type = "OLD";
4644   }
4645
4646   _total_used_bytes      += used_bytes;
4647   _total_capacity_bytes  += capacity_bytes;
4648   _total_prev_live_bytes += prev_live_bytes;
4649   _total_next_live_bytes += next_live_bytes;
4650   _total_remset_bytes    += remset_bytes;
4651   _total_strong_code_roots_bytes += strong_code_roots_bytes;
4652
4653   // Print a line for this particular region.
4654   _out->print_cr(G1PPRL_LINE_PREFIX
4655                  G1PPRL_TYPE_FORMAT
4656                  G1PPRL_ADDR_BASE_FORMAT
4657                  G1PPRL_BYTE_FORMAT
4658                  G1PPRL_BYTE_FORMAT
4659                  G1PPRL_BYTE_FORMAT
4660                  G1PPRL_DOUBLE_FORMAT
4661                  G1PPRL_BYTE_FORMAT
4662                  G1PPRL_BYTE_FORMAT,
4663                  type, bottom, end,
4664                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
4665                  remset_bytes, strong_code_roots_bytes);
4666
4667   return false;
4668 }
4669
4670 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4671   // Add static memory usages to remembered set sizes.
4672   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
4673   // Print the footer of the output.
4674 _out->print_cr(G1PPRL_LINE_PREFIX); 4675 _out->print_cr(G1PPRL_LINE_PREFIX 4676 " SUMMARY" 4677 G1PPRL_SUM_MB_FORMAT("capacity") 4678 G1PPRL_SUM_MB_PERC_FORMAT("used") 4679 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4680 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4681 G1PPRL_SUM_MB_FORMAT("remset") 4682 G1PPRL_SUM_MB_FORMAT("code-roots"), 4683 bytes_to_mb(_total_capacity_bytes), 4684 bytes_to_mb(_total_used_bytes), 4685 perc(_total_used_bytes, _total_capacity_bytes), 4686 bytes_to_mb(_total_prev_live_bytes), 4687 perc(_total_prev_live_bytes, _total_capacity_bytes), 4688 bytes_to_mb(_total_next_live_bytes), 4689 perc(_total_next_live_bytes, _total_capacity_bytes), 4690 bytes_to_mb(_total_remset_bytes), 4691 bytes_to_mb(_total_strong_code_roots_bytes)); 4692 _out->cr(); 4693 }
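// A usage sketch of this closure (see ConcurrentMark::cleanup(); the flag
// check and caller shown here are a sketch, not a definitive listing):
//
//   if (G1PrintRegionLivenessInfo) {
//     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//     _g1h->heap_region_iterate(&cl);
//   }
//
// The closure prints one "###" line per region as it visits them, and its
// destructor then emits the SUMMARY footer above.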