/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize  = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
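  // (The bitmap maps one bit to every (1 << _shifter) heap words, so
  //  only addresses aligned at that granularity can carry a mark bit.)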
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize  == heap_rs.size() >> LogHeapWordSize;
}
#endif

bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
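  // Committing eagerly trades a little footprint for simplicity: the
  // bitmap needs only one bit per (1 << _shifter) heap words, so it is
  // small relative to the heap it covers.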
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((uintptr_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}

void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
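  // Expansion policy: double the capacity, capped at MarkStackSizeMax.
  // Failing to expand is benign; marking copes with an overflow by
  // restarting, so we just keep the old stack in that case.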
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
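  // Publish the new index first to claim the slots, then fill them in.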
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
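  // Read _next_survivor once without the lock; only if it looks
  // claimable do we take RootRegionScan_lock and re-check it.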
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(MinObjAlignment - 1),
  _markBitMap2(MinObjAlignment - 1),

  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
           false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = "PTR_FORMAT, _heap_start, _heap_end);
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
            "than ParallelGCThreads (" UINT32_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads =     0;
    _max_parallel_marking_threads = 0;
    _sleep_factor             =   0.0;
    _marking_task_overhead    =   1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor          = 0.0;
      _marking_task_overhead = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
                                              (double) os::processor_count();
      double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor          = sleep_factor;
      _marking_task_overhead = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
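      // scale_parallel_threads() yields roughly a quarter of
      // ParallelGCThreads, but always at least one thread.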
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor          = 0.0;
      _marking_task_overhead = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
                     (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
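    // Only the combinations that were explicitly specified on the
    // command line are validated here; defaults are assumed consistent.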
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.
    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions
  // at the end of evacuation pauses, when the tasks are inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end   = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(_finger == _heap_end, "only way to get here");
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
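  // Each chunk covers M heap words, so the concurrent mark thread gets
  // a chance to yield (via do_yield_check() below) at regular intervals.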
  HeapWord* start  = _nextMarkBitMap->startWord();
  HeapWord* end    = _nextMarkBitMap->endWord();
  HeapWord* cur    = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur,next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining number of overflows to force will
  // decrease at every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _first_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
  }

  // let the task associated with worker 0 do this
  if (worker_id == 0) {
    // task 0 is responsible for clearing the global data structures
    // We should be here because of an overflow. During STW we should
    // not clear the overflow flag since we rely on it being true when
    // we exit this method to abort the pause and restart concurrent
    // marking.
    reset_marking_state(concurrent() /* clear_overflow */);
    force_overflow()->update();

    if (G1Log::fine()) {
      gclog_or_tty->date_stamp(PrintGCDateStamps);
      gclog_or_tty->stamp(PrintGCTimeStamps);
      gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _second_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everything should be re-initialised and ready to go

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    ConcurrentGCThread::stsJoin();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          ConcurrentGCThread::stsLeave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          ConcurrentGCThread::stsJoin();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
        gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                               "overhead %1.4lf",
                               elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                               the_task->conc_overhead(os::elapsedTime()) * 8.0);
        gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                               elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
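    // We get here when the task finished normally or when the global
    // abort flag is set; a task-local abort alone loops back into
    // do_marking_step() above (after the optional sleep).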
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    ConcurrentGCThread::stsLeave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_phase()"
  set_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_strong_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    gclog_or_tty->print(" VerifyDuringGC:(before)");
    Universe::heap()->prepare_for_verify();
    Universe::verify(/* silent */ false,
                     /* option */ VerifyOption_G1UsePrevMarking);
  }

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      gclog_or_tty->print(" VerifyDuringGC:(after)");
      Universe::heap()->prepare_for_verify();
      Universe::verify(/* silent */ false,
                       /* option */ VerifyOption_G1UseNextMarking);
    }
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

#if VERIFY_OBJS_PROCESSED
  _scan_obj_cl.objs_processed = 0;
  ThreadLocalObjQueue::objs_enqueued = 0;
#endif

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   start, ntams, hr->end()));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrs_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        if (_verbose) {
          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                 "expected: %s, actual: %s",
                                 hr->hrs_index(), i,
                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
        }
        failures += 1;
      }
    }

    if (failures > 0 && _verbose) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
                             HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};


class G1ParVerifyFinalCountTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint    _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int  _failures;
  bool _verbose;

public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0), _verbose(false),
      _n_workers(0) {
    assert(VerifyDuringGC, "don't call this otherwise");

    // Use the value already set as the number of active threads
    // in the call to run_task().
1624 if (G1CollectedHeap::use_parallel_gc_threads()) { 1625 assert( _g1h->workers()->active_workers() > 0, 1626 "Should have been previously set"); 1627 _n_workers = _g1h->workers()->active_workers(); 1628 } else { 1629 _n_workers = 1; 1630 } 1631 1632 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1633 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1634 1635 _verbose = _cm->verbose_medium(); 1636 } 1637 1638 void work(uint worker_id) { 1639 assert(worker_id < _n_workers, "invariant"); 1640 1641 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1642 _actual_region_bm, _actual_card_bm, 1643 _expected_region_bm, 1644 _expected_card_bm, 1645 _verbose); 1646 1647 if (G1CollectedHeap::use_parallel_gc_threads()) { 1648 _g1h->heap_region_par_iterate_chunked(&verify_cl, 1649 worker_id, 1650 _n_workers, 1651 HeapRegion::VerifyCountClaimValue); 1652 } else { 1653 _g1h->heap_region_iterate(&verify_cl); 1654 } 1655 1656 Atomic::add(verify_cl.failures(), &_failures); 1657 } 1658 1659 int failures() const { return _failures; } 1660 }; 1661 1662 // Closure that finalizes the liveness counting data. 1663 // Used during the cleanup pause. 1664 // Sets the bits corresponding to the interval [NTAMS, top] 1665 // (which contains the implicitly live objects) in the 1666 // card liveness bitmap. Also sets the bit for each region, 1667 // containing live data, in the region liveness bitmap. 1668 1669 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1670 public: 1671 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1672 BitMap* region_bm, 1673 BitMap* card_bm) : 1674 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1675 1676 bool doHeapRegion(HeapRegion* hr) { 1677 1678 if (hr->continuesHumongous()) { 1679 // We will ignore these here and process them when their 1680 // associated "starts humongous" region is processed (see 1681 // set_bit_for_heap_region()). Note that we cannot rely on their 1682 // associated "starts humongous" region to have their bit set to 1683 // 1 since, due to the region chunking in the parallel region 1684 // iteration, a "continues humongous" region might be visited 1685 // before its associated "starts humongous". 1686 return false; 1687 } 1688 1689 HeapWord* ntams = hr->next_top_at_mark_start(); 1690 HeapWord* top = hr->top(); 1691 1692 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1693 1694 // Mark the allocated-since-marking portion... 1695 if (ntams < top) { 1696 // This definitely means the region has live objects. 1697 set_bit_for_region(hr); 1698 1699 // Now set the bits in the card bitmap for [ntams, top) 1700 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1701 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1702 1703 // Note: if we're looking at the last region in heap - top 1704 // could be actually just beyond the end of the heap; end_idx 1705 // will then correspond to a (non-existent) card that is also 1706 // just beyond the heap. 
1707 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1708 // end of object is not card aligned - increment to cover 1709 // all the cards spanned by the object 1710 end_idx += 1; 1711 } 1712 1713 assert(end_idx <= _card_bm->size(), 1714 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1715 end_idx, _card_bm->size())); 1716 assert(start_idx < _card_bm->size(), 1717 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1718 start_idx, _card_bm->size())); 1719 1720 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1721 } 1722 1723 // Set the bit for the region if it contains live data 1724 if (hr->next_marked_bytes() > 0) { 1725 set_bit_for_region(hr); 1726 } 1727 1728 return false; 1729 } 1730 }; 1731 1732 class G1ParFinalCountTask: public AbstractGangTask { 1733 protected: 1734 G1CollectedHeap* _g1h; 1735 ConcurrentMark* _cm; 1736 BitMap* _actual_region_bm; 1737 BitMap* _actual_card_bm; 1738 1739 uint _n_workers; 1740 1741 public: 1742 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1743 : AbstractGangTask("G1 final counting"), 1744 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1745 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1746 _n_workers(0) { 1747 // Use the value already set as the number of active threads 1748 // in the call to run_task(). 1749 if (G1CollectedHeap::use_parallel_gc_threads()) { 1750 assert( _g1h->workers()->active_workers() > 0, 1751 "Should have been previously set"); 1752 _n_workers = _g1h->workers()->active_workers(); 1753 } else { 1754 _n_workers = 1; 1755 } 1756 } 1757 1758 void work(uint worker_id) { 1759 assert(worker_id < _n_workers, "invariant"); 1760 1761 FinalCountDataUpdateClosure final_update_cl(_g1h, 1762 _actual_region_bm, 1763 _actual_card_bm); 1764 1765 if (G1CollectedHeap::use_parallel_gc_threads()) { 1766 _g1h->heap_region_par_iterate_chunked(&final_update_cl, 1767 worker_id, 1768 _n_workers, 1769 HeapRegion::FinalCountClaimValue); 1770 } else { 1771 _g1h->heap_region_iterate(&final_update_cl); 1772 } 1773 } 1774 }; 1775 1776 class G1ParNoteEndTask; 1777 1778 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1779 G1CollectedHeap* _g1; 1780 int _worker_num; 1781 size_t _max_live_bytes; 1782 uint _regions_claimed; 1783 size_t _freed_bytes; 1784 FreeRegionList* _local_cleanup_list; 1785 OldRegionSet* _old_proxy_set; 1786 HumongousRegionSet* _humongous_proxy_set; 1787 HRRSCleanupTask* _hrrs_cleanup_task; 1788 double _claimed_region_time; 1789 double _max_region_time; 1790 1791 public: 1792 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1793 int worker_num, 1794 FreeRegionList* local_cleanup_list, 1795 OldRegionSet* old_proxy_set, 1796 HumongousRegionSet* humongous_proxy_set, 1797 HRRSCleanupTask* hrrs_cleanup_task) : 1798 _g1(g1), _worker_num(worker_num), 1799 _max_live_bytes(0), _regions_claimed(0), 1800 _freed_bytes(0), 1801 _claimed_region_time(0.0), _max_region_time(0.0), 1802 _local_cleanup_list(local_cleanup_list), 1803 _old_proxy_set(old_proxy_set), 1804 _humongous_proxy_set(humongous_proxy_set), 1805 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1806 1807 size_t freed_bytes() { return _freed_bytes; } 1808 1809 bool doHeapRegion(HeapRegion *hr) { 1810 if (hr->continuesHumongous()) { 1811 return false; 1812 } 1813 // We use a claim value of zero here because all regions 1814 // were claimed with value 1 in the FinalCount task. 
1815 _g1->reset_gc_time_stamps(hr); 1816 double start = os::elapsedTime(); 1817 _regions_claimed++; 1818 hr->note_end_of_marking(); 1819 _max_live_bytes += hr->max_live_bytes(); 1820 _g1->free_region_if_empty(hr, 1821 &_freed_bytes, 1822 _local_cleanup_list, 1823 _old_proxy_set, 1824 _humongous_proxy_set, 1825 _hrrs_cleanup_task, 1826 true /* par */); 1827 double region_time = (os::elapsedTime() - start); 1828 _claimed_region_time += region_time; 1829 if (region_time > _max_region_time) { 1830 _max_region_time = region_time; 1831 } 1832 return false; 1833 } 1834 1835 size_t max_live_bytes() { return _max_live_bytes; } 1836 uint regions_claimed() { return _regions_claimed; } 1837 double claimed_region_time_sec() { return _claimed_region_time; } 1838 double max_region_time_sec() { return _max_region_time; } 1839 }; 1840 1841 class G1ParNoteEndTask: public AbstractGangTask { 1842 friend class G1NoteEndOfConcMarkClosure; 1843 1844 protected: 1845 G1CollectedHeap* _g1h; 1846 size_t _max_live_bytes; 1847 size_t _freed_bytes; 1848 FreeRegionList* _cleanup_list; 1849 1850 public: 1851 G1ParNoteEndTask(G1CollectedHeap* g1h, 1852 FreeRegionList* cleanup_list) : 1853 AbstractGangTask("G1 note end"), _g1h(g1h), 1854 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { } 1855 1856 void work(uint worker_id) { 1857 double start = os::elapsedTime(); 1858 FreeRegionList local_cleanup_list("Local Cleanup List"); 1859 OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set"); 1860 HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set"); 1861 HRRSCleanupTask hrrs_cleanup_task; 1862 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list, 1863 &old_proxy_set, 1864 &humongous_proxy_set, 1865 &hrrs_cleanup_task); 1866 if (G1CollectedHeap::use_parallel_gc_threads()) { 1867 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id, 1868 _g1h->workers()->active_workers(), 1869 HeapRegion::NoteEndClaimValue); 1870 } else { 1871 _g1h->heap_region_iterate(&g1_note_end); 1872 } 1873 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1874 1875 // Now update the lists 1876 _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(), 1877 NULL /* free_list */, 1878 &old_proxy_set, 1879 &humongous_proxy_set, 1880 true /* par */); 1881 { 1882 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1883 _max_live_bytes += g1_note_end.max_live_bytes(); 1884 _freed_bytes += g1_note_end.freed_bytes(); 1885 1886 // If we iterate over the global cleanup list at the end of 1887 // cleanup to do this printing we will not guarantee to only 1888 // generate output for the newly-reclaimed regions (the list 1889 // might not be empty at the beginning of cleanup; we might 1890 // still be working on its previous contents). So we do the 1891 // printing here, before we append the new regions to the global 1892 // cleanup list. 
1893 1894 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1895 if (hr_printer->is_active()) { 1896 HeapRegionLinkedListIterator iter(&local_cleanup_list); 1897 while (iter.more_available()) { 1898 HeapRegion* hr = iter.get_next(); 1899 hr_printer->cleanup(hr); 1900 } 1901 } 1902 1903 _cleanup_list->add_as_tail(&local_cleanup_list); 1904 assert(local_cleanup_list.is_empty(), "post-condition"); 1905 1906 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1907 } 1908 } 1909 size_t max_live_bytes() { return _max_live_bytes; } 1910 size_t freed_bytes() { return _freed_bytes; } 1911 }; 1912 1913 class G1ParScrubRemSetTask: public AbstractGangTask { 1914 protected: 1915 G1RemSet* _g1rs; 1916 BitMap* _region_bm; 1917 BitMap* _card_bm; 1918 public: 1919 G1ParScrubRemSetTask(G1CollectedHeap* g1h, 1920 BitMap* region_bm, BitMap* card_bm) : 1921 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), 1922 _region_bm(region_bm), _card_bm(card_bm) { } 1923 1924 void work(uint worker_id) { 1925 if (G1CollectedHeap::use_parallel_gc_threads()) { 1926 _g1rs->scrub_par(_region_bm, _card_bm, worker_id, 1927 HeapRegion::ScrubRemSetClaimValue); 1928 } else { 1929 _g1rs->scrub(_region_bm, _card_bm); 1930 } 1931 } 1932 1933 }; 1934 1935 void ConcurrentMark::cleanup() { 1936 // world is stopped at this checkpoint 1937 assert(SafepointSynchronize::is_at_safepoint(), 1938 "world should be stopped"); 1939 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1940 1941 // If a full collection has happened, we shouldn't do this. 1942 if (has_aborted()) { 1943 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1944 return; 1945 } 1946 1947 HRSPhaseSetter x(HRSPhaseCleanup); 1948 g1h->verify_region_sets_optional(); 1949 1950 if (VerifyDuringGC) { 1951 HandleMark hm; // handle scope 1952 gclog_or_tty->print(" VerifyDuringGC:(before)"); 1953 Universe::heap()->prepare_for_verify(); 1954 Universe::verify(/* silent */ false, 1955 /* option */ VerifyOption_G1UsePrevMarking); 1956 } 1957 1958 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 1959 g1p->record_concurrent_mark_cleanup_start(); 1960 1961 double start = os::elapsedTime(); 1962 1963 HeapRegionRemSet::reset_for_cleanup_tasks(); 1964 1965 uint n_workers; 1966 1967 // Do counting once more with the world stopped for good measure. 1968 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 1969 1970 if (G1CollectedHeap::use_parallel_gc_threads()) { 1971 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 1972 "sanity check"); 1973 1974 g1h->set_par_threads(); 1975 n_workers = g1h->n_par_threads(); 1976 assert(g1h->n_par_threads() == n_workers, 1977 "Should not have been reset"); 1978 g1h->workers()->run_task(&g1_par_count_task); 1979 // Done with the parallel phase so reset to 0. 1980 g1h->set_par_threads(0); 1981 1982 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue), 1983 "sanity check"); 1984 } else { 1985 n_workers = 1; 1986 g1_par_count_task.work(0); 1987 } 1988 1989 if (VerifyDuringGC) { 1990 // Verify that the counting data accumulated during marking matches 1991 // that calculated by walking the marking bitmap. 
1992 1993 // Bitmaps to hold expected values 1994 BitMap expected_region_bm(_region_bm.size(), false); 1995 BitMap expected_card_bm(_card_bm.size(), false); 1996 1997 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 1998 &_region_bm, 1999 &_card_bm, 2000 &expected_region_bm, 2001 &expected_card_bm); 2002 2003 if (G1CollectedHeap::use_parallel_gc_threads()) { 2004 g1h->set_par_threads((int)n_workers); 2005 g1h->workers()->run_task(&g1_par_verify_task); 2006 // Done with the parallel phase so reset to 0. 2007 g1h->set_par_threads(0); 2008 2009 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue), 2010 "sanity check"); 2011 } else { 2012 g1_par_verify_task.work(0); 2013 } 2014 2015 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); 2016 } 2017 2018 size_t start_used_bytes = g1h->used(); 2019 g1h->set_marking_complete(); 2020 2021 double count_end = os::elapsedTime(); 2022 double this_final_counting_time = (count_end - start); 2023 _total_counting_time += this_final_counting_time; 2024 2025 if (G1PrintRegionLivenessInfo) { 2026 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); 2027 _g1h->heap_region_iterate(&cl); 2028 } 2029 2030 // Install newly created mark bitMap as "prev". 2031 swapMarkBitMaps(); 2032 2033 g1h->reset_gc_time_stamp(); 2034 2035 // Note end of marking in all heap regions. 2036 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list); 2037 if (G1CollectedHeap::use_parallel_gc_threads()) { 2038 g1h->set_par_threads((int)n_workers); 2039 g1h->workers()->run_task(&g1_par_note_end_task); 2040 g1h->set_par_threads(0); 2041 2042 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), 2043 "sanity check"); 2044 } else { 2045 g1_par_note_end_task.work(0); 2046 } 2047 g1h->check_gc_time_stamps(); 2048 2049 if (!cleanup_list_is_empty()) { 2050 // The cleanup list is not empty, so we'll have to process it 2051 // concurrently. Notify anyone else that might be wanting free 2052 // regions that there will be more free regions coming soon. 2053 g1h->set_free_regions_coming(); 2054 } 2055 2056 // call below, since it affects the metric by which we sort the heap 2057 // regions. 2058 if (G1ScrubRemSets) { 2059 double rs_scrub_start = os::elapsedTime(); 2060 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); 2061 if (G1CollectedHeap::use_parallel_gc_threads()) { 2062 g1h->set_par_threads((int)n_workers); 2063 g1h->workers()->run_task(&g1_par_scrub_rs_task); 2064 g1h->set_par_threads(0); 2065 2066 assert(g1h->check_heap_region_claim_values( 2067 HeapRegion::ScrubRemSetClaimValue), 2068 "sanity check"); 2069 } else { 2070 g1_par_scrub_rs_task.work(0); 2071 } 2072 2073 double rs_scrub_end = os::elapsedTime(); 2074 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); 2075 _total_rs_scrub_time += this_rs_scrub_time; 2076 } 2077 2078 // this will also free any regions totally full of garbage objects, 2079 // and sort the regions. 2080 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers); 2081 2082 // Statistics. 2083 double end = os::elapsedTime(); 2084 _cleanup_times.add((end - start) * 1000.0); 2085 2086 if (G1Log::fine()) { 2087 g1h->print_size_transition(gclog_or_tty, 2088 start_used_bytes, 2089 g1h->used(), 2090 g1h->capacity()); 2091 } 2092 2093 // Clean up will have freed any regions completely full of garbage. 2094 // Update the soft reference policy with the new heap occupancy. 
2095 Universe::update_heap_info_at_gc(); 2096 2097 // We need to make this be a "collection" so any collection pause that 2098 // races with it goes around and waits for completeCleanup to finish. 2099 g1h->increment_total_collections(); 2100 2101 // We reclaimed old regions so we should calculate the sizes to make 2102 // sure we update the old gen/space data. 2103 g1h->g1mm()->update_sizes(); 2104 2105 if (VerifyDuringGC) { 2106 HandleMark hm; // handle scope 2107 gclog_or_tty->print(" VerifyDuringGC:(after)"); 2108 Universe::heap()->prepare_for_verify(); 2109 Universe::verify(/* silent */ false, 2110 /* option */ VerifyOption_G1UsePrevMarking); 2111 } 2112 2113 g1h->verify_region_sets_optional(); 2114 } 2115 2116 void ConcurrentMark::completeCleanup() { 2117 if (has_aborted()) return; 2118 2119 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2120 2121 _cleanup_list.verify_optional(); 2122 FreeRegionList tmp_free_list("Tmp Free List"); 2123 2124 if (G1ConcRegionFreeingVerbose) { 2125 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 2126 "cleanup list has %u entries", 2127 _cleanup_list.length()); 2128 } 2129 2130 // Noone else should be accessing the _cleanup_list at this point, 2131 // so it's not necessary to take any locks 2132 while (!_cleanup_list.is_empty()) { 2133 HeapRegion* hr = _cleanup_list.remove_head(); 2134 assert(hr != NULL, "the list was not empty"); 2135 hr->par_clear(); 2136 tmp_free_list.add_as_tail(hr); 2137 2138 // Instead of adding one region at a time to the secondary_free_list, 2139 // we accumulate them in the local list and move them a few at a 2140 // time. This also cuts down on the number of notify_all() calls 2141 // we do during this process. We'll also append the local list when 2142 // _cleanup_list is empty (which means we just removed the last 2143 // region from the _cleanup_list). 2144 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || 2145 _cleanup_list.is_empty()) { 2146 if (G1ConcRegionFreeingVerbose) { 2147 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 2148 "appending %u entries to the secondary_free_list, " 2149 "cleanup list still has %u entries", 2150 tmp_free_list.length(), 2151 _cleanup_list.length()); 2152 } 2153 2154 { 2155 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 2156 g1h->secondary_free_list_add_as_tail(&tmp_free_list); 2157 SecondaryFreeList_lock->notify_all(); 2158 } 2159 2160 if (G1StressConcRegionFreeing) { 2161 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { 2162 os::sleep(Thread::current(), (jlong) 1, false); 2163 } 2164 } 2165 } 2166 } 2167 assert(tmp_free_list.is_empty(), "post-condition"); 2168 } 2169 2170 // Supporting Object and Oop closures for reference discovery 2171 // and processing in during marking 2172 2173 bool G1CMIsAliveClosure::do_object_b(oop obj) { 2174 HeapWord* addr = (HeapWord*)obj; 2175 return addr != NULL && 2176 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); 2177 } 2178 2179 // 'Keep Alive' oop closure used by both serial parallel reference processing. 2180 // Uses the CMTask associated with a worker thread (for serial reference 2181 // processing the CMTask for worker 0 is used) to preserve (mark) and 2182 // trace referent objects. 2183 // 2184 // Using the CMTask and embedded local queues avoids having the worker 2185 // threads operating on the global mark stack. This reduces the risk 2186 // of overflowing the stack - which we would rather avoid at this late 2187 // state. 
Also using the tasks' local queues removes the potential 2188 // of the workers interfering with each other that could occur if 2189 // operating on the global stack. 2190 2191 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2192 ConcurrentMark* _cm; 2193 CMTask* _task; 2194 int _ref_counter_limit; 2195 int _ref_counter; 2196 bool _is_serial; 2197 public: 2198 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2199 _cm(cm), _task(task), _is_serial(is_serial), 2200 _ref_counter_limit(G1RefProcDrainInterval) { 2201 assert(_ref_counter_limit > 0, "sanity"); 2202 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2203 _ref_counter = _ref_counter_limit; 2204 } 2205 2206 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2207 virtual void do_oop( oop* p) { do_oop_work(p); } 2208 2209 template <class T> void do_oop_work(T* p) { 2210 if (!_cm->has_overflown()) { 2211 oop obj = oopDesc::load_decode_heap_oop(p); 2212 if (_cm->verbose_high()) { 2213 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2214 "*"PTR_FORMAT" = "PTR_FORMAT, 2215 _task->worker_id(), p, (void*) obj); 2216 } 2217 2218 _task->deal_with_reference(obj); 2219 _ref_counter--; 2220 2221 if (_ref_counter == 0) { 2222 // We have dealt with _ref_counter_limit references, pushing them 2223 // and objects reachable from them on to the local stack (and 2224 // possibly the global stack). Call CMTask::do_marking_step() to 2225 // process these entries. 2226 // 2227 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2228 // there's nothing more to do (i.e. we're done with the entries that 2229 // were pushed as a result of the CMTask::deal_with_reference() calls 2230 // above) or we overflow. 2231 // 2232 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2233 // flag while there may still be some work to do. (See the comment at 2234 // the beginning of CMTask::do_marking_step() for those conditions - 2235 // one of which is reaching the specified time target.) It is only 2236 // when CMTask::do_marking_step() returns without setting the 2237 // has_aborted() flag that the marking step has completed. 2238 do { 2239 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2240 _task->do_marking_step(mark_step_duration_ms, 2241 false /* do_termination */, 2242 _is_serial /* is_serial */); 2243 } while (_task->has_aborted() && !_cm->has_overflown()); 2244 _ref_counter = _ref_counter_limit; 2245 } 2246 } else { 2247 if (_cm->verbose_high()) { 2248 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2249 } 2250 } 2251 } 2252 }; 2253 2254 // 'Drain' oop closure used by both serial and parallel reference processing. 2255 // Uses the CMTask associated with a given worker thread (for serial 2256 // reference processing the CMtask for worker 0 is used). Calls the 2257 // do_marking_step routine, with an unbelievably large timeout value, 2258 // to drain the marking data structures of the remaining entries 2259 // added by the 'keep alive' oop closure above. 
2260 2261 class G1CMDrainMarkingStackClosure: public VoidClosure { 2262 ConcurrentMark* _cm; 2263 CMTask* _task; 2264 bool _is_serial; 2265 public: 2266 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2267 _cm(cm), _task(task) { 2268 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2269 } 2270 2271 void do_void() { 2272 do { 2273 if (_cm->verbose_high()) { 2274 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s", 2275 _task->worker_id(), BOOL_TO_STR(_is_serial)); 2276 } 2277 2278 // We call CMTask::do_marking_step() to completely drain the local 2279 // and global marking stacks of entries pushed by the 'keep alive' 2280 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). 2281 // 2282 // CMTask::do_marking_step() is called in a loop, which we'll exit 2283 // if there's nothing more to do (i.e. we'completely drained the 2284 // entries that were pushed as a a result of applying the 'keep alive' 2285 // closure to the entries on the discovered ref lists) or we overflow 2286 // the global marking stack. 2287 // 2288 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2289 // flag while there may still be some work to do. (See the comment at 2290 // the beginning of CMTask::do_marking_step() for those conditions - 2291 // one of which is reaching the specified time target.) It is only 2292 // when CMTask::do_marking_step() returns without setting the 2293 // has_aborted() flag that the marking step has completed. 2294 2295 _task->do_marking_step(1000000000.0 /* something very large */, 2296 true /* do_termination */, 2297 _is_serial /* is_serial */); 2298 } while (_task->has_aborted() && !_cm->has_overflown()); 2299 } 2300 }; 2301 2302 // Implementation of AbstractRefProcTaskExecutor for parallel 2303 // reference processing at the end of G1 concurrent marking 2304 2305 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 2306 private: 2307 G1CollectedHeap* _g1h; 2308 ConcurrentMark* _cm; 2309 WorkGang* _workers; 2310 int _active_workers; 2311 2312 public: 2313 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 2314 ConcurrentMark* cm, 2315 WorkGang* workers, 2316 int n_workers) : 2317 _g1h(g1h), _cm(cm), 2318 _workers(workers), _active_workers(n_workers) { } 2319 2320 // Executes the given task using concurrent marking worker threads. 
2321 virtual void execute(ProcessTask& task); 2322 virtual void execute(EnqueueTask& task); 2323 }; 2324 2325 class G1CMRefProcTaskProxy: public AbstractGangTask { 2326 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2327 ProcessTask& _proc_task; 2328 G1CollectedHeap* _g1h; 2329 ConcurrentMark* _cm; 2330 2331 public: 2332 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2333 G1CollectedHeap* g1h, 2334 ConcurrentMark* cm) : 2335 AbstractGangTask("Process reference objects in parallel"), 2336 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2337 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2338 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2339 } 2340 2341 virtual void work(uint worker_id) { 2342 CMTask* task = _cm->task(worker_id); 2343 G1CMIsAliveClosure g1_is_alive(_g1h); 2344 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2345 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2346 2347 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2348 } 2349 }; 2350 2351 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2352 assert(_workers != NULL, "Need parallel worker threads."); 2353 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2354 2355 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2356 2357 // We need to reset the phase for each task execution so that 2358 // the termination protocol of CMTask::do_marking_step works. 2359 _cm->set_phase(_active_workers, false /* concurrent */); 2360 _g1h->set_par_threads(_active_workers); 2361 _workers->run_task(&proc_task_proxy); 2362 _g1h->set_par_threads(0); 2363 } 2364 2365 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2366 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2367 EnqueueTask& _enq_task; 2368 2369 public: 2370 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2371 AbstractGangTask("Enqueue reference objects in parallel"), 2372 _enq_task(enq_task) { } 2373 2374 virtual void work(uint worker_id) { 2375 _enq_task.work(worker_id); 2376 } 2377 }; 2378 2379 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2380 assert(_workers != NULL, "Need parallel worker threads."); 2381 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2382 2383 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2384 2385 _g1h->set_par_threads(_active_workers); 2386 _workers->run_task(&enq_task_proxy); 2387 _g1h->set_par_threads(0); 2388 } 2389 2390 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2391 ResourceMark rm; 2392 HandleMark hm; 2393 2394 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2395 2396 // Is alive closure. 2397 G1CMIsAliveClosure g1_is_alive(g1h); 2398 2399 // Inner scope to exclude the cleaning of the string and symbol 2400 // tables from the displayed time. 2401 { 2402 if (G1Log::finer()) { 2403 gclog_or_tty->put(' '); 2404 } 2405 TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty); 2406 2407 ReferenceProcessor* rp = g1h->ref_processor_cm(); 2408 2409 // See the comment in G1CollectedHeap::ref_processing_init() 2410 // about how reference processing currently works in G1. 2411 2412 // Set the soft reference policy 2413 rp->setup_policy(clear_all_soft_refs); 2414 assert(_markStack.isEmpty(), "mark stack should be empty"); 2415 2416 // Instances of the 'Keep Alive' and 'Complete GC' closures used 2417 // in serial reference processing. 
Note these closures are also 2418 // used for serially processing (by the the current thread) the 2419 // JNI references during parallel reference processing. 2420 // 2421 // These closures do not need to synchronize with the worker 2422 // threads involved in parallel reference processing as these 2423 // instances are executed serially by the current thread (e.g. 2424 // reference processing is not multi-threaded and is thus 2425 // performed by the current thread instead of a gang worker). 2426 // 2427 // The gang tasks involved in parallel reference procssing create 2428 // their own instances of these closures, which do their own 2429 // synchronization among themselves. 2430 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 2431 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 2432 2433 // We need at least one active thread. If reference processing 2434 // is not multi-threaded we use the current (VMThread) thread, 2435 // otherwise we use the work gang from the G1CollectedHeap and 2436 // we utilize all the worker threads we can. 2437 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL; 2438 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U); 2439 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); 2440 2441 // Parallel processing task executor. 2442 G1CMRefProcTaskExecutor par_task_executor(g1h, this, 2443 g1h->workers(), active_workers); 2444 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 2445 2446 // Set the degree of MT processing here. If the discovery was done MT, 2447 // the number of threads involved during discovery could differ from 2448 // the number of active workers. This is OK as long as the discovered 2449 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 2450 rp->set_active_mt_degree(active_workers); 2451 2452 // Process the weak references. 2453 rp->process_discovered_references(&g1_is_alive, 2454 &g1_keep_alive, 2455 &g1_drain_mark_stack, 2456 executor); 2457 2458 // The do_oop work routines of the keep_alive and drain_marking_stack 2459 // oop closures will set the has_overflown flag if we overflow the 2460 // global marking stack. 2461 2462 assert(_markStack.overflow() || _markStack.isEmpty(), 2463 "mark stack should be empty (unless it overflowed)"); 2464 2465 if (_markStack.overflow()) { 2466 // This should have been done already when we tried to push an 2467 // entry on to the global mark stack. But let's do it again. 2468 set_has_overflown(); 2469 } 2470 2471 assert(rp->num_q() == active_workers, "why not"); 2472 2473 rp->enqueue_discovered_references(executor); 2474 2475 rp->verify_no_references_recorded(); 2476 assert(!rp->discovery_enabled(), "Post condition"); 2477 } 2478 2479 // Now clean up stale oops in StringTable 2480 StringTable::unlink(&g1_is_alive); 2481 // Clean up unreferenced symbols in symbol table. 2482 SymbolTable::unlink(); 2483 } 2484 2485 void ConcurrentMark::swapMarkBitMaps() { 2486 CMBitMapRO* temp = _prevMarkBitMap; 2487 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2488 _nextMarkBitMap = (CMBitMap*) temp; 2489 } 2490 2491 class CMRemarkTask: public AbstractGangTask { 2492 private: 2493 ConcurrentMark* _cm; 2494 bool _is_serial; 2495 public: 2496 void work(uint worker_id) { 2497 // Since all available tasks are actually started, we should 2498 // only proceed if we're supposed to be actived. 
2499 if (worker_id < _cm->active_tasks()) { 2500 CMTask* task = _cm->task(worker_id); 2501 task->record_start_time(); 2502 do { 2503 task->do_marking_step(1000000000.0 /* something very large */, 2504 true /* do_termination */, 2505 _is_serial); 2506 } while (task->has_aborted() && !_cm->has_overflown()); 2507 // If we overflow, then we do not want to restart. We instead 2508 // want to abort remark and do concurrent marking again. 2509 task->record_end_time(); 2510 } 2511 } 2512 2513 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) : 2514 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) { 2515 _cm->terminator()->reset_for_reuse(active_workers); 2516 } 2517 }; 2518 2519 void ConcurrentMark::checkpointRootsFinalWork() { 2520 ResourceMark rm; 2521 HandleMark hm; 2522 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2523 2524 g1h->ensure_parsability(false); 2525 2526 if (G1CollectedHeap::use_parallel_gc_threads()) { 2527 G1CollectedHeap::StrongRootsScope srs(g1h); 2528 // this is remark, so we'll use up all active threads 2529 uint active_workers = g1h->workers()->active_workers(); 2530 if (active_workers == 0) { 2531 assert(active_workers > 0, "Should have been set earlier"); 2532 active_workers = (uint) ParallelGCThreads; 2533 g1h->workers()->set_active_workers(active_workers); 2534 } 2535 set_phase(active_workers, false /* concurrent */); 2536 // Leave _parallel_marking_threads at it's 2537 // value originally calculated in the ConcurrentMark 2538 // constructor and pass values of the active workers 2539 // through the gang in the task. 2540 2541 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */); 2542 // We will start all available threads, even if we decide that the 2543 // active_workers will be fewer. The extra ones will just bail out 2544 // immediately. 2545 g1h->set_par_threads(active_workers); 2546 g1h->workers()->run_task(&remarkTask); 2547 g1h->set_par_threads(0); 2548 } else { 2549 G1CollectedHeap::StrongRootsScope srs(g1h); 2550 uint active_workers = 1; 2551 set_phase(active_workers, false /* concurrent */); 2552 2553 // Note - if there's no work gang then the VMThread will be 2554 // the thread to execute the remark - serially. We have 2555 // to pass true for the is_serial parameter so that 2556 // CMTask::do_marking_step() doesn't enter the sync 2557 // barriers in the event of an overflow. Doing so will 2558 // cause an assert that the current thread is not a 2559 // concurrent GC thread. 
2560 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/); 2561 remarkTask.work(0); 2562 } 2563 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2564 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant"); 2565 2566 print_stats(); 2567 2568 #if VERIFY_OBJS_PROCESSED 2569 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) { 2570 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.", 2571 _scan_obj_cl.objs_processed, 2572 ThreadLocalObjQueue::objs_enqueued); 2573 guarantee(_scan_obj_cl.objs_processed == 2574 ThreadLocalObjQueue::objs_enqueued, 2575 "Different number of objs processed and enqueued."); 2576 } 2577 #endif 2578 } 2579 2580 #ifndef PRODUCT 2581 2582 class PrintReachableOopClosure: public OopClosure { 2583 private: 2584 G1CollectedHeap* _g1h; 2585 outputStream* _out; 2586 VerifyOption _vo; 2587 bool _all; 2588 2589 public: 2590 PrintReachableOopClosure(outputStream* out, 2591 VerifyOption vo, 2592 bool all) : 2593 _g1h(G1CollectedHeap::heap()), 2594 _out(out), _vo(vo), _all(all) { } 2595 2596 void do_oop(narrowOop* p) { do_oop_work(p); } 2597 void do_oop( oop* p) { do_oop_work(p); } 2598 2599 template <class T> void do_oop_work(T* p) { 2600 oop obj = oopDesc::load_decode_heap_oop(p); 2601 const char* str = NULL; 2602 const char* str2 = ""; 2603 2604 if (obj == NULL) { 2605 str = ""; 2606 } else if (!_g1h->is_in_g1_reserved(obj)) { 2607 str = " O"; 2608 } else { 2609 HeapRegion* hr = _g1h->heap_region_containing(obj); 2610 guarantee(hr != NULL, "invariant"); 2611 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); 2612 bool marked = _g1h->is_marked(obj, _vo); 2613 2614 if (over_tams) { 2615 str = " >"; 2616 if (marked) { 2617 str2 = " AND MARKED"; 2618 } 2619 } else if (marked) { 2620 str = " M"; 2621 } else { 2622 str = " NOT"; 2623 } 2624 } 2625 2626 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", 2627 p, (void*) obj, str, str2); 2628 } 2629 }; 2630 2631 class PrintReachableObjectClosure : public ObjectClosure { 2632 private: 2633 G1CollectedHeap* _g1h; 2634 outputStream* _out; 2635 VerifyOption _vo; 2636 bool _all; 2637 HeapRegion* _hr; 2638 2639 public: 2640 PrintReachableObjectClosure(outputStream* out, 2641 VerifyOption vo, 2642 bool all, 2643 HeapRegion* hr) : 2644 _g1h(G1CollectedHeap::heap()), 2645 _out(out), _vo(vo), _all(all), _hr(hr) { } 2646 2647 void do_object(oop o) { 2648 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo); 2649 bool marked = _g1h->is_marked(o, _vo); 2650 bool print_it = _all || over_tams || marked; 2651 2652 if (print_it) { 2653 _out->print_cr(" "PTR_FORMAT"%s", 2654 o, (over_tams) ? " >" : (marked) ? 
" M" : ""); 2655 PrintReachableOopClosure oopCl(_out, _vo, _all); 2656 o->oop_iterate_no_header(&oopCl); 2657 } 2658 } 2659 }; 2660 2661 class PrintReachableRegionClosure : public HeapRegionClosure { 2662 private: 2663 G1CollectedHeap* _g1h; 2664 outputStream* _out; 2665 VerifyOption _vo; 2666 bool _all; 2667 2668 public: 2669 bool doHeapRegion(HeapRegion* hr) { 2670 HeapWord* b = hr->bottom(); 2671 HeapWord* e = hr->end(); 2672 HeapWord* t = hr->top(); 2673 HeapWord* p = _g1h->top_at_mark_start(hr, _vo); 2674 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2675 "TAMS: "PTR_FORMAT, b, e, t, p); 2676 _out->cr(); 2677 2678 HeapWord* from = b; 2679 HeapWord* to = t; 2680 2681 if (to > from) { 2682 _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to); 2683 _out->cr(); 2684 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2685 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2686 _out->cr(); 2687 } 2688 2689 return false; 2690 } 2691 2692 PrintReachableRegionClosure(outputStream* out, 2693 VerifyOption vo, 2694 bool all) : 2695 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } 2696 }; 2697 2698 void ConcurrentMark::print_reachable(const char* str, 2699 VerifyOption vo, 2700 bool all) { 2701 gclog_or_tty->cr(); 2702 gclog_or_tty->print_cr("== Doing heap dump... "); 2703 2704 if (G1PrintReachableBaseFile == NULL) { 2705 gclog_or_tty->print_cr(" #### error: no base file defined"); 2706 return; 2707 } 2708 2709 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2710 (JVM_MAXPATHLEN - 1)) { 2711 gclog_or_tty->print_cr(" #### error: file name too long"); 2712 return; 2713 } 2714 2715 char file_name[JVM_MAXPATHLEN]; 2716 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2717 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2718 2719 fileStream fout(file_name); 2720 if (!fout.is_open()) { 2721 gclog_or_tty->print_cr(" #### error: could not open file"); 2722 return; 2723 } 2724 2725 outputStream* out = &fout; 2726 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); 2727 out->cr(); 2728 2729 out->print_cr("--- ITERATING OVER REGIONS"); 2730 out->cr(); 2731 PrintReachableRegionClosure rcl(out, vo, all); 2732 _g1h->heap_region_iterate(&rcl); 2733 out->cr(); 2734 2735 gclog_or_tty->print_cr(" done"); 2736 gclog_or_tty->flush(); 2737 } 2738 2739 #endif // PRODUCT 2740 2741 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2742 // Note we are overriding the read-only view of the prev map here, via 2743 // the cast. 2744 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2745 } 2746 2747 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2748 _nextMarkBitMap->clearRange(mr); 2749 } 2750 2751 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) { 2752 clearRangePrevBitmap(mr); 2753 clearRangeNextBitmap(mr); 2754 } 2755 2756 HeapRegion* 2757 ConcurrentMark::claim_region(uint worker_id) { 2758 // "checkpoint" the finger 2759 HeapWord* finger = _finger; 2760 2761 // _heap_end will not change underneath our feet; it only changes at 2762 // yield points. 2763 while (finger < _heap_end) { 2764 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2765 2766 // Note on how this code handles humongous regions. In the 2767 // normal case the finger will reach the start of a "starts 2768 // humongous" (SH) region. 
Its end will either be the end of the 2769 // last "continues humongous" (CH) region in the sequence, or the 2770 // standard end of the SH region (if the SH is the only region in 2771 // the sequence). That way claim_region() will skip over the CH 2772 // regions. However, there is a subtle race between a CM thread 2773 // executing this method and a mutator thread doing a humongous 2774 // object allocation. The two are not mutually exclusive as the CM 2775 // thread does not need to hold the Heap_lock when it gets 2776 // here. So there is a chance that claim_region() will come across 2777 // a free region that's in the progress of becoming a SH or a CH 2778 // region. In the former case, it will either 2779 // a) Miss the update to the region's end, in which case it will 2780 // visit every subsequent CH region, will find their bitmaps 2781 // empty, and do nothing, or 2782 // b) Will observe the update of the region's end (in which case 2783 // it will skip the subsequent CH regions). 2784 // If it comes across a region that suddenly becomes CH, the 2785 // scenario will be similar to b). So, the race between 2786 // claim_region() and a humongous object allocation might force us 2787 // to do a bit of unnecessary work (due to some unnecessary bitmap 2788 // iterations) but it should not introduce and correctness issues. 2789 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); 2790 HeapWord* bottom = curr_region->bottom(); 2791 HeapWord* end = curr_region->end(); 2792 HeapWord* limit = curr_region->next_top_at_mark_start(); 2793 2794 if (verbose_low()) { 2795 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " 2796 "["PTR_FORMAT", "PTR_FORMAT"), " 2797 "limit = "PTR_FORMAT, 2798 worker_id, curr_region, bottom, end, limit); 2799 } 2800 2801 // Is the gap between reading the finger and doing the CAS too long? 2802 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); 2803 if (res == finger) { 2804 // we succeeded 2805 2806 // notice that _finger == end cannot be guaranteed here since, 2807 // someone else might have moved the finger even further 2808 assert(_finger >= end, "the finger should have moved forward"); 2809 2810 if (verbose_low()) { 2811 gclog_or_tty->print_cr("[%u] we were successful with region = " 2812 PTR_FORMAT, worker_id, curr_region); 2813 } 2814 2815 if (limit > bottom) { 2816 if (verbose_low()) { 2817 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, " 2818 "returning it ", worker_id, curr_region); 2819 } 2820 return curr_region; 2821 } else { 2822 assert(limit == bottom, 2823 "the region limit should be at bottom"); 2824 if (verbose_low()) { 2825 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, " 2826 "returning NULL", worker_id, curr_region); 2827 } 2828 // we return NULL and the caller should try calling 2829 // claim_region() again. 
2830 return NULL; 2831 } 2832 } else { 2833 assert(_finger > finger, "the finger should have moved forward"); 2834 if (verbose_low()) { 2835 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2836 "global finger = "PTR_FORMAT", " 2837 "our finger = "PTR_FORMAT, 2838 worker_id, _finger, finger); 2839 } 2840 2841 // read it again 2842 finger = _finger; 2843 } 2844 } 2845 2846 return NULL; 2847 } 2848 2849 #ifndef PRODUCT 2850 enum VerifyNoCSetOopsPhase { 2851 VerifyNoCSetOopsStack, 2852 VerifyNoCSetOopsQueues, 2853 VerifyNoCSetOopsSATBCompleted, 2854 VerifyNoCSetOopsSATBThread 2855 }; 2856 2857 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2858 private: 2859 G1CollectedHeap* _g1h; 2860 VerifyNoCSetOopsPhase _phase; 2861 int _info; 2862 2863 const char* phase_str() { 2864 switch (_phase) { 2865 case VerifyNoCSetOopsStack: return "Stack"; 2866 case VerifyNoCSetOopsQueues: return "Queue"; 2867 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 2868 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 2869 default: ShouldNotReachHere(); 2870 } 2871 return NULL; 2872 } 2873 2874 void do_object_work(oop obj) { 2875 guarantee(!_g1h->obj_in_cs(obj), 2876 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2877 (void*) obj, phase_str(), _info)); 2878 } 2879 2880 public: 2881 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2882 2883 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2884 _phase = phase; 2885 _info = info; 2886 } 2887 2888 virtual void do_oop(oop* p) { 2889 oop obj = oopDesc::load_decode_heap_oop(p); 2890 do_object_work(obj); 2891 } 2892 2893 virtual void do_oop(narrowOop* p) { 2894 // We should not come across narrow oops while scanning marking 2895 // stacks and SATB buffers. 2896 ShouldNotReachHere(); 2897 } 2898 2899 virtual void do_object(oop obj) { 2900 do_object_work(obj); 2901 } 2902 }; 2903 2904 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 2905 bool verify_enqueued_buffers, 2906 bool verify_thread_buffers, 2907 bool verify_fingers) { 2908 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2909 if (!G1CollectedHeap::heap()->mark_in_progress()) { 2910 return; 2911 } 2912 2913 VerifyNoCSetOopsClosure cl; 2914 2915 if (verify_stacks) { 2916 // Verify entries on the global mark stack 2917 cl.set_phase(VerifyNoCSetOopsStack); 2918 _markStack.oops_do(&cl); 2919 2920 // Verify entries on the task queues 2921 for (uint i = 0; i < _max_worker_id; i += 1) { 2922 cl.set_phase(VerifyNoCSetOopsQueues, i); 2923 CMTaskQueue* queue = _task_queues->queue(i); 2924 queue->oops_do(&cl); 2925 } 2926 } 2927 2928 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 2929 2930 // Verify entries on the enqueued SATB buffers 2931 if (verify_enqueued_buffers) { 2932 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 2933 satb_qs.iterate_completed_buffers_read_only(&cl); 2934 } 2935 2936 // Verify entries on the per-thread SATB buffers 2937 if (verify_thread_buffers) { 2938 cl.set_phase(VerifyNoCSetOopsSATBThread); 2939 satb_qs.iterate_thread_buffers_read_only(&cl); 2940 } 2941 2942 if (verify_fingers) { 2943 // Verify the global finger 2944 HeapWord* global_finger = finger(); 2945 if (global_finger != NULL && global_finger < _heap_end) { 2946 // The global finger always points to a heap region boundary. 
We 2947 // use heap_region_containing_raw() to get the containing region 2948 // given that the global finger could be pointing to a free region 2949 // which subsequently becomes continues humongous. If that 2950 // happens, heap_region_containing() will return the bottom of the 2951 // corresponding starts humongous region and the check below will 2952 // not hold any more. 2953 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 2954 guarantee(global_finger == global_hr->bottom(), 2955 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 2956 global_finger, HR_FORMAT_PARAMS(global_hr))); 2957 } 2958 2959 // Verify the task fingers 2960 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2961 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 2962 CMTask* task = _tasks[i]; 2963 HeapWord* task_finger = task->finger(); 2964 if (task_finger != NULL && task_finger < _heap_end) { 2965 // See above note on the global finger verification. 2966 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 2967 guarantee(task_finger == task_hr->bottom() || 2968 !task_hr->in_collection_set(), 2969 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 2970 task_finger, HR_FORMAT_PARAMS(task_hr))); 2971 } 2972 } 2973 } 2974 } 2975 #endif // PRODUCT 2976 2977 // Aggregate the counting data that was constructed concurrently 2978 // with marking. 2979 class AggregateCountDataHRClosure: public HeapRegionClosure { 2980 G1CollectedHeap* _g1h; 2981 ConcurrentMark* _cm; 2982 CardTableModRefBS* _ct_bs; 2983 BitMap* _cm_card_bm; 2984 uint _max_worker_id; 2985 2986 public: 2987 AggregateCountDataHRClosure(G1CollectedHeap* g1h, 2988 BitMap* cm_card_bm, 2989 uint max_worker_id) : 2990 _g1h(g1h), _cm(g1h->concurrent_mark()), 2991 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())), 2992 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { } 2993 2994 bool doHeapRegion(HeapRegion* hr) { 2995 if (hr->continuesHumongous()) { 2996 // We will ignore these here and process them when their 2997 // associated "starts humongous" region is processed. 2998 // Note that we cannot rely on their associated 2999 // "starts humongous" region to have their bit set to 1 3000 // since, due to the region chunking in the parallel region 3001 // iteration, a "continues humongous" region might be visited 3002 // before its associated "starts humongous". 3003 return false; 3004 } 3005 3006 HeapWord* start = hr->bottom(); 3007 HeapWord* limit = hr->next_top_at_mark_start(); 3008 HeapWord* end = hr->end(); 3009 3010 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(), 3011 err_msg("Preconditions not met - " 3012 "start: "PTR_FORMAT", limit: "PTR_FORMAT", " 3013 "top: "PTR_FORMAT", end: "PTR_FORMAT, 3014 start, limit, hr->top(), hr->end())); 3015 3016 assert(hr->next_marked_bytes() == 0, "Precondition"); 3017 3018 if (start == limit) { 3019 // NTAMS of this region has not been set so nothing to do. 3020 return false; 3021 } 3022 3023 // 'start' should be in the heap. 
3024 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity"); 3025 // 'end' *may* be just beyone the end of the heap (if hr is the last region) 3026 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity"); 3027 3028 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); 3029 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit); 3030 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end); 3031 3032 // If ntams is not card aligned then we bump card bitmap index 3033 // for limit so that we get the all the cards spanned by 3034 // the object ending at ntams. 3035 // Note: if this is the last region in the heap then ntams 3036 // could be actually just beyond the end of the the heap; 3037 // limit_idx will then correspond to a (non-existent) card 3038 // that is also outside the heap. 3039 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) { 3040 limit_idx += 1; 3041 } 3042 3043 assert(limit_idx <= end_idx, "or else use atomics"); 3044 3045 // Aggregate the "stripe" in the count data associated with hr. 3046 uint hrs_index = hr->hrs_index(); 3047 size_t marked_bytes = 0; 3048 3049 for (uint i = 0; i < _max_worker_id; i += 1) { 3050 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i); 3051 BitMap* task_card_bm = _cm->count_card_bitmap_for(i); 3052 3053 // Fetch the marked_bytes in this region for task i and 3054 // add it to the running total for this region. 3055 marked_bytes += marked_bytes_array[hrs_index]; 3056 3057 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx) 3058 // into the global card bitmap. 3059 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 3060 3061 while (scan_idx < limit_idx) { 3062 assert(task_card_bm->at(scan_idx) == true, "should be"); 3063 _cm_card_bm->set_bit(scan_idx); 3064 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 3065 3066 // BitMap::get_next_one_offset() can handle the case when 3067 // its left_offset parameter is greater than its right_offset 3068 // parameter. It does, however, have an early exit if 3069 // left_offset == right_offset. So let's limit the value 3070 // passed in for left offset here. 3071 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 3072 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 3073 } 3074 } 3075 3076 // Update the marked bytes for this region. 
3077 hr->add_to_marked_bytes(marked_bytes); 3078 3079 // Next heap region 3080 return false; 3081 } 3082 }; 3083 3084 class G1AggregateCountDataTask: public AbstractGangTask { 3085 protected: 3086 G1CollectedHeap* _g1h; 3087 ConcurrentMark* _cm; 3088 BitMap* _cm_card_bm; 3089 uint _max_worker_id; 3090 int _active_workers; 3091 3092 public: 3093 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3094 ConcurrentMark* cm, 3095 BitMap* cm_card_bm, 3096 uint max_worker_id, 3097 int n_workers) : 3098 AbstractGangTask("Count Aggregation"), 3099 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3100 _max_worker_id(max_worker_id), 3101 _active_workers(n_workers) { } 3102 3103 void work(uint worker_id) { 3104 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3105 3106 if (G1CollectedHeap::use_parallel_gc_threads()) { 3107 _g1h->heap_region_par_iterate_chunked(&cl, worker_id, 3108 _active_workers, 3109 HeapRegion::AggregateCountClaimValue); 3110 } else { 3111 _g1h->heap_region_iterate(&cl); 3112 } 3113 } 3114 }; 3115 3116 3117 void ConcurrentMark::aggregate_count_data() { 3118 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? 3119 _g1h->workers()->active_workers() : 3120 1); 3121 3122 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3123 _max_worker_id, n_workers); 3124 3125 if (G1CollectedHeap::use_parallel_gc_threads()) { 3126 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 3127 "sanity check"); 3128 _g1h->set_par_threads(n_workers); 3129 _g1h->workers()->run_task(&g1_par_agg_task); 3130 _g1h->set_par_threads(0); 3131 3132 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue), 3133 "sanity check"); 3134 _g1h->reset_heap_region_claim_values(); 3135 } else { 3136 g1_par_agg_task.work(0); 3137 } 3138 } 3139 3140 // Clear the per-worker arrays used to store the per-region counting data 3141 void ConcurrentMark::clear_all_count_data() { 3142 // Clear the global card bitmap - it will be filled during 3143 // liveness count aggregation (during remark) and the 3144 // final counting task. 3145 _card_bm.clear(); 3146 3147 // Clear the global region bitmap - it will be filled as part 3148 // of the final counting task. 
3149 _region_bm.clear(); 3150 3151 uint max_regions = _g1h->max_regions(); 3152 assert(_max_worker_id > 0, "uninitialized"); 3153 3154 for (uint i = 0; i < _max_worker_id; i += 1) { 3155 BitMap* task_card_bm = count_card_bitmap_for(i); 3156 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3157 3158 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3159 assert(marked_bytes_array != NULL, "uninitialized"); 3160 3161 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3162 task_card_bm->clear(); 3163 } 3164 } 3165 3166 void ConcurrentMark::print_stats() { 3167 if (verbose_stats()) { 3168 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3169 for (size_t i = 0; i < _active_tasks; ++i) { 3170 _tasks[i]->print_stats(); 3171 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3172 } 3173 } 3174 } 3175 3176 // abandon current marking iteration due to a Full GC 3177 void ConcurrentMark::abort() { 3178 // Clear all marks to force marking thread to do nothing 3179 _nextMarkBitMap->clearAll(); 3180 // Clear the liveness counting data 3181 clear_all_count_data(); 3182 // Empty mark stack 3183 reset_marking_state(); 3184 for (uint i = 0; i < _max_worker_id; ++i) { 3185 _tasks[i]->clear_region_fields(); 3186 } 3187 _has_aborted = true; 3188 3189 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3190 satb_mq_set.abandon_partial_marking(); 3191 // This can be called either during or outside marking, we'll read 3192 // the expected_active value from the SATB queue set. 3193 satb_mq_set.set_active_all_threads( 3194 false, /* new active value */ 3195 satb_mq_set.is_active() /* expected_active */); 3196 } 3197 3198 static void print_ms_time_info(const char* prefix, const char* name, 3199 NumberSeq& ns) { 3200 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3201 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3202 if (ns.num() > 0) { 3203 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", 3204 prefix, ns.sd(), ns.maximum()); 3205 } 3206 } 3207 3208 void ConcurrentMark::print_summary_info() { 3209 gclog_or_tty->print_cr(" Concurrent marking:"); 3210 print_ms_time_info(" ", "init marks", _init_times); 3211 print_ms_time_info(" ", "remarks", _remark_times); 3212 { 3213 print_ms_time_info(" ", "final marks", _remark_mark_times); 3214 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3215 3216 } 3217 print_ms_time_info(" ", "cleanups", _cleanup_times); 3218 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3219 _total_counting_time, 3220 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3221 (double)_cleanup_times.num() 3222 : 0.0)); 3223 if (G1ScrubRemSets) { 3224 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3225 _total_rs_scrub_time, 3226 (_cleanup_times.num() > 0 ? 
_total_rs_scrub_time * 1000.0 / 3227 (double)_cleanup_times.num() 3228 : 0.0)); 3229 } 3230 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3231 (_init_times.sum() + _remark_times.sum() + 3232 _cleanup_times.sum())/1000.0); 3233 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3234 "(%8.2f s marking).", 3235 cmThread()->vtime_accum(), 3236 cmThread()->vtime_mark_accum()); 3237 } 3238 3239 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3240 if (use_parallel_marking_threads()) { 3241 _parallel_workers->print_worker_threads_on(st); 3242 } 3243 } 3244 3245 // We take a break if someone is trying to stop the world. 3246 bool ConcurrentMark::do_yield_check(uint worker_id) { 3247 if (should_yield()) { 3248 if (worker_id == 0) { 3249 _g1h->g1_policy()->record_concurrent_pause(); 3250 } 3251 cmThread()->yield(); 3252 return true; 3253 } else { 3254 return false; 3255 } 3256 } 3257 3258 bool ConcurrentMark::should_yield() { 3259 return cmThread()->should_yield(); 3260 } 3261 3262 bool ConcurrentMark::containing_card_is_marked(void* p) { 3263 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1); 3264 return _card_bm.at(offset >> CardTableModRefBS::card_shift); 3265 } 3266 3267 bool ConcurrentMark::containing_cards_are_marked(void* start, 3268 void* last) { 3269 return containing_card_is_marked(start) && 3270 containing_card_is_marked(last); 3271 } 3272 3273 #ifndef PRODUCT 3274 // for debugging purposes 3275 void ConcurrentMark::print_finger() { 3276 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3277 _heap_start, _heap_end, _finger); 3278 for (uint i = 0; i < _max_worker_id; ++i) { 3279 gclog_or_tty->print(" %u: "PTR_FORMAT, i, _tasks[i]->finger()); 3280 } 3281 gclog_or_tty->print_cr(""); 3282 } 3283 #endif 3284 3285 void CMTask::scan_object(oop obj) { 3286 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3287 3288 if (_cm->verbose_high()) { 3289 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, 3290 _worker_id, (void*) obj); 3291 } 3292 3293 size_t obj_size = obj->size(); 3294 _words_scanned += obj_size; 3295 3296 obj->oop_iterate(_cm_oop_closure); 3297 statsOnly( ++_objs_scanned ); 3298 check_limits(); 3299 } 3300 3301 // Closure for iteration over bitmaps 3302 class CMBitMapClosure : public BitMapClosure { 3303 private: 3304 // the bitmap that is being iterated over 3305 CMBitMap* _nextMarkBitMap; 3306 ConcurrentMark* _cm; 3307 CMTask* _task; 3308 3309 public: 3310 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3311 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3312 3313 bool do_bit(size_t offset) { 3314 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3315 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3316 assert( addr < _cm->finger(), "invariant"); 3317 3318 statsOnly( _task->increase_objs_found_on_bitmap() ); 3319 assert(addr >= _task->finger(), "invariant"); 3320 3321 // We move that task's local finger along. 3322 _task->move_finger_to(addr); 3323 3324 _task->scan_object(oop(addr)); 3325 // we only partially drain the local queue and global stack 3326 _task->drain_local_queue(true); 3327 _task->drain_global_stack(true); 3328 3329 // if the has_aborted flag has been raised, we need to bail out of 3330 // the iteration 3331 return !_task->has_aborted(); 3332 } 3333 }; 3334 3335 // Closure for iterating over objects, currently only used for 3336 // processing SATB buffers. 
3337 class CMObjectClosure : public ObjectClosure { 3338 private: 3339 CMTask* _task; 3340 3341 public: 3342 void do_object(oop obj) { 3343 _task->deal_with_reference(obj); 3344 } 3345 3346 CMObjectClosure(CMTask* task) : _task(task) { } 3347 }; 3348 3349 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3350 ConcurrentMark* cm, 3351 CMTask* task) 3352 : _g1h(g1h), _cm(cm), _task(task) { 3353 assert(_ref_processor == NULL, "should be initialized to NULL"); 3354 3355 if (G1UseConcMarkReferenceProcessing) { 3356 _ref_processor = g1h->ref_processor_cm(); 3357 assert(_ref_processor != NULL, "should not be NULL"); 3358 } 3359 } 3360 3361 void CMTask::setup_for_region(HeapRegion* hr) { 3362 // Separated the asserts so that we know which one fires. 3363 assert(hr != NULL, 3364 "claim_region() should have filtered out continues humongous regions"); 3365 assert(!hr->continuesHumongous(), 3366 "claim_region() should have filtered out continues humongous regions"); 3367 3368 if (_cm->verbose_low()) { 3369 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3370 _worker_id, hr); 3371 } 3372 3373 _curr_region = hr; 3374 _finger = hr->bottom(); 3375 update_region_limit(); 3376 } 3377 3378 void CMTask::update_region_limit() { 3379 HeapRegion* hr = _curr_region; 3380 HeapWord* bottom = hr->bottom(); 3381 HeapWord* limit = hr->next_top_at_mark_start(); 3382 3383 if (limit == bottom) { 3384 if (_cm->verbose_low()) { 3385 gclog_or_tty->print_cr("[%u] found an empty region " 3386 "["PTR_FORMAT", "PTR_FORMAT")", 3387 _worker_id, bottom, limit); 3388 } 3389 // The region was collected underneath our feet. 3390 // We set the finger to bottom to ensure that the bitmap 3391 // iteration that will follow this will not do anything. 3392 // (this is not a condition that holds when we set the region up, 3393 // as the region is not supposed to be empty in the first place) 3394 _finger = bottom; 3395 } else if (limit >= _region_limit) { 3396 assert(limit >= _finger, "peace of mind"); 3397 } else { 3398 assert(limit < _region_limit, "only way to get here"); 3399 // This can happen under some pretty unusual circumstances. An 3400 // evacuation pause empties the region underneath our feet (NTAMS 3401 // at bottom). We then do some allocation in the region (NTAMS 3402 // stays at bottom), followed by the region being used as a GC 3403 // alloc region (NTAMS will move to top() and the objects 3404 // originally below it will be grayed). All objects now marked in 3405 // the region are explicitly grayed, if below the global finger, 3406 // and we do not need in fact to scan anything else. So, we simply 3407 // set _finger to be limit to ensure that the bitmap iteration 3408 // doesn't do anything. 3409 _finger = limit; 3410 } 3411 3412 _region_limit = limit; 3413 } 3414 3415 void CMTask::giveup_current_region() { 3416 assert(_curr_region != NULL, "invariant"); 3417 if (_cm->verbose_low()) { 3418 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, 3419 _worker_id, _curr_region); 3420 } 3421 clear_region_fields(); 3422 } 3423 3424 void CMTask::clear_region_fields() { 3425 // Values for these three fields that indicate that we're not 3426 // holding on to a region. 
3427 _curr_region = NULL; 3428 _finger = NULL; 3429 _region_limit = NULL; 3430 } 3431 3432 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3433 if (cm_oop_closure == NULL) { 3434 assert(_cm_oop_closure != NULL, "invariant"); 3435 } else { 3436 assert(_cm_oop_closure == NULL, "invariant"); 3437 } 3438 _cm_oop_closure = cm_oop_closure; 3439 } 3440 3441 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3442 guarantee(nextMarkBitMap != NULL, "invariant"); 3443 3444 if (_cm->verbose_low()) { 3445 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3446 } 3447 3448 _nextMarkBitMap = nextMarkBitMap; 3449 clear_region_fields(); 3450 3451 _calls = 0; 3452 _elapsed_time_ms = 0.0; 3453 _termination_time_ms = 0.0; 3454 _termination_start_time_ms = 0.0; 3455 3456 #if _MARKING_STATS_ 3457 _local_pushes = 0; 3458 _local_pops = 0; 3459 _local_max_size = 0; 3460 _objs_scanned = 0; 3461 _global_pushes = 0; 3462 _global_pops = 0; 3463 _global_max_size = 0; 3464 _global_transfers_to = 0; 3465 _global_transfers_from = 0; 3466 _regions_claimed = 0; 3467 _objs_found_on_bitmap = 0; 3468 _satb_buffers_processed = 0; 3469 _steal_attempts = 0; 3470 _steals = 0; 3471 _aborted = 0; 3472 _aborted_overflow = 0; 3473 _aborted_cm_aborted = 0; 3474 _aborted_yield = 0; 3475 _aborted_timed_out = 0; 3476 _aborted_satb = 0; 3477 _aborted_termination = 0; 3478 #endif // _MARKING_STATS_ 3479 } 3480 3481 bool CMTask::should_exit_termination() { 3482 regular_clock_call(); 3483 // This is called when we are in the termination protocol. We should 3484 // quit if, for some reason, this task wants to abort or the global 3485 // stack is not empty (this means that we can get work from it). 3486 return !_cm->mark_stack_empty() || has_aborted(); 3487 } 3488 3489 void CMTask::reached_limit() { 3490 assert(_words_scanned >= _words_scanned_limit || 3491 _refs_reached >= _refs_reached_limit , 3492 "shouldn't have been called otherwise"); 3493 regular_clock_call(); 3494 } 3495 3496 void CMTask::regular_clock_call() { 3497 if (has_aborted()) return; 3498 3499 // First, we need to recalculate the words scanned and refs reached 3500 // limits for the next clock call. 3501 recalculate_limits(); 3502 3503 // During the regular clock call we do the following 3504 3505 // (1) If an overflow has been flagged, then we abort. 3506 if (_cm->has_overflown()) { 3507 set_has_aborted(); 3508 return; 3509 } 3510 3511 // If we are not concurrent (i.e. we're doing remark) we don't need 3512 // to check anything else. The other steps are only needed during 3513 // the concurrent marking phase. 3514 if (!concurrent()) return; 3515 3516 // (2) If marking has been aborted for Full GC, then we also abort. 3517 if (_cm->has_aborted()) { 3518 set_has_aborted(); 3519 statsOnly( ++_aborted_cm_aborted ); 3520 return; 3521 } 3522 3523 double curr_time_ms = os::elapsedVTime() * 1000.0; 3524 3525 // (3) If marking stats are enabled, then we update the step history. 
3526 #if _MARKING_STATS_ 3527 if (_words_scanned >= _words_scanned_limit) { 3528 ++_clock_due_to_scanning; 3529 } 3530 if (_refs_reached >= _refs_reached_limit) { 3531 ++_clock_due_to_marking; 3532 } 3533 3534 double last_interval_ms = curr_time_ms - _interval_start_time_ms; 3535 _interval_start_time_ms = curr_time_ms; 3536 _all_clock_intervals_ms.add(last_interval_ms); 3537 3538 if (_cm->verbose_medium()) { 3539 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, " 3540 "scanned = %d%s, refs reached = %d%s", 3541 _worker_id, last_interval_ms, 3542 _words_scanned, 3543 (_words_scanned >= _words_scanned_limit) ? " (*)" : "", 3544 _refs_reached, 3545 (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); 3546 } 3547 #endif // _MARKING_STATS_ 3548 3549 // (4) We check whether we should yield. If we have to, then we abort. 3550 if (_cm->should_yield()) { 3551 // We should yield. To do this we abort the task. The caller is 3552 // responsible for yielding. 3553 set_has_aborted(); 3554 statsOnly( ++_aborted_yield ); 3555 return; 3556 } 3557 3558 // (5) We check whether we've reached our time quota. If we have, 3559 // then we abort. 3560 double elapsed_time_ms = curr_time_ms - _start_time_ms; 3561 if (elapsed_time_ms > _time_target_ms) { 3562 set_has_aborted(); 3563 _has_timed_out = true; 3564 statsOnly( ++_aborted_timed_out ); 3565 return; 3566 } 3567 3568 // (6) Finally, we check whether there are enough completed STAB 3569 // buffers available for processing. If there are, we abort. 3570 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3571 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 3572 if (_cm->verbose_low()) { 3573 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers", 3574 _worker_id); 3575 } 3576 // we do need to process SATB buffers, we'll abort and restart 3577 // the marking task to do so 3578 set_has_aborted(); 3579 statsOnly( ++_aborted_satb ); 3580 return; 3581 } 3582 } 3583 3584 void CMTask::recalculate_limits() { 3585 _real_words_scanned_limit = _words_scanned + words_scanned_period; 3586 _words_scanned_limit = _real_words_scanned_limit; 3587 3588 _real_refs_reached_limit = _refs_reached + refs_reached_period; 3589 _refs_reached_limit = _real_refs_reached_limit; 3590 } 3591 3592 void CMTask::decrease_limits() { 3593 // This is called when we believe that we're going to do an infrequent 3594 // operation which will increase the per byte scanned cost (i.e. move 3595 // entries to/from the global stack). It basically tries to decrease the 3596 // scanning limit so that the clock is called earlier. 
3597 3598 if (_cm->verbose_medium()) { 3599 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id); 3600 } 3601 3602 _words_scanned_limit = _real_words_scanned_limit - 3603 3 * words_scanned_period / 4; 3604 _refs_reached_limit = _real_refs_reached_limit - 3605 3 * refs_reached_period / 4; 3606 } 3607 3608 void CMTask::move_entries_to_global_stack() { 3609 // local array where we'll store the entries that will be popped 3610 // from the local queue 3611 oop buffer[global_stack_transfer_size]; 3612 3613 int n = 0; 3614 oop obj; 3615 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 3616 buffer[n] = obj; 3617 ++n; 3618 } 3619 3620 if (n > 0) { 3621 // we popped at least one entry from the local queue 3622 3623 statsOnly( ++_global_transfers_to; _local_pops += n ); 3624 3625 if (!_cm->mark_stack_push(buffer, n)) { 3626 if (_cm->verbose_low()) { 3627 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow", 3628 _worker_id); 3629 } 3630 set_has_aborted(); 3631 } else { 3632 // the transfer was successful 3633 3634 if (_cm->verbose_medium()) { 3635 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack", 3636 _worker_id, n); 3637 } 3638 statsOnly( int tmp_size = _cm->mark_stack_size(); 3639 if (tmp_size > _global_max_size) { 3640 _global_max_size = tmp_size; 3641 } 3642 _global_pushes += n ); 3643 } 3644 } 3645 3646 // this operation was quite expensive, so decrease the limits 3647 decrease_limits(); 3648 } 3649 3650 void CMTask::get_entries_from_global_stack() { 3651 // local array where we'll store the entries that will be popped 3652 // from the global stack. 3653 oop buffer[global_stack_transfer_size]; 3654 int n; 3655 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 3656 assert(n <= global_stack_transfer_size, 3657 "we should not pop more than the given limit"); 3658 if (n > 0) { 3659 // yes, we did actually pop at least one entry 3660 3661 statsOnly( ++_global_transfers_from; _global_pops += n ); 3662 if (_cm->verbose_medium()) { 3663 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack", 3664 _worker_id, n); 3665 } 3666 for (int i = 0; i < n; ++i) { 3667 bool success = _task_queue->push(buffer[i]); 3668 // We only call this when the local queue is empty or under a 3669 // given target limit. So, we do not expect this push to fail. 3670 assert(success, "invariant"); 3671 } 3672 3673 statsOnly( int tmp_size = _task_queue->size(); 3674 if (tmp_size > _local_max_size) { 3675 _local_max_size = tmp_size; 3676 } 3677 _local_pushes += n ); 3678 } 3679 3680 // this operation was quite expensive, so decrease the limits 3681 decrease_limits(); 3682 } 3683 3684 void CMTask::drain_local_queue(bool partially) { 3685 if (has_aborted()) return; 3686 3687 // Decide what the target size is, depending whether we're going to 3688 // drain it partially (so that other tasks can steal if they run out 3689 // of things to do) or totally (at the very end). 
3690 size_t target_size; 3691 if (partially) { 3692 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 3693 } else { 3694 target_size = 0; 3695 } 3696 3697 if (_task_queue->size() > target_size) { 3698 if (_cm->verbose_high()) { 3699 gclog_or_tty->print_cr("[%u] draining local queue, target size = %d", 3700 _worker_id, target_size); 3701 } 3702 3703 oop obj; 3704 bool ret = _task_queue->pop_local(obj); 3705 while (ret) { 3706 statsOnly( ++_local_pops ); 3707 3708 if (_cm->verbose_high()) { 3709 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id, 3710 (void*) obj); 3711 } 3712 3713 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 3714 assert(!_g1h->is_on_master_free_list( 3715 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 3716 3717 scan_object(obj); 3718 3719 if (_task_queue->size() <= target_size || has_aborted()) { 3720 ret = false; 3721 } else { 3722 ret = _task_queue->pop_local(obj); 3723 } 3724 } 3725 3726 if (_cm->verbose_high()) { 3727 gclog_or_tty->print_cr("[%u] drained local queue, size = %d", 3728 _worker_id, _task_queue->size()); 3729 } 3730 } 3731 } 3732 3733 void CMTask::drain_global_stack(bool partially) { 3734 if (has_aborted()) return; 3735 3736 // We have a policy to drain the local queue before we attempt to 3737 // drain the global stack. 3738 assert(partially || _task_queue->size() == 0, "invariant"); 3739 3740 // Decide what the target size is, depending whether we're going to 3741 // drain it partially (so that other tasks can steal if they run out 3742 // of things to do) or totally (at the very end). Notice that, 3743 // because we move entries from the global stack in chunks or 3744 // because another task might be doing the same, we might in fact 3745 // drop below the target. But, this is not a problem. 3746 size_t target_size; 3747 if (partially) { 3748 target_size = _cm->partial_mark_stack_size_target(); 3749 } else { 3750 target_size = 0; 3751 } 3752 3753 if (_cm->mark_stack_size() > target_size) { 3754 if (_cm->verbose_low()) { 3755 gclog_or_tty->print_cr("[%u] draining global_stack, target size %d", 3756 _worker_id, target_size); 3757 } 3758 3759 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 3760 get_entries_from_global_stack(); 3761 drain_local_queue(partially); 3762 } 3763 3764 if (_cm->verbose_low()) { 3765 gclog_or_tty->print_cr("[%u] drained global stack, size = %d", 3766 _worker_id, _cm->mark_stack_size()); 3767 } 3768 } 3769 } 3770 3771 // SATB Queue has several assumptions on whether to call the par or 3772 // non-par versions of the methods. this is why some of the code is 3773 // replicated. We should really get rid of the single-threaded version 3774 // of the code to simplify things. 3775 void CMTask::drain_satb_buffers() { 3776 if (has_aborted()) return; 3777 3778 // We set this so that the regular clock knows that we're in the 3779 // middle of draining buffers and doesn't set the abort flag when it 3780 // notices that SATB buffers are available for draining. It'd be 3781 // very counter productive if it did that. :-) 3782 _draining_satb_buffers = true; 3783 3784 CMObjectClosure oc(this); 3785 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3786 if (G1CollectedHeap::use_parallel_gc_threads()) { 3787 satb_mq_set.set_par_closure(_worker_id, &oc); 3788 } else { 3789 satb_mq_set.set_closure(&oc); 3790 } 3791 3792 // This keeps claiming and applying the closure to completed buffers 3793 // until we run out of buffers or we need to abort. 
3794 if (G1CollectedHeap::use_parallel_gc_threads()) { 3795 while (!has_aborted() && 3796 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) { 3797 if (_cm->verbose_medium()) { 3798 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3799 } 3800 statsOnly( ++_satb_buffers_processed ); 3801 regular_clock_call(); 3802 } 3803 } else { 3804 while (!has_aborted() && 3805 satb_mq_set.apply_closure_to_completed_buffer()) { 3806 if (_cm->verbose_medium()) { 3807 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3808 } 3809 statsOnly( ++_satb_buffers_processed ); 3810 regular_clock_call(); 3811 } 3812 } 3813 3814 if (!concurrent() && !has_aborted()) { 3815 // We should only do this during remark. 3816 if (G1CollectedHeap::use_parallel_gc_threads()) { 3817 satb_mq_set.par_iterate_closure_all_threads(_worker_id); 3818 } else { 3819 satb_mq_set.iterate_closure_all_threads(); 3820 } 3821 } 3822 3823 _draining_satb_buffers = false; 3824 3825 assert(has_aborted() || 3826 concurrent() || 3827 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3828 3829 if (G1CollectedHeap::use_parallel_gc_threads()) { 3830 satb_mq_set.set_par_closure(_worker_id, NULL); 3831 } else { 3832 satb_mq_set.set_closure(NULL); 3833 } 3834 3835 // again, this was a potentially expensive operation, decrease the 3836 // limits to get the regular clock call early 3837 decrease_limits(); 3838 } 3839 3840 void CMTask::print_stats() { 3841 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3842 _worker_id, _calls); 3843 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3844 _elapsed_time_ms, _termination_time_ms); 3845 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3846 _step_times_ms.num(), _step_times_ms.avg(), 3847 _step_times_ms.sd()); 3848 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3849 _step_times_ms.maximum(), _step_times_ms.sum()); 3850 3851 #if _MARKING_STATS_ 3852 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3853 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3854 _all_clock_intervals_ms.sd()); 3855 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3856 _all_clock_intervals_ms.maximum(), 3857 _all_clock_intervals_ms.sum()); 3858 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", 3859 _clock_due_to_scanning, _clock_due_to_marking); 3860 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", 3861 _objs_scanned, _objs_found_on_bitmap); 3862 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", 3863 _local_pushes, _local_pops, _local_max_size); 3864 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", 3865 _global_pushes, _global_pops, _global_max_size); 3866 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", 3867 _global_transfers_to,_global_transfers_from); 3868 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed); 3869 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); 3870 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", 3871 _steal_attempts, _steals); 3872 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); 3873 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", 3874 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3875 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", 3876 _aborted_timed_out, 
_aborted_satb, _aborted_termination); 3877 #endif // _MARKING_STATS_ 3878 } 3879 3880 /***************************************************************************** 3881 3882 The do_marking_step(time_target_ms, ...) method is the building 3883 block of the parallel marking framework. It can be called in parallel 3884 with other invocations of do_marking_step() on different tasks 3885 (but only one per task, obviously) and concurrently with the 3886 mutator threads, or during remark, hence it eliminates the need 3887 for two versions of the code. When called during remark, it will 3888 pick up from where the task left off during the concurrent marking 3889 phase. Interestingly, tasks are also claimable during evacuation 3890 pauses too, since do_marking_step() ensures that it aborts before 3891 it needs to yield. 3892 3893 The data structures that it uses to do marking work are the 3894 following: 3895 3896 (1) Marking Bitmap. If there are gray objects that appear only 3897 on the bitmap (this happens either when dealing with an overflow 3898 or when the initial marking phase has simply marked the roots 3899 and didn't push them on the stack), then tasks claim heap 3900 regions whose bitmap they then scan to find gray objects. A 3901 global finger indicates where the end of the last claimed region 3902 is. A local finger indicates how far into the region a task has 3903 scanned. The two fingers are used to determine how to gray an 3904 object (i.e. whether simply marking it is OK, as it will be 3905 visited by a task in the future, or whether it needs to be also 3906 pushed on a stack). 3907 3908 (2) Local Queue. The local queue of the task which is accessed 3909 reasonably efficiently by the task. Other tasks can steal from 3910 it when they run out of work. Throughout the marking phase, a 3911 task attempts to keep its local queue short but not totally 3912 empty, so that entries are available for stealing by other 3913 tasks. Only when there is no more work, a task will totally 3914 drain its local queue. 3915 3916 (3) Global Mark Stack. This handles local queue overflow. During 3917 marking only sets of entries are moved between it and the local 3918 queues, as access to it requires a mutex and more fine-grain 3919 interaction with it which might cause contention. If it 3920 overflows, then the marking phase should restart and iterate 3921 over the bitmap to identify gray objects. Throughout the marking 3922 phase, tasks attempt to keep the global mark stack at a small 3923 length but not totally empty, so that entries are available for 3924 popping by other tasks. Only when there is no more work, tasks 3925 will totally drain the global mark stack. 3926 3927 (4) SATB Buffer Queue. This is where completed SATB buffers are 3928 made available. Buffers are regularly removed from this queue 3929 and scanned for roots, so that the queue doesn't get too 3930 long. During remark, all completed buffers are processed, as 3931 well as the filled in parts of any uncompleted buffers. 3932 3933 The do_marking_step() method tries to abort when the time target 3934 has been reached. There are a few other cases when the 3935 do_marking_step() method also aborts: 3936 3937 (1) When the marking phase has been aborted (after a Full GC). 3938 3939 (2) When a global overflow (on the global stack) has been 3940 triggered. Before the task aborts, it will actually sync up with 3941 the other tasks to ensure that all the marking data structures 3942 (local queues, stacks, fingers etc.) 
are re-initialised so that 3943 when do_marking_step() completes, the marking phase can 3944 immediately restart. 3945 3946 (3) When enough completed SATB buffers are available. The 3947 do_marking_step() method only tries to drain SATB buffers right 3948 at the beginning. So, if enough buffers are available, the 3949 marking step aborts and the SATB buffers are processed at 3950 the beginning of the next invocation. 3951 3952 (4) To yield. when we have to yield then we abort and yield 3953 right at the end of do_marking_step(). This saves us from a lot 3954 of hassle as, by yielding we might allow a Full GC. If this 3955 happens then objects will be compacted underneath our feet, the 3956 heap might shrink, etc. We save checking for this by just 3957 aborting and doing the yield right at the end. 3958 3959 From the above it follows that the do_marking_step() method should 3960 be called in a loop (or, otherwise, regularly) until it completes. 3961 3962 If a marking step completes without its has_aborted() flag being 3963 true, it means it has completed the current marking phase (and 3964 also all other marking tasks have done so and have all synced up). 3965 3966 A method called regular_clock_call() is invoked "regularly" (in 3967 sub ms intervals) throughout marking. It is this clock method that 3968 checks all the abort conditions which were mentioned above and 3969 decides when the task should abort. A work-based scheme is used to 3970 trigger this clock method: when the number of object words the 3971 marking phase has scanned or the number of references the marking 3972 phase has visited reach a given limit. Additional invocations to 3973 the method clock have been planted in a few other strategic places 3974 too. The initial reason for the clock method was to avoid calling 3975 vtime too regularly, as it is quite expensive. So, once it was in 3976 place, it was natural to piggy-back all the other conditions on it 3977 too and not constantly check them throughout the code. 3978 3979 If do_termination is true then do_marking_step will enter its 3980 termination protocol. 3981 3982 The value of is_serial must be true when do_marking_step is being 3983 called serially (i.e. by the VMThread) and do_marking_step should 3984 skip any synchronization in the termination and overflow code. 3985 Examples include the serial remark code and the serial reference 3986 processing closures. 3987 3988 The value of is_serial must be false when do_marking_step is 3989 being called by any of the worker threads in a work gang. 3990 Examples include the concurrent marking code (CMMarkingTask), 3991 the MT remark code, and the MT reference processing closures. 3992 3993 *****************************************************************************/ 3994 3995 void CMTask::do_marking_step(double time_target_ms, 3996 bool do_termination, 3997 bool is_serial) { 3998 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 3999 assert(concurrent() == _cm->concurrent(), "they should be the same"); 4000 4001 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); 4002 assert(_task_queues != NULL, "invariant"); 4003 assert(_task_queue != NULL, "invariant"); 4004 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); 4005 4006 assert(!_claimed, 4007 "only one thread should claim this task at any one time"); 4008 4009 // OK, this doesn't safeguard again all possible scenarios, as it is 4010 // possible for two threads to set the _claimed flag at the same 4011 // time. 
But it is only for debugging purposes anyway and it will 4012 // catch most problems. 4013 _claimed = true; 4014 4015 _start_time_ms = os::elapsedVTime() * 1000.0; 4016 statsOnly( _interval_start_time_ms = _start_time_ms ); 4017 4018 // If do_stealing is true then do_marking_step will attempt to 4019 // steal work from the other CMTasks. It only makes sense to 4020 // enable stealing when the termination protocol is enabled 4021 // and do_marking_step() is not being called serially. 4022 bool do_stealing = do_termination && !is_serial; 4023 4024 double diff_prediction_ms = 4025 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 4026 _time_target_ms = time_target_ms - diff_prediction_ms; 4027 4028 // set up the variables that are used in the work-based scheme to 4029 // call the regular clock method 4030 _words_scanned = 0; 4031 _refs_reached = 0; 4032 recalculate_limits(); 4033 4034 // clear all flags 4035 clear_has_aborted(); 4036 _has_timed_out = false; 4037 _draining_satb_buffers = false; 4038 4039 ++_calls; 4040 4041 if (_cm->verbose_low()) { 4042 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 4043 "target = %1.2lfms >>>>>>>>>>", 4044 _worker_id, _calls, _time_target_ms); 4045 } 4046 4047 // Set up the bitmap and oop closures. Anything that uses them is 4048 // eventually called from this method, so it is OK to allocate these 4049 // statically. 4050 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 4051 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 4052 set_cm_oop_closure(&cm_oop_closure); 4053 4054 if (_cm->has_overflown()) { 4055 // This can happen if the mark stack overflows during a GC pause 4056 // and this task, after a yield point, restarts. We have to abort 4057 // as we need to get into the overflow protocol which happens 4058 // right at the end of this task. 4059 set_has_aborted(); 4060 } 4061 4062 // First drain any available SATB buffers. After this, we will not 4063 // look at SATB buffers before the next invocation of this method. 4064 // If enough completed SATB buffers are queued up, the regular clock 4065 // will abort this task so that it restarts. 4066 drain_satb_buffers(); 4067 // ...then partially drain the local queue and the global stack 4068 drain_local_queue(true); 4069 drain_global_stack(true); 4070 4071 do { 4072 if (!has_aborted() && _curr_region != NULL) { 4073 // This means that we're already holding on to a region. 4074 assert(_finger != NULL, "if region is not NULL, then the finger " 4075 "should not be NULL either"); 4076 4077 // We might have restarted this task after an evacuation pause 4078 // which might have evacuated the region we're holding on to 4079 // underneath our feet. Let's read its limit again to make sure 4080 // that we do not iterate over a region of the heap that 4081 // contains garbage (update_region_limit() will also move 4082 // _finger to the start of the region if it is found empty). 4083 update_region_limit(); 4084 // We will start from _finger not from the start of the region, 4085 // as we might be restarting this task after aborting half-way 4086 // through scanning this region. In this case, _finger points to 4087 // the address where we last found a marked object. If this is a 4088 // fresh region, _finger points to start(). 
4089 MemRegion mr = MemRegion(_finger, _region_limit); 4090 4091 if (_cm->verbose_low()) { 4092 gclog_or_tty->print_cr("[%u] we're scanning part " 4093 "["PTR_FORMAT", "PTR_FORMAT") " 4094 "of region "HR_FORMAT, 4095 _worker_id, _finger, _region_limit, 4096 HR_FORMAT_PARAMS(_curr_region)); 4097 } 4098 4099 assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(), 4100 "humongous regions should go around loop once only"); 4101 4102 // Some special cases: 4103 // If the memory region is empty, we can just give up the region. 4104 // If the current region is humongous then we only need to check 4105 // the bitmap for the bit associated with the start of the object, 4106 // scan the object if it's live, and give up the region. 4107 // Otherwise, let's iterate over the bitmap of the part of the region 4108 // that is left. 4109 // If the iteration is successful, give up the region. 4110 if (mr.is_empty()) { 4111 giveup_current_region(); 4112 regular_clock_call(); 4113 } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) { 4114 if (_nextMarkBitMap->isMarked(mr.start())) { 4115 // The object is marked - apply the closure 4116 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4117 bitmap_closure.do_bit(offset); 4118 } 4119 // Even if this task aborted while scanning the humongous object 4120 // we can (and should) give up the current region. 4121 giveup_current_region(); 4122 regular_clock_call(); 4123 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4124 giveup_current_region(); 4125 regular_clock_call(); 4126 } else { 4127 assert(has_aborted(), "currently the only way to do so"); 4128 // The only way to abort the bitmap iteration is to return 4129 // false from the do_bit() method. However, inside the 4130 // do_bit() method we move the _finger to point to the 4131 // object currently being looked at. So, if we bail out, we 4132 // have definitely set _finger to something non-null. 4133 assert(_finger != NULL, "invariant"); 4134 4135 // Region iteration was actually aborted. So now _finger 4136 // points to the address of the object we last scanned. If we 4137 // leave it there, when we restart this task, we will rescan 4138 // the object. It is easy to avoid this. We move the finger by 4139 // enough to point to the next possible object header (the 4140 // bitmap knows by how much we need to move it as it knows its 4141 // granularity). 4142 assert(_finger < _region_limit, "invariant"); 4143 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4144 // Check if bitmap iteration was aborted while scanning the last object 4145 if (new_finger >= _region_limit) { 4146 giveup_current_region(); 4147 } else { 4148 move_finger_to(new_finger); 4149 } 4150 } 4151 } 4152 // At this point we have either completed iterating over the 4153 // region we were holding on to, or we have aborted. 4154 4155 // We then partially drain the local queue and the global stack. 4156 // (Do we really need this?) 4157 drain_local_queue(true); 4158 drain_global_stack(true); 4159 4160 // Read the note on the claim_region() method on why it might 4161 // return NULL with potentially more regions available for 4162 // claiming and why we have to check out_of_regions() to determine 4163 // whether we're done or not. 4164 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4165 // We are going to try to claim a new region. We should have 4166 // given up on the previous one. 
4167 // Separated the asserts so that we know which one fires. 4168 assert(_curr_region == NULL, "invariant"); 4169 assert(_finger == NULL, "invariant"); 4170 assert(_region_limit == NULL, "invariant"); 4171 if (_cm->verbose_low()) { 4172 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id); 4173 } 4174 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 4175 if (claimed_region != NULL) { 4176 // Yes, we managed to claim one 4177 statsOnly( ++_regions_claimed ); 4178 4179 if (_cm->verbose_low()) { 4180 gclog_or_tty->print_cr("[%u] we successfully claimed " 4181 "region "PTR_FORMAT, 4182 _worker_id, claimed_region); 4183 } 4184 4185 setup_for_region(claimed_region); 4186 assert(_curr_region == claimed_region, "invariant"); 4187 } 4188 // It is important to call the regular clock here. It might take 4189 // a while to claim a region if, for example, we hit a large 4190 // block of empty regions. So we need to call the regular clock 4191 // method once round the loop to make sure it's called 4192 // frequently enough. 4193 regular_clock_call(); 4194 } 4195 4196 if (!has_aborted() && _curr_region == NULL) { 4197 assert(_cm->out_of_regions(), 4198 "at this point we should be out of regions"); 4199 } 4200 } while ( _curr_region != NULL && !has_aborted()); 4201 4202 if (!has_aborted()) { 4203 // We cannot check whether the global stack is empty, since other 4204 // tasks might be pushing objects to it concurrently. 4205 assert(_cm->out_of_regions(), 4206 "at this point we should be out of regions"); 4207 4208 if (_cm->verbose_low()) { 4209 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id); 4210 } 4211 4212 // Try to reduce the number of available SATB buffers so that 4213 // remark has less work to do. 4214 drain_satb_buffers(); 4215 } 4216 4217 // Since we've done everything else, we can now totally drain the 4218 // local queue and global stack. 4219 drain_local_queue(false); 4220 drain_global_stack(false); 4221 4222 // Attempt at work stealing from other task's queues. 4223 if (do_stealing && !has_aborted()) { 4224 // We have not aborted. This means that we have finished all that 4225 // we could. Let's try to do some stealing... 4226 4227 // We cannot check whether the global stack is empty, since other 4228 // tasks might be pushing objects to it concurrently. 4229 assert(_cm->out_of_regions() && _task_queue->size() == 0, 4230 "only way to reach here"); 4231 4232 if (_cm->verbose_low()) { 4233 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id); 4234 } 4235 4236 while (!has_aborted()) { 4237 oop obj; 4238 statsOnly( ++_steal_attempts ); 4239 4240 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 4241 if (_cm->verbose_medium()) { 4242 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully", 4243 _worker_id, (void*) obj); 4244 } 4245 4246 statsOnly( ++_steals ); 4247 4248 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 4249 "any stolen object should be marked"); 4250 scan_object(obj); 4251 4252 // And since we're towards the end, let's totally drain the 4253 // local queue and global stack. 4254 drain_local_queue(false); 4255 drain_global_stack(false); 4256 } else { 4257 break; 4258 } 4259 } 4260 } 4261 4262 // If we are about to wrap up and go into termination, check if we 4263 // should raise the overflow flag. 4264 if (do_termination && !has_aborted()) { 4265 if (_cm->force_overflow()->should_force()) { 4266 _cm->set_has_overflown(); 4267 regular_clock_call(); 4268 } 4269 } 4270 4271 // We still haven't aborted. 
Now, let's try to get into the 4272 // termination protocol. 4273 if (do_termination && !has_aborted()) { 4274 // We cannot check whether the global stack is empty, since other 4275 // tasks might be concurrently pushing objects on it. 4276 // Separated the asserts so that we know which one fires. 4277 assert(_cm->out_of_regions(), "only way to reach here"); 4278 assert(_task_queue->size() == 0, "only way to reach here"); 4279 4280 if (_cm->verbose_low()) { 4281 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); 4282 } 4283 4284 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 4285 4286 // The CMTask class also extends the TerminatorTerminator class, 4287 // hence its should_exit_termination() method will also decide 4288 // whether to exit the termination protocol or not. 4289 bool finished = (is_serial || 4290 _cm->terminator()->offer_termination(this)); 4291 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 4292 _termination_time_ms += 4293 termination_end_time_ms - _termination_start_time_ms; 4294 4295 if (finished) { 4296 // We're all done. 4297 4298 if (_worker_id == 0) { 4299 // let's allow task 0 to do this 4300 if (concurrent()) { 4301 assert(_cm->concurrent_marking_in_progress(), "invariant"); 4302 // we need to set this to false before the next 4303 // safepoint. This way we ensure that the marking phase 4304 // doesn't observe any more heap expansions. 4305 _cm->clear_concurrent_marking_in_progress(); 4306 } 4307 } 4308 4309 // We can now guarantee that the global stack is empty, since 4310 // all other tasks have finished. We separated the guarantees so 4311 // that, if a condition is false, we can immediately find out 4312 // which one. 4313 guarantee(_cm->out_of_regions(), "only way to reach here"); 4314 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 4315 guarantee(_task_queue->size() == 0, "only way to reach here"); 4316 guarantee(!_cm->has_overflown(), "only way to reach here"); 4317 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 4318 4319 if (_cm->verbose_low()) { 4320 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); 4321 } 4322 } else { 4323 // Apparently there's more work to do. Let's abort this task. It 4324 // will restart it and we can hopefully find more things to do. 4325 4326 if (_cm->verbose_low()) { 4327 gclog_or_tty->print_cr("[%u] apparently there is more work to do", 4328 _worker_id); 4329 } 4330 4331 set_has_aborted(); 4332 statsOnly( ++_aborted_termination ); 4333 } 4334 } 4335 4336 // Mainly for debugging purposes to make sure that a pointer to the 4337 // closure which was statically allocated in this frame doesn't 4338 // escape it by accident. 4339 set_cm_oop_closure(NULL); 4340 double end_time_ms = os::elapsedVTime() * 1000.0; 4341 double elapsed_time_ms = end_time_ms - _start_time_ms; 4342 // Update the step history. 4343 _step_times_ms.add(elapsed_time_ms); 4344 4345 if (has_aborted()) { 4346 // The task was aborted for some reason. 4347 4348 statsOnly( ++_aborted ); 4349 4350 if (_has_timed_out) { 4351 double diff_ms = elapsed_time_ms - _time_target_ms; 4352 // Keep statistics of how well we did with respect to hitting 4353 // our target only if we actually timed out (if we aborted for 4354 // other reasons, then the results might get skewed). 4355 _marking_step_diffs_ms.add(diff_ms); 4356 } 4357 4358 if (_cm->has_overflown()) { 4359 // This is the interesting one. We aborted because a global 4360 // overflow was raised. 
This means we have to restart the 4361 // marking phase and start iterating over regions. However, in 4362 // order to do this we have to make sure that all tasks stop 4363 // what they are doing and re-initialise in a safe manner. We 4364 // will achieve this with the use of two barrier sync points. 4365 4366 if (_cm->verbose_low()) { 4367 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4368 } 4369 4370 if (!is_serial) { 4371 // We only need to enter the sync barrier if being called 4372 // from a parallel context 4373 _cm->enter_first_sync_barrier(_worker_id); 4374 4375 // When we exit this sync barrier we know that all tasks have 4376 // stopped doing marking work. So, it's now safe to 4377 // re-initialise our data structures. At the end of this method, 4378 // task 0 will clear the global data structures. 4379 } 4380 4381 statsOnly( ++_aborted_overflow ); 4382 4383 // We clear the local state of this task... 4384 clear_region_fields(); 4385 4386 if (!is_serial) { 4387 // ...and enter the second barrier. 4388 _cm->enter_second_sync_barrier(_worker_id); 4389 } 4390 // At this point everything has bee re-initialised and we're 4391 // ready to restart. 4392 } 4393 4394 if (_cm->verbose_low()) { 4395 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4396 "elapsed = %1.2lfms <<<<<<<<<<", 4397 _worker_id, _time_target_ms, elapsed_time_ms); 4398 if (_cm->has_aborted()) { 4399 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4400 _worker_id); 4401 } 4402 } 4403 } else { 4404 if (_cm->verbose_low()) { 4405 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4406 "elapsed = %1.2lfms <<<<<<<<<<", 4407 _worker_id, _time_target_ms, elapsed_time_ms); 4408 } 4409 } 4410 4411 _claimed = false; 4412 } 4413 4414 CMTask::CMTask(uint worker_id, 4415 ConcurrentMark* cm, 4416 size_t* marked_bytes, 4417 BitMap* card_bm, 4418 CMTaskQueue* task_queue, 4419 CMTaskQueueSet* task_queues) 4420 : _g1h(G1CollectedHeap::heap()), 4421 _worker_id(worker_id), _cm(cm), 4422 _claimed(false), 4423 _nextMarkBitMap(NULL), _hash_seed(17), 4424 _task_queue(task_queue), 4425 _task_queues(task_queues), 4426 _cm_oop_closure(NULL), 4427 _marked_bytes_array(marked_bytes), 4428 _card_bm(card_bm) { 4429 guarantee(task_queue != NULL, "invariant"); 4430 guarantee(task_queues != NULL, "invariant"); 4431 4432 statsOnly( _clock_due_to_scanning = 0; 4433 _clock_due_to_marking = 0 ); 4434 4435 _marking_step_diffs_ms.add(0.5); 4436 } 4437 4438 // These are formatting macros that are used below to ensure 4439 // consistent formatting. The *_H_* versions are used to format the 4440 // header for a particular value and they should be kept consistent 4441 // with the corresponding macro. Also note that most of the macros add 4442 // the necessary white space (as a prefix) which makes them a bit 4443 // easier to compose. 4444 4445 // All the output lines are prefixed with this string to be able to 4446 // identify them easily in a large log file. 
4447 #define G1PPRL_LINE_PREFIX "###" 4448 4449 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT 4450 #ifdef _LP64 4451 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 4452 #else // _LP64 4453 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 4454 #endif // _LP64 4455 4456 // For per-region info 4457 #define G1PPRL_TYPE_FORMAT " %-4s" 4458 #define G1PPRL_TYPE_H_FORMAT " %4s" 4459 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) 4460 #define G1PPRL_BYTE_H_FORMAT " %9s" 4461 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 4462 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 4463 4464 // For summary info 4465 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT 4466 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT 4467 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" 4468 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" 4469 4470 G1PrintRegionLivenessInfoClosure:: 4471 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) 4472 : _out(out), 4473 _total_used_bytes(0), _total_capacity_bytes(0), 4474 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4475 _hum_used_bytes(0), _hum_capacity_bytes(0), 4476 _hum_prev_live_bytes(0), _hum_next_live_bytes(0) { 4477 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4478 MemRegion g1_committed = g1h->g1_committed(); 4479 MemRegion g1_reserved = g1h->g1_reserved(); 4480 double now = os::elapsedTime(); 4481 4482 // Print the header of the output. 4483 _out->cr(); 4484 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 4485 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" 4486 G1PPRL_SUM_ADDR_FORMAT("committed") 4487 G1PPRL_SUM_ADDR_FORMAT("reserved") 4488 G1PPRL_SUM_BYTE_FORMAT("region-size"), 4489 g1_committed.start(), g1_committed.end(), 4490 g1_reserved.start(), g1_reserved.end(), 4491 HeapRegion::GrainBytes); 4492 _out->print_cr(G1PPRL_LINE_PREFIX); 4493 _out->print_cr(G1PPRL_LINE_PREFIX 4494 G1PPRL_TYPE_H_FORMAT 4495 G1PPRL_ADDR_BASE_H_FORMAT 4496 G1PPRL_BYTE_H_FORMAT 4497 G1PPRL_BYTE_H_FORMAT 4498 G1PPRL_BYTE_H_FORMAT 4499 G1PPRL_DOUBLE_H_FORMAT, 4500 "type", "address-range", 4501 "used", "prev-live", "next-live", "gc-eff"); 4502 _out->print_cr(G1PPRL_LINE_PREFIX 4503 G1PPRL_TYPE_H_FORMAT 4504 G1PPRL_ADDR_BASE_H_FORMAT 4505 G1PPRL_BYTE_H_FORMAT 4506 G1PPRL_BYTE_H_FORMAT 4507 G1PPRL_BYTE_H_FORMAT 4508 G1PPRL_DOUBLE_H_FORMAT, 4509 "", "", 4510 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)"); 4511 } 4512 4513 // It takes as a parameter a reference to one of the _hum_* fields, it 4514 // deduces the corresponding value for a region in a humongous region 4515 // series (either the region size, or what's left if the _hum_* field 4516 // is < the region size), and updates the _hum_* field accordingly. 4517 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { 4518 size_t bytes = 0; 4519 // The > 0 check is to deal with the prev and next live bytes which 4520 // could be 0. 4521 if (*hum_bytes > 0) { 4522 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); 4523 *hum_bytes -= bytes; 4524 } 4525 return bytes; 4526 } 4527 4528 // It deduces the values for a region in a humongous region series 4529 // from the _hum_* fields and updates those accordingly. It assumes 4530 // that that _hum_* fields have already been set up from the "starts 4531 // humongous" region and we visit the regions in address order. 
4532 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4533 size_t* capacity_bytes, 4534 size_t* prev_live_bytes, 4535 size_t* next_live_bytes) { 4536 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4537 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4538 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4539 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4540 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4541 } 4542 4543 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4544 const char* type = ""; 4545 HeapWord* bottom = r->bottom(); 4546 HeapWord* end = r->end(); 4547 size_t capacity_bytes = r->capacity(); 4548 size_t used_bytes = r->used(); 4549 size_t prev_live_bytes = r->live_bytes(); 4550 size_t next_live_bytes = r->next_live_bytes(); 4551 double gc_eff = r->gc_efficiency(); 4552 if (r->used() == 0) { 4553 type = "FREE"; 4554 } else if (r->is_survivor()) { 4555 type = "SURV"; 4556 } else if (r->is_young()) { 4557 type = "EDEN"; 4558 } else if (r->startsHumongous()) { 4559 type = "HUMS"; 4560 4561 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4562 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4563 "they should have been zeroed after the last time we used them"); 4564 // Set up the _hum_* fields. 4565 _hum_capacity_bytes = capacity_bytes; 4566 _hum_used_bytes = used_bytes; 4567 _hum_prev_live_bytes = prev_live_bytes; 4568 _hum_next_live_bytes = next_live_bytes; 4569 get_hum_bytes(&used_bytes, &capacity_bytes, 4570 &prev_live_bytes, &next_live_bytes); 4571 end = bottom + HeapRegion::GrainWords; 4572 } else if (r->continuesHumongous()) { 4573 type = "HUMC"; 4574 get_hum_bytes(&used_bytes, &capacity_bytes, 4575 &prev_live_bytes, &next_live_bytes); 4576 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4577 } else { 4578 type = "OLD"; 4579 } 4580 4581 _total_used_bytes += used_bytes; 4582 _total_capacity_bytes += capacity_bytes; 4583 _total_prev_live_bytes += prev_live_bytes; 4584 _total_next_live_bytes += next_live_bytes; 4585 4586 // Print a line for this particular region. 4587 _out->print_cr(G1PPRL_LINE_PREFIX 4588 G1PPRL_TYPE_FORMAT 4589 G1PPRL_ADDR_BASE_FORMAT 4590 G1PPRL_BYTE_FORMAT 4591 G1PPRL_BYTE_FORMAT 4592 G1PPRL_BYTE_FORMAT 4593 G1PPRL_DOUBLE_FORMAT, 4594 type, bottom, end, 4595 used_bytes, prev_live_bytes, next_live_bytes, gc_eff); 4596 4597 return false; 4598 } 4599 4600 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4601 // Print the footer of the output. 4602 _out->print_cr(G1PPRL_LINE_PREFIX); 4603 _out->print_cr(G1PPRL_LINE_PREFIX 4604 " SUMMARY" 4605 G1PPRL_SUM_MB_FORMAT("capacity") 4606 G1PPRL_SUM_MB_PERC_FORMAT("used") 4607 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4608 G1PPRL_SUM_MB_PERC_FORMAT("next-live"), 4609 bytes_to_mb(_total_capacity_bytes), 4610 bytes_to_mb(_total_used_bytes), 4611 perc(_total_used_bytes, _total_capacity_bytes), 4612 bytes_to_mb(_total_prev_live_bytes), 4613 perc(_total_prev_live_bytes, _total_capacity_bytes), 4614 bytes_to_mb(_total_next_live_bytes), 4615 perc(_total_next_live_bytes, _total_capacity_bytes)); 4616 _out->cr(); 4617 }