/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
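  // (Each bit of the map covers 2^_shifter heap words, so a set bit can
  // only correspond to an address at that alignment; rounding up keeps
  // heapWordToOffset() below from mapping addr into the bit that covers
  // a preceding word.)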
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
}
#endif

bool CMBitMap::allocate(ReservedSpace heap_rs) {
  _bmStartWord = (HeapWord*)(heap_rs.base());
  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("ConcurrentMark marking bit map allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
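  // (brs was sized above at one bit per 2^_shifter heap words:
  // _bmWordSize >> _shifter bits, shifted down by a further
  // LogBitsPerByte to convert bits to bytes, plus one byte to round up.)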
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("ConcurrentMark marking bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of concurrent marking bit map?");
  _bm.set_map((uintptr_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
  return true;
}

void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflown the marking stack while marking.
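  // (Note that expand() is reached from checkpointRootsFinal(), i.e. at
  // a safepoint with the stack already drained, so no concurrent pushes
  // can race with the reallocation below.)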
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
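  // Claim the slots under ParGCRareEvent_lock; with the lock held a
  // plain store to _index is sufficient here, unlike the CAS loop in
  // par_push() / par_adjoin_arr() above.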
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return _g1h->is_obj_ill(obj);
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
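  // (_next_survivor is first read without the lock as a fast-path
  // test; the claim itself re-reads and updates it under
  // RootRegionScan_lock below, so a stale first read is harmless.)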
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  _g1h(g1h),
  _markBitMap1(MinObjAlignment - 1),
  _markBitMap2(MinObjAlignment - 1),

  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = "PTR_FORMAT, _heap_start, _heap_end);
  }

  if (!_markBitMap1.allocate(heap_rs)) {
    warning("Failed to allocate first CM bit map");
    return;
  }
  if (!_markBitMap2.allocate(heap_rs)) {
    warning("Failed to allocate second CM bit map");
    return;
  }

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
            "than ParallelGCThreads (" UINT32_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads =       0;
    _max_parallel_marking_threads =   0;
    _sleep_factor             =     0.0;
    _marking_task_overhead    =     1.0;
  } else {
    if (ConcGCThreads > 0) {
      // notice that ConcGCThreads overwrites G1MarkingOverheadPercent
      // if both are set
      _parallel_marking_threads = (uint) ConcGCThreads;
      _max_parallel_marking_threads = _parallel_marking_threads;
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // we will calculate the number of parallel marking threads
      // based on a target overhead with respect to the soft real-time
      // goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
                                              (double) os::processor_count();
      double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

      _parallel_marking_threads = (uint) marking_thread_num;
      _max_parallel_marking_threads = _parallel_marking_threads;
      _sleep_factor             = sleep_factor;
      _marking_task_overhead    = marking_task_overhead;
    } else {
      _parallel_marking_threads = scale_parallel_threads((uint)ParallelGCThreads);
      _max_parallel_marking_threads = _parallel_marking_threads;
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    }

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
                     (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
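  // (The bias means a heap address maps to a counting bitmap index as
  // (uintptr_t(addr) >> card_shift) - _heap_bottom_card_num -- see
  // card_bitmap_index_for() -- so index 0 is the heap's first card.)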
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) heap_rs.base();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.
    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions;
  // these will happen at the end of evacuation pauses, when tasks are
  // inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end   = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // reset all the marking data structures and any necessary flags
  clear_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}

void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(_finger == _heap_end, "only way to get here");
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  clear_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start  = _nextMarkBitMap->startWord();
  HeapWord* end    = _nextMarkBitMap->endWord();
  HeapWord* cur    = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur,next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Clear the liveness counting data
  clear_all_count_data();

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase.
  // So, we initialize this here, at the start of the cycle, so that
  // the remaining forced-overflow count will decrease at every remark
  // and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC or for an
 * evacuation pause to occur. This is actually safe, since entering
 * the sync barrier is one of the last things do_marking_step() does,
 * and it doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _first_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
  }

  // let the task associated with worker 0 do this
  if (worker_id == 0) {
    // task 0 is responsible for clearing the global data structures
    // We should be here because of an overflow. During STW we should
    // not clear the overflow flag since we rely on it being true when
    // we exit this method to abort the pause and restart concurrent
    // marking.
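    // (Hence clear_overflow below is true only when running
    // concurrently; during the STW remark the flag must survive so
    // that checkpointRootsFinal() still sees the overflow.)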
    clear_marking_state(concurrent() /* clear_overflow */);
    force_overflow()->update();

    if (G1Log::fine()) {
      gclog_or_tty->date_stamp(PrintGCDateStamps);
      gclog_or_tty->stamp(PrintGCTimeStamps);
      gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _second_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everything should be re-initialised and ready to go

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    ConcurrentGCThread::stsJoin();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true /* do_stealing    */,
                                  true /* do_termination */);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          ConcurrentGCThread::stsLeave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          ConcurrentGCThread::stsJoin();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
        gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                               "overhead %1.4lf",
                               elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                               the_task->conc_overhead(os::elapsedTime()) * 8.0);
        gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                               elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
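    // (The loop above is the marking overhead throttle: each time a
    // marking step aborts without a global abort, the worker sleeps
    // for elapsed_vtime * sleep_factor, so a sleep factor of 0.0
    // means full-speed marking.)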
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    ConcurrentGCThread::stsLeave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (parallel_marking_threads() > 0) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_phase()"
  set_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (parallel_marking_threads() > 0) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_strong_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    gclog_or_tty->print(" VerifyDuringGC:(before)");
    Universe::heap()->prepare_for_verify();
    Universe::verify(/* silent */ false,
                     /* option */ VerifyOption_G1UsePrevMarking);
  }

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    // Clear the flag. We do not need it any more.
    clear_has_overflown();
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      gclog_or_tty->print(" VerifyDuringGC:(after)");
      Universe::heap()->prepare_for_verify();
      Universe::verify(/* silent */ false,
                       /* option */ VerifyOption_G1UseNextMarking);
    }
    assert(!restart_for_overflow(), "sanity");
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Reset the marking state if marking completed
  if (!restart_for_overflow()) {
    set_non_marking_state();
  }

#if VERIFY_OBJS_PROCESSED
  _scan_obj_cl.objs_processed = 0;
  ThreadLocalObjQueue::objs_enqueued = 0;
#endif

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
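// (This closure recomputes, from the next mark bitmap alone, what the
// per-region marked-byte count and the expected region/card liveness
// bitmaps should contain; VerifyLiveObjectDataHRClosure below then
// compares that against the data accumulated during marking.)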
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   start, ntams, hr->end()));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrs_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        if (_verbose) {
          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                 "expected: %s, actual: %s",
                                 hr->hrs_index(), i,
                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
        }
        failures += 1;
      }
    }

    if (failures > 0 && _verbose) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
                             HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};


class G1ParVerifyFinalCountTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint    _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int  _failures;
  bool _verbose;

public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0), _verbose(false),
      _n_workers(0) {
    assert(VerifyDuringGC, "don't call this otherwise");

    // Use the value already set as the number of active threads
    // in the call to run_task().
1608 if (G1CollectedHeap::use_parallel_gc_threads()) { 1609 assert( _g1h->workers()->active_workers() > 0, 1610 "Should have been previously set"); 1611 _n_workers = _g1h->workers()->active_workers(); 1612 } else { 1613 _n_workers = 1; 1614 } 1615 1616 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1617 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1618 1619 _verbose = _cm->verbose_medium(); 1620 } 1621 1622 void work(uint worker_id) { 1623 assert(worker_id < _n_workers, "invariant"); 1624 1625 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1626 _actual_region_bm, _actual_card_bm, 1627 _expected_region_bm, 1628 _expected_card_bm, 1629 _verbose); 1630 1631 if (G1CollectedHeap::use_parallel_gc_threads()) { 1632 _g1h->heap_region_par_iterate_chunked(&verify_cl, 1633 worker_id, 1634 _n_workers, 1635 HeapRegion::VerifyCountClaimValue); 1636 } else { 1637 _g1h->heap_region_iterate(&verify_cl); 1638 } 1639 1640 Atomic::add(verify_cl.failures(), &_failures); 1641 } 1642 1643 int failures() const { return _failures; } 1644 }; 1645 1646 // Closure that finalizes the liveness counting data. 1647 // Used during the cleanup pause. 1648 // Sets the bits corresponding to the interval [NTAMS, top] 1649 // (which contains the implicitly live objects) in the 1650 // card liveness bitmap. Also sets the bit for each region, 1651 // containing live data, in the region liveness bitmap. 1652 1653 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1654 public: 1655 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1656 BitMap* region_bm, 1657 BitMap* card_bm) : 1658 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1659 1660 bool doHeapRegion(HeapRegion* hr) { 1661 1662 if (hr->continuesHumongous()) { 1663 // We will ignore these here and process them when their 1664 // associated "starts humongous" region is processed (see 1665 // set_bit_for_heap_region()). Note that we cannot rely on their 1666 // associated "starts humongous" region to have their bit set to 1667 // 1 since, due to the region chunking in the parallel region 1668 // iteration, a "continues humongous" region might be visited 1669 // before its associated "starts humongous". 1670 return false; 1671 } 1672 1673 HeapWord* ntams = hr->next_top_at_mark_start(); 1674 HeapWord* top = hr->top(); 1675 1676 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1677 1678 // Mark the allocated-since-marking portion... 1679 if (ntams < top) { 1680 // This definitely means the region has live objects. 1681 set_bit_for_region(hr); 1682 1683 // Now set the bits in the card bitmap for [ntams, top) 1684 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1685 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1686 1687 // Note: if we're looking at the last region in heap - top 1688 // could be actually just beyond the end of the heap; end_idx 1689 // will then correspond to a (non-existent) card that is also 1690 // just beyond the heap. 
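// For example (hypothetical offsets, with the usual 512-byte G1 cards):
// if ntams is at byte offset 0x1000 from the heap base and top is at
// 0x1380, start_idx names the card covering [0x1000, 0x1200) and
// end_idx initially names the card covering [0x1200, 0x1400). Since the
// range [start_idx, end_idx) set below excludes end_idx, and top is not
// card aligned, end_idx is bumped by one so that the partially covered
// card [0x1200, 0x1400) is included.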
1691 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1692 // end of object is not card aligned - increment to cover 1693 // all the cards spanned by the object 1694 end_idx += 1; 1695 } 1696 1697 assert(end_idx <= _card_bm->size(), 1698 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1699 end_idx, _card_bm->size())); 1700 assert(start_idx < _card_bm->size(), 1701 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1702 start_idx, _card_bm->size())); 1703 1704 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1705 } 1706 1707 // Set the bit for the region if it contains live data 1708 if (hr->next_marked_bytes() > 0) { 1709 set_bit_for_region(hr); 1710 } 1711 1712 return false; 1713 } 1714 }; 1715 1716 class G1ParFinalCountTask: public AbstractGangTask { 1717 protected: 1718 G1CollectedHeap* _g1h; 1719 ConcurrentMark* _cm; 1720 BitMap* _actual_region_bm; 1721 BitMap* _actual_card_bm; 1722 1723 uint _n_workers; 1724 1725 public: 1726 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1727 : AbstractGangTask("G1 final counting"), 1728 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1729 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1730 _n_workers(0) { 1731 // Use the value already set as the number of active threads 1732 // in the call to run_task(). 1733 if (G1CollectedHeap::use_parallel_gc_threads()) { 1734 assert( _g1h->workers()->active_workers() > 0, 1735 "Should have been previously set"); 1736 _n_workers = _g1h->workers()->active_workers(); 1737 } else { 1738 _n_workers = 1; 1739 } 1740 } 1741 1742 void work(uint worker_id) { 1743 assert(worker_id < _n_workers, "invariant"); 1744 1745 FinalCountDataUpdateClosure final_update_cl(_g1h, 1746 _actual_region_bm, 1747 _actual_card_bm); 1748 1749 if (G1CollectedHeap::use_parallel_gc_threads()) { 1750 _g1h->heap_region_par_iterate_chunked(&final_update_cl, 1751 worker_id, 1752 _n_workers, 1753 HeapRegion::FinalCountClaimValue); 1754 } else { 1755 _g1h->heap_region_iterate(&final_update_cl); 1756 } 1757 } 1758 }; 1759 1760 class G1ParNoteEndTask; 1761 1762 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1763 G1CollectedHeap* _g1; 1764 int _worker_num; 1765 size_t _max_live_bytes; 1766 uint _regions_claimed; 1767 size_t _freed_bytes; 1768 FreeRegionList* _local_cleanup_list; 1769 OldRegionSet* _old_proxy_set; 1770 HumongousRegionSet* _humongous_proxy_set; 1771 HRRSCleanupTask* _hrrs_cleanup_task; 1772 double _claimed_region_time; 1773 double _max_region_time; 1774 1775 public: 1776 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1777 int worker_num, 1778 FreeRegionList* local_cleanup_list, 1779 OldRegionSet* old_proxy_set, 1780 HumongousRegionSet* humongous_proxy_set, 1781 HRRSCleanupTask* hrrs_cleanup_task) : 1782 _g1(g1), _worker_num(worker_num), 1783 _max_live_bytes(0), _regions_claimed(0), 1784 _freed_bytes(0), 1785 _claimed_region_time(0.0), _max_region_time(0.0), 1786 _local_cleanup_list(local_cleanup_list), 1787 _old_proxy_set(old_proxy_set), 1788 _humongous_proxy_set(humongous_proxy_set), 1789 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1790 1791 size_t freed_bytes() { return _freed_bytes; } 1792 1793 bool doHeapRegion(HeapRegion *hr) { 1794 if (hr->continuesHumongous()) { 1795 return false; 1796 } 1797 // We use a claim value of zero here because all regions 1798 // were claimed with value 1 in the FinalCount task. 
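// A note on claim values, as used by heap_region_par_iterate_chunked():
// each HeapRegion carries a claim value, and a worker claims a region by
// atomically flipping that value to the current phase's value, roughly
// (a sketch of the mechanism; see heapRegion.hpp for the real thing):
//
//   if (hr->claim_value() != claim && hr->claimHeapRegion(claim)) {
//     // this worker, and only this worker, processes hr in this phase
//   }
//
// so no region is processed twice within a phase, and each parallel
// phase (final count, verify, note end, RS scrub) needs its own
// distinct claim value.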
1799 _g1->reset_gc_time_stamps(hr); 1800 double start = os::elapsedTime(); 1801 _regions_claimed++; 1802 hr->note_end_of_marking(); 1803 _max_live_bytes += hr->max_live_bytes(); 1804 _g1->free_region_if_empty(hr, 1805 &_freed_bytes, 1806 _local_cleanup_list, 1807 _old_proxy_set, 1808 _humongous_proxy_set, 1809 _hrrs_cleanup_task, 1810 true /* par */); 1811 double region_time = (os::elapsedTime() - start); 1812 _claimed_region_time += region_time; 1813 if (region_time > _max_region_time) { 1814 _max_region_time = region_time; 1815 } 1816 return false; 1817 } 1818 1819 size_t max_live_bytes() { return _max_live_bytes; } 1820 uint regions_claimed() { return _regions_claimed; } 1821 double claimed_region_time_sec() { return _claimed_region_time; } 1822 double max_region_time_sec() { return _max_region_time; } 1823 }; 1824 1825 class G1ParNoteEndTask: public AbstractGangTask { 1826 friend class G1NoteEndOfConcMarkClosure; 1827 1828 protected: 1829 G1CollectedHeap* _g1h; 1830 size_t _max_live_bytes; 1831 size_t _freed_bytes; 1832 FreeRegionList* _cleanup_list; 1833 1834 public: 1835 G1ParNoteEndTask(G1CollectedHeap* g1h, 1836 FreeRegionList* cleanup_list) : 1837 AbstractGangTask("G1 note end"), _g1h(g1h), 1838 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { } 1839 1840 void work(uint worker_id) { 1841 double start = os::elapsedTime(); 1842 FreeRegionList local_cleanup_list("Local Cleanup List"); 1843 OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set"); 1844 HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set"); 1845 HRRSCleanupTask hrrs_cleanup_task; 1846 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list, 1847 &old_proxy_set, 1848 &humongous_proxy_set, 1849 &hrrs_cleanup_task); 1850 if (G1CollectedHeap::use_parallel_gc_threads()) { 1851 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id, 1852 _g1h->workers()->active_workers(), 1853 HeapRegion::NoteEndClaimValue); 1854 } else { 1855 _g1h->heap_region_iterate(&g1_note_end); 1856 } 1857 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1858 1859 // Now update the lists 1860 _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(), 1861 NULL /* free_list */, 1862 &old_proxy_set, 1863 &humongous_proxy_set, 1864 true /* par */); 1865 { 1866 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1867 _max_live_bytes += g1_note_end.max_live_bytes(); 1868 _freed_bytes += g1_note_end.freed_bytes(); 1869 1870 // If we iterate over the global cleanup list at the end of 1871 // cleanup to do this printing we will not guarantee to only 1872 // generate output for the newly-reclaimed regions (the list 1873 // might not be empty at the beginning of cleanup; we might 1874 // still be working on its previous contents). So we do the 1875 // printing here, before we append the new regions to the global 1876 // cleanup list. 
1877 1878 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1879 if (hr_printer->is_active()) { 1880 HeapRegionLinkedListIterator iter(&local_cleanup_list); 1881 while (iter.more_available()) { 1882 HeapRegion* hr = iter.get_next(); 1883 hr_printer->cleanup(hr); 1884 } 1885 } 1886 1887 _cleanup_list->add_as_tail(&local_cleanup_list); 1888 assert(local_cleanup_list.is_empty(), "post-condition"); 1889 1890 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1891 } 1892 } 1893 size_t max_live_bytes() { return _max_live_bytes; } 1894 size_t freed_bytes() { return _freed_bytes; } 1895 }; 1896 1897 class G1ParScrubRemSetTask: public AbstractGangTask { 1898 protected: 1899 G1RemSet* _g1rs; 1900 BitMap* _region_bm; 1901 BitMap* _card_bm; 1902 public: 1903 G1ParScrubRemSetTask(G1CollectedHeap* g1h, 1904 BitMap* region_bm, BitMap* card_bm) : 1905 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), 1906 _region_bm(region_bm), _card_bm(card_bm) { } 1907 1908 void work(uint worker_id) { 1909 if (G1CollectedHeap::use_parallel_gc_threads()) { 1910 _g1rs->scrub_par(_region_bm, _card_bm, worker_id, 1911 HeapRegion::ScrubRemSetClaimValue); 1912 } else { 1913 _g1rs->scrub(_region_bm, _card_bm); 1914 } 1915 } 1916 1917 }; 1918 1919 void ConcurrentMark::cleanup() { 1920 // world is stopped at this checkpoint 1921 assert(SafepointSynchronize::is_at_safepoint(), 1922 "world should be stopped"); 1923 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1924 1925 // If a full collection has happened, we shouldn't do this. 1926 if (has_aborted()) { 1927 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1928 return; 1929 } 1930 1931 HRSPhaseSetter x(HRSPhaseCleanup); 1932 g1h->verify_region_sets_optional(); 1933 1934 if (VerifyDuringGC) { 1935 HandleMark hm; // handle scope 1936 gclog_or_tty->print(" VerifyDuringGC:(before)"); 1937 Universe::heap()->prepare_for_verify(); 1938 Universe::verify(/* silent */ false, 1939 /* option */ VerifyOption_G1UsePrevMarking); 1940 } 1941 1942 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 1943 g1p->record_concurrent_mark_cleanup_start(); 1944 1945 double start = os::elapsedTime(); 1946 1947 HeapRegionRemSet::reset_for_cleanup_tasks(); 1948 1949 uint n_workers; 1950 1951 // Do counting once more with the world stopped for good measure. 1952 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 1953 1954 if (G1CollectedHeap::use_parallel_gc_threads()) { 1955 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 1956 "sanity check"); 1957 1958 g1h->set_par_threads(); 1959 n_workers = g1h->n_par_threads(); 1960 assert(g1h->n_par_threads() == n_workers, 1961 "Should not have been reset"); 1962 g1h->workers()->run_task(&g1_par_count_task); 1963 // Done with the parallel phase so reset to 0. 1964 g1h->set_par_threads(0); 1965 1966 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue), 1967 "sanity check"); 1968 } else { 1969 n_workers = 1; 1970 g1_par_count_task.work(0); 1971 } 1972 1973 if (VerifyDuringGC) { 1974 // Verify that the counting data accumulated during marking matches 1975 // that calculated by walking the marking bitmap. 
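// The comparison is deliberately one-sided. In terms of the data being
// compared, the conditions verified are (a summary, not extra checks):
//
//   exp_marked_bytes <= act_marked_bytes   // no live data missed
//   exp_region_bit   => act_region_bit     // implication, not equality
//   exp_card_bit(i)  => act_card_bit(i)    // for every card i in a region
//
// i.e. the data accumulated during marking may be a superset of what a
// fresh walk of the mark bitmap computes, but never a subset.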
1976 1977 // Bitmaps to hold expected values 1978 BitMap expected_region_bm(_region_bm.size(), false); 1979 BitMap expected_card_bm(_card_bm.size(), false); 1980 1981 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 1982 &_region_bm, 1983 &_card_bm, 1984 &expected_region_bm, 1985 &expected_card_bm); 1986 1987 if (G1CollectedHeap::use_parallel_gc_threads()) { 1988 g1h->set_par_threads((int)n_workers); 1989 g1h->workers()->run_task(&g1_par_verify_task); 1990 // Done with the parallel phase so reset to 0. 1991 g1h->set_par_threads(0); 1992 1993 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue), 1994 "sanity check"); 1995 } else { 1996 g1_par_verify_task.work(0); 1997 } 1998 1999 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); 2000 } 2001 2002 size_t start_used_bytes = g1h->used(); 2003 g1h->set_marking_complete(); 2004 2005 double count_end = os::elapsedTime(); 2006 double this_final_counting_time = (count_end - start); 2007 _total_counting_time += this_final_counting_time; 2008 2009 if (G1PrintRegionLivenessInfo) { 2010 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); 2011 _g1h->heap_region_iterate(&cl); 2012 } 2013 2014 // Install newly created mark bitMap as "prev". 2015 swapMarkBitMaps(); 2016 2017 g1h->reset_gc_time_stamp(); 2018 2019 // Note end of marking in all heap regions. 2020 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list); 2021 if (G1CollectedHeap::use_parallel_gc_threads()) { 2022 g1h->set_par_threads((int)n_workers); 2023 g1h->workers()->run_task(&g1_par_note_end_task); 2024 g1h->set_par_threads(0); 2025 2026 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), 2027 "sanity check"); 2028 } else { 2029 g1_par_note_end_task.work(0); 2030 } 2031 g1h->check_gc_time_stamps(); 2032 2033 if (!cleanup_list_is_empty()) { 2034 // The cleanup list is not empty, so we'll have to process it 2035 // concurrently. Notify anyone else that might be wanting free 2036 // regions that there will be more free regions coming soon. 2037 g1h->set_free_regions_coming(); 2038 } 2039 2040 // It is important that we do the RS scrubbing before the record_concurrent_mark_cleanup_end() 2041 // call below, since it affects the metric by which we sort the heap regions. 2042 if (G1ScrubRemSets) { 2043 double rs_scrub_start = os::elapsedTime(); 2044 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); 2045 if (G1CollectedHeap::use_parallel_gc_threads()) { 2046 g1h->set_par_threads((int)n_workers); 2047 g1h->workers()->run_task(&g1_par_scrub_rs_task); 2048 g1h->set_par_threads(0); 2049 2050 assert(g1h->check_heap_region_claim_values( 2051 HeapRegion::ScrubRemSetClaimValue), 2052 "sanity check"); 2053 } else { 2054 g1_par_scrub_rs_task.work(0); 2055 } 2056 2057 double rs_scrub_end = os::elapsedTime(); 2058 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); 2059 _total_rs_scrub_time += this_rs_scrub_time; 2060 } 2061 2062 // this will also free any regions totally full of garbage objects, 2063 // and sort the regions. 2064 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers); 2065 2066 // Statistics. 2067 double end = os::elapsedTime(); 2068 _cleanup_times.add((end - start) * 1000.0); 2069 2070 if (G1Log::fine()) { 2071 g1h->print_size_transition(gclog_or_tty, 2072 start_used_bytes, 2073 g1h->used(), 2074 g1h->capacity()); 2075 } 2076 2077 // Clean up will have freed any regions completely full of garbage. 2078 // Update the soft reference policy with the new heap occupancy.
2079 Universe::update_heap_info_at_gc(); 2080 2081 // We need to make this be a "collection" so any collection pause that 2082 // races with it goes around and waits for completeCleanup to finish. 2083 g1h->increment_total_collections(); 2084 2085 // We reclaimed old regions so we should calculate the sizes to make 2086 // sure we update the old gen/space data. 2087 g1h->g1mm()->update_sizes(); 2088 2089 if (VerifyDuringGC) { 2090 HandleMark hm; // handle scope 2091 gclog_or_tty->print(" VerifyDuringGC:(after)"); 2092 Universe::heap()->prepare_for_verify(); 2093 Universe::verify(/* silent */ false, 2094 /* option */ VerifyOption_G1UsePrevMarking); 2095 } 2096 2097 g1h->verify_region_sets_optional(); 2098 } 2099 2100 void ConcurrentMark::completeCleanup() { 2101 if (has_aborted()) return; 2102 2103 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2104 2105 _cleanup_list.verify_optional(); 2106 FreeRegionList tmp_free_list("Tmp Free List"); 2107 2108 if (G1ConcRegionFreeingVerbose) { 2109 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 2110 "cleanup list has %u entries", 2111 _cleanup_list.length()); 2112 } 2113 2114 // No one else should be accessing the _cleanup_list at this point, 2115 // so it's not necessary to take any locks 2116 while (!_cleanup_list.is_empty()) { 2117 HeapRegion* hr = _cleanup_list.remove_head(); 2118 assert(hr != NULL, "the list was not empty"); 2119 hr->par_clear(); 2120 tmp_free_list.add_as_tail(hr); 2121 2122 // Instead of adding one region at a time to the secondary_free_list, 2123 // we accumulate them in the local list and move them a few at a 2124 // time. This also cuts down on the number of notify_all() calls 2125 // we do during this process. We'll also append the local list when 2126 // _cleanup_list is empty (which means we just removed the last 2127 // region from the _cleanup_list).
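// For example, if G1SecondaryFreeListAppendLength is 5 (an illustrative
// value only), the local list is appended to the secondary_free_list
// (and emptied) each time it reaches 5 entries, plus one final time
// when _cleanup_list drains, so waiters on SecondaryFreeList_lock are
// woken once per batch rather than once per region.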
2128 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || 2129 _cleanup_list.is_empty()) { 2130 if (G1ConcRegionFreeingVerbose) { 2131 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 2132 "appending %u entries to the secondary_free_list, " 2133 "cleanup list still has %u entries", 2134 tmp_free_list.length(), 2135 _cleanup_list.length()); 2136 } 2137 2138 { 2139 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 2140 g1h->secondary_free_list_add_as_tail(&tmp_free_list); 2141 SecondaryFreeList_lock->notify_all(); 2142 } 2143 2144 if (G1StressConcRegionFreeing) { 2145 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { 2146 os::sleep(Thread::current(), (jlong) 1, false); 2147 } 2148 } 2149 } 2150 } 2151 assert(tmp_free_list.is_empty(), "post-condition"); 2152 } 2153 2154 // Support closures for reference processing in G1 2155 2156 bool G1CMIsAliveClosure::do_object_b(oop obj) { 2157 HeapWord* addr = (HeapWord*)obj; 2158 return addr != NULL && 2159 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); 2160 } 2161 2162 class G1CMKeepAliveClosure: public ExtendedOopClosure { 2163 G1CollectedHeap* _g1; 2164 ConcurrentMark* _cm; 2165 public: 2166 G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm) : 2167 _g1(g1), _cm(cm) { 2168 assert(Thread::current()->is_VM_thread(), "otherwise fix worker id"); 2169 } 2170 2171 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2172 virtual void do_oop( oop* p) { do_oop_work(p); } 2173 2174 template <class T> void do_oop_work(T* p) { 2175 oop obj = oopDesc::load_decode_heap_oop(p); 2176 HeapWord* addr = (HeapWord*)obj; 2177 2178 if (_cm->verbose_high()) { 2179 gclog_or_tty->print_cr("\t[0] we're looking at location " 2180 "*"PTR_FORMAT" = "PTR_FORMAT, 2181 p, (void*) obj); 2182 } 2183 2184 if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) { 2185 _cm->mark_and_count(obj); 2186 _cm->mark_stack_push(obj); 2187 } 2188 } 2189 }; 2190 2191 class G1CMDrainMarkingStackClosure: public VoidClosure { 2192 ConcurrentMark* _cm; 2193 CMMarkStack* _markStack; 2194 G1CMKeepAliveClosure* _oopClosure; 2195 public: 2196 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMMarkStack* markStack, 2197 G1CMKeepAliveClosure* oopClosure) : 2198 _cm(cm), 2199 _markStack(markStack), 2200 _oopClosure(oopClosure) { } 2201 2202 void do_void() { 2203 _markStack->drain(_oopClosure, _cm->nextMarkBitMap(), false); 2204 } 2205 }; 2206 2207 // 'Keep Alive' closure used by parallel reference processing. 2208 // An instance of this closure is used in the parallel reference processing 2209 // code rather than an instance of G1CMKeepAliveClosure. We could have used 2210 // the G1CMKeepAliveClosure as it is MT-safe. Also reference objects are 2211 // placed on to discovered ref lists once so we can mark and push with no 2212 // need to check whether the object has already been marked. Using the 2213 // G1CMKeepAliveClosure would mean, however, having all the worker threads 2214 // operating on the global mark stack. This means that an individual 2215 // worker would be doing lock-free pushes while it processes its own 2216 // discovered ref list followed by a drain call. If the discovered ref lists 2217 // are unbalanced then this could cause interference with the other 2218 // workers. Using a CMTask (and its embedded local data structures) 2219 // avoids that potential interference.
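// The intended setup is one closure pair per worker, as done in
// G1CMRefProcTaskProxy::work() further down (a sketch of that code):
//
//   CMTask* task = _cm->task(worker_id);
//   G1CMParKeepAliveAndDrainClosure keep_alive(_cm, task);
//   G1CMParDrainMarkingStackClosure drain(_cm, task);
//   _proc_task.work(worker_id, is_alive, keep_alive, drain);
//
// so each worker marks and drains through its own CMTask-local queues.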
2220 class G1CMParKeepAliveAndDrainClosure: public OopClosure { 2221 ConcurrentMark* _cm; 2222 CMTask* _task; 2223 int _ref_counter_limit; 2224 int _ref_counter; 2225 public: 2226 G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) : 2227 _cm(cm), _task(task), 2228 _ref_counter_limit(G1RefProcDrainInterval) { 2229 assert(_ref_counter_limit > 0, "sanity"); 2230 _ref_counter = _ref_counter_limit; 2231 } 2232 2233 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2234 virtual void do_oop( oop* p) { do_oop_work(p); } 2235 2236 template <class T> void do_oop_work(T* p) { 2237 if (!_cm->has_overflown()) { 2238 oop obj = oopDesc::load_decode_heap_oop(p); 2239 if (_cm->verbose_high()) { 2240 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2241 "*"PTR_FORMAT" = "PTR_FORMAT, 2242 _task->worker_id(), p, (void*) obj); 2243 } 2244 2245 _task->deal_with_reference(obj); 2246 _ref_counter--; 2247 2248 if (_ref_counter == 0) { 2249 // We have dealt with _ref_counter_limit references, pushing them and objects 2250 // reachable from them on to the local stack (and possibly the global stack). 2251 // Call do_marking_step() to process these entries. We call the routine in a 2252 // loop, which we'll exit if there's nothing more to do (i.e. we're done 2253 // with the entries that we've pushed as a result of the deal_with_reference 2254 // calls above) or we overflow. 2255 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag 2256 // while there may still be some work to do. (See the comment at the 2257 // beginning of CMTask::do_marking_step() for those conditions - one of which 2258 // is reaching the specified time target.) It is only when 2259 // CMTask::do_marking_step() returns without setting the has_aborted() flag 2260 // that the marking has completed. 2261 do { 2262 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2263 _task->do_marking_step(mark_step_duration_ms, 2264 false /* do_stealing */, 2265 false /* do_termination */); 2266 } while (_task->has_aborted() && !_cm->has_overflown()); 2267 _ref_counter = _ref_counter_limit; 2268 } 2269 } else { 2270 if (_cm->verbose_high()) { 2271 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2272 } 2273 } 2274 } 2275 }; 2276 2277 class G1CMParDrainMarkingStackClosure: public VoidClosure { 2278 ConcurrentMark* _cm; 2279 CMTask* _task; 2280 public: 2281 G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) : 2282 _cm(cm), _task(task) { } 2283 2284 void do_void() { 2285 do { 2286 if (_cm->verbose_high()) { 2287 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step", 2288 _task->worker_id()); 2289 } 2290 2291 // We call CMTask::do_marking_step() to completely drain the local and 2292 // global marking stacks. The routine is called in a loop, which we'll 2293 // exit if there's nothing more to do (i.e. we've completely drained the 2294 // entries that were pushed as a result of applying the 2295 // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref 2296 // lists above) or we overflow the global marking stack. 2297 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag 2298 // while there may still be some work to do. (See the comment at the 2299 // beginning of CMTask::do_marking_step() for those conditions - one of which 2300 // is reaching the specified time target.) It is only when 2301 // CMTask::do_marking_step() returns without setting the has_aborted() flag 2302 // that the marking has completed.
2303 2304 _task->do_marking_step(1000000000.0 /* something very large */, 2305 true /* do_stealing */, 2306 true /* do_termination */); 2307 } while (_task->has_aborted() && !_cm->has_overflown()); 2308 } 2309 }; 2310 2311 // Implementation of AbstractRefProcTaskExecutor for parallel 2312 // reference processing at the end of G1 concurrent marking 2313 2314 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 2315 private: 2316 G1CollectedHeap* _g1h; 2317 ConcurrentMark* _cm; 2318 WorkGang* _workers; 2319 int _active_workers; 2320 2321 public: 2322 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 2323 ConcurrentMark* cm, 2324 WorkGang* workers, 2325 int n_workers) : 2326 _g1h(g1h), _cm(cm), 2327 _workers(workers), _active_workers(n_workers) { } 2328 2329 // Executes the given task using concurrent marking worker threads. 2330 virtual void execute(ProcessTask& task); 2331 virtual void execute(EnqueueTask& task); 2332 }; 2333 2334 class G1CMRefProcTaskProxy: public AbstractGangTask { 2335 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2336 ProcessTask& _proc_task; 2337 G1CollectedHeap* _g1h; 2338 ConcurrentMark* _cm; 2339 2340 public: 2341 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2342 G1CollectedHeap* g1h, 2343 ConcurrentMark* cm) : 2344 AbstractGangTask("Process reference objects in parallel"), 2345 _proc_task(proc_task), _g1h(g1h), _cm(cm) { } 2346 2347 virtual void work(uint worker_id) { 2348 CMTask* marking_task = _cm->task(worker_id); 2349 G1CMIsAliveClosure g1_is_alive(_g1h); 2350 G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task); 2351 G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task); 2352 2353 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2354 } 2355 }; 2356 2357 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2358 assert(_workers != NULL, "Need parallel worker threads."); 2359 2360 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2361 2362 // We need to reset the phase for each task execution so that 2363 // the termination protocol of CMTask::do_marking_step works. 2364 _cm->set_phase(_active_workers, false /* concurrent */); 2365 _g1h->set_par_threads(_active_workers); 2366 _workers->run_task(&proc_task_proxy); 2367 _g1h->set_par_threads(0); 2368 } 2369 2370 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2371 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2372 EnqueueTask& _enq_task; 2373 2374 public: 2375 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2376 AbstractGangTask("Enqueue reference objects in parallel"), 2377 _enq_task(enq_task) { } 2378 2379 virtual void work(uint worker_id) { 2380 _enq_task.work(worker_id); 2381 } 2382 }; 2383 2384 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2385 assert(_workers != NULL, "Need parallel worker threads."); 2386 2387 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2388 2389 _g1h->set_par_threads(_active_workers); 2390 _workers->run_task(&enq_task_proxy); 2391 _g1h->set_par_threads(0); 2392 } 2393 2394 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2395 ResourceMark rm; 2396 HandleMark hm; 2397 2398 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2399 2400 // Is alive closure. 2401 G1CMIsAliveClosure g1_is_alive(g1h); 2402 2403 // Inner scope to exclude the cleaning of the string and symbol 2404 // tables from the displayed time. 
2405 { 2406 if (G1Log::finer()) { 2407 gclog_or_tty->put(' '); 2408 } 2409 TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty); 2410 2411 ReferenceProcessor* rp = g1h->ref_processor_cm(); 2412 2413 // See the comment in G1CollectedHeap::ref_processing_init() 2414 // about how reference processing currently works in G1. 2415 2416 // Process weak references. 2417 rp->setup_policy(clear_all_soft_refs); 2418 assert(_markStack.isEmpty(), "mark stack should be empty"); 2419 2420 G1CMKeepAliveClosure g1_keep_alive(g1h, this); 2421 G1CMDrainMarkingStackClosure 2422 g1_drain_mark_stack(this, &_markStack, &g1_keep_alive); 2423 2424 // We use the work gang from the G1CollectedHeap and we utilize all 2425 // the worker threads. 2426 uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U; 2427 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); 2428 2429 G1CMRefProcTaskExecutor par_task_executor(g1h, this, 2430 g1h->workers(), active_workers); 2431 2432 if (rp->processing_is_mt()) { 2433 // Set the degree of MT here. If the discovery is done MT, there 2434 // may have been a different number of threads doing the discovery 2435 // and a different number of discovered lists may have Ref objects. 2436 // That is OK as long as the Reference lists are balanced (see 2437 // balance_all_queues() and balance_queues()). 2438 rp->set_active_mt_degree(active_workers); 2439 2440 rp->process_discovered_references(&g1_is_alive, 2441 &g1_keep_alive, 2442 &g1_drain_mark_stack, 2443 &par_task_executor); 2444 2445 // The work routines of the parallel keep_alive and drain_marking_stack 2446 // will set the has_overflown flag if we overflow the global marking 2447 // stack. 2448 } else { 2449 rp->process_discovered_references(&g1_is_alive, 2450 &g1_keep_alive, 2451 &g1_drain_mark_stack, 2452 NULL); 2453 } 2454 2455 assert(_markStack.overflow() || _markStack.isEmpty(), 2456 "mark stack should be empty (unless it overflowed)"); 2457 if (_markStack.overflow()) { 2458 // Should have been done already when we tried to push an 2459 // entry on to the global mark stack. But let's do it again. 2460 set_has_overflown(); 2461 } 2462 2463 if (rp->processing_is_mt()) { 2464 assert(rp->num_q() == active_workers, "why not"); 2465 rp->enqueue_discovered_references(&par_task_executor); 2466 } else { 2467 rp->enqueue_discovered_references(); 2468 } 2469 2470 rp->verify_no_references_recorded(); 2471 assert(!rp->discovery_enabled(), "Post condition"); 2472 } 2473 2474 // Now clean up stale oops in StringTable 2475 StringTable::unlink(&g1_is_alive); 2476 // Clean up unreferenced symbols in symbol table. 2477 SymbolTable::unlink(); 2478 } 2479 2480 void ConcurrentMark::swapMarkBitMaps() { 2481 CMBitMapRO* temp = _prevMarkBitMap; 2482 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2483 _nextMarkBitMap = (CMBitMap*) temp; 2484 } 2485 2486 class CMRemarkTask: public AbstractGangTask { 2487 private: 2488 ConcurrentMark *_cm; 2489 2490 public: 2491 void work(uint worker_id) { 2492 // Since all available tasks are actually started, we should 2493 // only proceed if we're supposed to be active. 2494 if (worker_id < _cm->active_tasks()) { 2495 CMTask* task = _cm->task(worker_id); 2496 task->record_start_time(); 2497 do { 2498 task->do_marking_step(1000000000.0 /* something very large */, 2499 true /* do_stealing */, 2500 true /* do_termination */); 2501 } while (task->has_aborted() && !_cm->has_overflown()); 2502 // If we overflow, then we do not want to restart.
We instead 2503 // want to abort remark and do concurrent marking again. 2504 task->record_end_time(); 2505 } 2506 } 2507 2508 CMRemarkTask(ConcurrentMark* cm, int active_workers) : 2509 AbstractGangTask("Par Remark"), _cm(cm) { 2510 _cm->terminator()->reset_for_reuse(active_workers); 2511 } 2512 }; 2513 2514 void ConcurrentMark::checkpointRootsFinalWork() { 2515 ResourceMark rm; 2516 HandleMark hm; 2517 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2518 2519 g1h->ensure_parsability(false); 2520 2521 if (G1CollectedHeap::use_parallel_gc_threads()) { 2522 G1CollectedHeap::StrongRootsScope srs(g1h); 2523 // this is remark, so we'll use up all active threads 2524 uint active_workers = g1h->workers()->active_workers(); 2525 if (active_workers == 0) { 2526 assert(active_workers > 0, "Should have been set earlier"); 2527 active_workers = (uint) ParallelGCThreads; 2528 g1h->workers()->set_active_workers(active_workers); 2529 } 2530 set_phase(active_workers, false /* concurrent */); 2531 // Leave _parallel_marking_threads at its 2532 // value originally calculated in the ConcurrentMark 2533 // constructor and pass values of the active workers 2534 // through the gang in the task. 2535 2536 CMRemarkTask remarkTask(this, active_workers); 2537 g1h->set_par_threads(active_workers); 2538 g1h->workers()->run_task(&remarkTask); 2539 g1h->set_par_threads(0); 2540 } else { 2541 G1CollectedHeap::StrongRootsScope srs(g1h); 2542 // this is remark, so we'll use up all available threads 2543 uint active_workers = 1; 2544 set_phase(active_workers, false /* concurrent */); 2545 2546 CMRemarkTask remarkTask(this, active_workers); 2547 // We will start all available threads, even if we decide that the 2548 // active_workers will be fewer. The extra ones will just bail out 2549 // immediately.
2550 remarkTask.work(0); 2551 } 2552 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2553 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant"); 2554 2555 print_stats(); 2556 2557 #if VERIFY_OBJS_PROCESSED 2558 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) { 2559 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.", 2560 _scan_obj_cl.objs_processed, 2561 ThreadLocalObjQueue::objs_enqueued); 2562 guarantee(_scan_obj_cl.objs_processed == 2563 ThreadLocalObjQueue::objs_enqueued, 2564 "Different number of objs processed and enqueued."); 2565 } 2566 #endif 2567 } 2568 2569 #ifndef PRODUCT 2570 2571 class PrintReachableOopClosure: public OopClosure { 2572 private: 2573 G1CollectedHeap* _g1h; 2574 outputStream* _out; 2575 VerifyOption _vo; 2576 bool _all; 2577 2578 public: 2579 PrintReachableOopClosure(outputStream* out, 2580 VerifyOption vo, 2581 bool all) : 2582 _g1h(G1CollectedHeap::heap()), 2583 _out(out), _vo(vo), _all(all) { } 2584 2585 void do_oop(narrowOop* p) { do_oop_work(p); } 2586 void do_oop( oop* p) { do_oop_work(p); } 2587 2588 template <class T> void do_oop_work(T* p) { 2589 oop obj = oopDesc::load_decode_heap_oop(p); 2590 const char* str = NULL; 2591 const char* str2 = ""; 2592 2593 if (obj == NULL) { 2594 str = ""; 2595 } else if (!_g1h->is_in_g1_reserved(obj)) { 2596 str = " O"; 2597 } else { 2598 HeapRegion* hr = _g1h->heap_region_containing(obj); 2599 guarantee(hr != NULL, "invariant"); 2600 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); 2601 bool marked = _g1h->is_marked(obj, _vo); 2602 2603 if (over_tams) { 2604 str = " >"; 2605 if (marked) { 2606 str2 = " AND MARKED"; 2607 } 2608 } else if (marked) { 2609 str = " M"; 2610 } else { 2611 str = " NOT"; 2612 } 2613 } 2614 2615 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", 2616 p, (void*) obj, str, str2); 2617 } 2618 }; 2619 2620 class PrintReachableObjectClosure : public ObjectClosure { 2621 private: 2622 G1CollectedHeap* _g1h; 2623 outputStream* _out; 2624 VerifyOption _vo; 2625 bool _all; 2626 HeapRegion* _hr; 2627 2628 public: 2629 PrintReachableObjectClosure(outputStream* out, 2630 VerifyOption vo, 2631 bool all, 2632 HeapRegion* hr) : 2633 _g1h(G1CollectedHeap::heap()), 2634 _out(out), _vo(vo), _all(all), _hr(hr) { } 2635 2636 void do_object(oop o) { 2637 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo); 2638 bool marked = _g1h->is_marked(o, _vo); 2639 bool print_it = _all || over_tams || marked; 2640 2641 if (print_it) { 2642 _out->print_cr(" "PTR_FORMAT"%s", 2643 o, (over_tams) ? " >" : (marked) ? 
" M" : ""); 2644 PrintReachableOopClosure oopCl(_out, _vo, _all); 2645 o->oop_iterate_no_header(&oopCl); 2646 } 2647 } 2648 }; 2649 2650 class PrintReachableRegionClosure : public HeapRegionClosure { 2651 private: 2652 G1CollectedHeap* _g1h; 2653 outputStream* _out; 2654 VerifyOption _vo; 2655 bool _all; 2656 2657 public: 2658 bool doHeapRegion(HeapRegion* hr) { 2659 HeapWord* b = hr->bottom(); 2660 HeapWord* e = hr->end(); 2661 HeapWord* t = hr->top(); 2662 HeapWord* p = _g1h->top_at_mark_start(hr, _vo); 2663 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2664 "TAMS: "PTR_FORMAT, b, e, t, p); 2665 _out->cr(); 2666 2667 HeapWord* from = b; 2668 HeapWord* to = t; 2669 2670 if (to > from) { 2671 _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to); 2672 _out->cr(); 2673 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2674 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2675 _out->cr(); 2676 } 2677 2678 return false; 2679 } 2680 2681 PrintReachableRegionClosure(outputStream* out, 2682 VerifyOption vo, 2683 bool all) : 2684 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } 2685 }; 2686 2687 void ConcurrentMark::print_reachable(const char* str, 2688 VerifyOption vo, 2689 bool all) { 2690 gclog_or_tty->cr(); 2691 gclog_or_tty->print_cr("== Doing heap dump... "); 2692 2693 if (G1PrintReachableBaseFile == NULL) { 2694 gclog_or_tty->print_cr(" #### error: no base file defined"); 2695 return; 2696 } 2697 2698 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2699 (JVM_MAXPATHLEN - 1)) { 2700 gclog_or_tty->print_cr(" #### error: file name too long"); 2701 return; 2702 } 2703 2704 char file_name[JVM_MAXPATHLEN]; 2705 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2706 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2707 2708 fileStream fout(file_name); 2709 if (!fout.is_open()) { 2710 gclog_or_tty->print_cr(" #### error: could not open file"); 2711 return; 2712 } 2713 2714 outputStream* out = &fout; 2715 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); 2716 out->cr(); 2717 2718 out->print_cr("--- ITERATING OVER REGIONS"); 2719 out->cr(); 2720 PrintReachableRegionClosure rcl(out, vo, all); 2721 _g1h->heap_region_iterate(&rcl); 2722 out->cr(); 2723 2724 gclog_or_tty->print_cr(" done"); 2725 gclog_or_tty->flush(); 2726 } 2727 2728 #endif // PRODUCT 2729 2730 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2731 // Note we are overriding the read-only view of the prev map here, via 2732 // the cast. 2733 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2734 } 2735 2736 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2737 _nextMarkBitMap->clearRange(mr); 2738 } 2739 2740 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) { 2741 clearRangePrevBitmap(mr); 2742 clearRangeNextBitmap(mr); 2743 } 2744 2745 HeapRegion* 2746 ConcurrentMark::claim_region(uint worker_id) { 2747 // "checkpoint" the finger 2748 HeapWord* finger = _finger; 2749 2750 // _heap_end will not change underneath our feet; it only changes at 2751 // yield points. 2752 while (finger < _heap_end) { 2753 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2754 2755 // Note on how this code handles humongous regions. In the 2756 // normal case the finger will reach the start of a "starts 2757 // humongous" (SH) region. 
Its end will either be the end of the 2758 // last "continues humongous" (CH) region in the sequence, or the 2759 // standard end of the SH region (if the SH is the only region in 2760 // the sequence). That way claim_region() will skip over the CH 2761 // regions. However, there is a subtle race between a CM thread 2762 // executing this method and a mutator thread doing a humongous 2763 // object allocation. The two are not mutually exclusive as the CM 2764 // thread does not need to hold the Heap_lock when it gets 2765 // here. So there is a chance that claim_region() will come across 2766 // a free region that's in the process of becoming a SH or a CH 2767 // region. In the former case, it will either 2768 // a) Miss the update to the region's end, in which case it will 2769 // visit every subsequent CH region, will find their bitmaps 2770 // empty, and do nothing, or 2771 // b) Observe the update of the region's end (in which case 2772 // it will skip the subsequent CH regions). 2773 // If it comes across a region that suddenly becomes CH, the 2774 // scenario will be similar to b). So, the race between 2775 // claim_region() and a humongous object allocation might force us 2776 // to do a bit of unnecessary work (due to some unnecessary bitmap 2777 // iterations) but it should not introduce any correctness issues. 2778 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); 2779 HeapWord* bottom = curr_region->bottom(); 2780 HeapWord* end = curr_region->end(); 2781 HeapWord* limit = curr_region->next_top_at_mark_start(); 2782 2783 if (verbose_low()) { 2784 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " 2785 "["PTR_FORMAT", "PTR_FORMAT"), " 2786 "limit = "PTR_FORMAT, 2787 worker_id, curr_region, bottom, end, limit); 2788 } 2789 2790 // Is the gap between reading the finger and doing the CAS too long? 2791 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); 2792 if (res == finger) { 2793 // we succeeded 2794 2795 // notice that _finger == end cannot be guaranteed here since 2796 // someone else might have moved the finger even further 2797 assert(_finger >= end, "the finger should have moved forward"); 2798 2799 if (verbose_low()) { 2800 gclog_or_tty->print_cr("[%u] we were successful with region = " 2801 PTR_FORMAT, worker_id, curr_region); 2802 } 2803 2804 if (limit > bottom) { 2805 if (verbose_low()) { 2806 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, " 2807 "returning it ", worker_id, curr_region); 2808 } 2809 return curr_region; 2810 } else { 2811 assert(limit == bottom, 2812 "the region limit should be at bottom"); 2813 if (verbose_low()) { 2814 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, " 2815 "returning NULL", worker_id, curr_region); 2816 } 2817 // we return NULL and the caller should try calling 2818 // claim_region() again.
2819 return NULL; 2820 } 2821 } else { 2822 assert(_finger > finger, "the finger should have moved forward"); 2823 if (verbose_low()) { 2824 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2825 "global finger = "PTR_FORMAT", " 2826 "our finger = "PTR_FORMAT, 2827 worker_id, _finger, finger); 2828 } 2829 2830 // read it again 2831 finger = _finger; 2832 } 2833 } 2834 2835 return NULL; 2836 } 2837 2838 #ifndef PRODUCT 2839 enum VerifyNoCSetOopsPhase { 2840 VerifyNoCSetOopsStack, 2841 VerifyNoCSetOopsQueues, 2842 VerifyNoCSetOopsSATBCompleted, 2843 VerifyNoCSetOopsSATBThread 2844 }; 2845 2846 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2847 private: 2848 G1CollectedHeap* _g1h; 2849 VerifyNoCSetOopsPhase _phase; 2850 int _info; 2851 2852 const char* phase_str() { 2853 switch (_phase) { 2854 case VerifyNoCSetOopsStack: return "Stack"; 2855 case VerifyNoCSetOopsQueues: return "Queue"; 2856 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 2857 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 2858 default: ShouldNotReachHere(); 2859 } 2860 return NULL; 2861 } 2862 2863 void do_object_work(oop obj) { 2864 guarantee(!_g1h->obj_in_cs(obj), 2865 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2866 (void*) obj, phase_str(), _info)); 2867 } 2868 2869 public: 2870 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2871 2872 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2873 _phase = phase; 2874 _info = info; 2875 } 2876 2877 virtual void do_oop(oop* p) { 2878 oop obj = oopDesc::load_decode_heap_oop(p); 2879 do_object_work(obj); 2880 } 2881 2882 virtual void do_oop(narrowOop* p) { 2883 // We should not come across narrow oops while scanning marking 2884 // stacks and SATB buffers. 2885 ShouldNotReachHere(); 2886 } 2887 2888 virtual void do_object(oop obj) { 2889 do_object_work(obj); 2890 } 2891 }; 2892 2893 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 2894 bool verify_enqueued_buffers, 2895 bool verify_thread_buffers, 2896 bool verify_fingers) { 2897 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2898 if (!G1CollectedHeap::heap()->mark_in_progress()) { 2899 return; 2900 } 2901 2902 VerifyNoCSetOopsClosure cl; 2903 2904 if (verify_stacks) { 2905 // Verify entries on the global mark stack 2906 cl.set_phase(VerifyNoCSetOopsStack); 2907 _markStack.oops_do(&cl); 2908 2909 // Verify entries on the task queues 2910 for (uint i = 0; i < _max_worker_id; i += 1) { 2911 cl.set_phase(VerifyNoCSetOopsQueues, i); 2912 CMTaskQueue* queue = _task_queues->queue(i); 2913 queue->oops_do(&cl); 2914 } 2915 } 2916 2917 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 2918 2919 // Verify entries on the enqueued SATB buffers 2920 if (verify_enqueued_buffers) { 2921 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 2922 satb_qs.iterate_completed_buffers_read_only(&cl); 2923 } 2924 2925 // Verify entries on the per-thread SATB buffers 2926 if (verify_thread_buffers) { 2927 cl.set_phase(VerifyNoCSetOopsSATBThread); 2928 satb_qs.iterate_thread_buffers_read_only(&cl); 2929 } 2930 2931 if (verify_fingers) { 2932 // Verify the global finger 2933 HeapWord* global_finger = finger(); 2934 if (global_finger != NULL && global_finger < _heap_end) { 2935 // The global finger always points to a heap region boundary. 
We 2936 // use heap_region_containing_raw() to get the containing region 2937 // given that the global finger could be pointing to a free region 2938 // which subsequently becomes continues humongous. If that 2939 // happens, heap_region_containing() will return the bottom of the 2940 // corresponding starts humongous region and the check below will 2941 // not hold any more. 2942 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 2943 guarantee(global_finger == global_hr->bottom(), 2944 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 2945 global_finger, HR_FORMAT_PARAMS(global_hr))); 2946 } 2947 2948 // Verify the task fingers 2949 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2950 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 2951 CMTask* task = _tasks[i]; 2952 HeapWord* task_finger = task->finger(); 2953 if (task_finger != NULL && task_finger < _heap_end) { 2954 // See above note on the global finger verification. 2955 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 2956 guarantee(task_finger == task_hr->bottom() || 2957 !task_hr->in_collection_set(), 2958 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 2959 task_finger, HR_FORMAT_PARAMS(task_hr))); 2960 } 2961 } 2962 } 2963 } 2964 #endif // PRODUCT 2965 2966 void ConcurrentMark::clear_marking_state(bool clear_overflow) { 2967 _markStack.set_should_expand(); 2968 _markStack.setEmpty(); // Also clears the _markStack overflow flag 2969 if (clear_overflow) { 2970 clear_has_overflown(); 2971 } else { 2972 assert(has_overflown(), "pre-condition"); 2973 } 2974 _finger = _heap_start; 2975 2976 for (uint i = 0; i < _max_worker_id; ++i) { 2977 CMTaskQueue* queue = _task_queues->queue(i); 2978 queue->set_empty(); 2979 } 2980 } 2981 2982 // Aggregate the counting data that was constructed concurrently 2983 // with marking. 2984 class AggregateCountDataHRClosure: public HeapRegionClosure { 2985 G1CollectedHeap* _g1h; 2986 ConcurrentMark* _cm; 2987 CardTableModRefBS* _ct_bs; 2988 BitMap* _cm_card_bm; 2989 uint _max_worker_id; 2990 2991 public: 2992 AggregateCountDataHRClosure(G1CollectedHeap* g1h, 2993 BitMap* cm_card_bm, 2994 uint max_worker_id) : 2995 _g1h(g1h), _cm(g1h->concurrent_mark()), 2996 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())), 2997 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { } 2998 2999 bool doHeapRegion(HeapRegion* hr) { 3000 if (hr->continuesHumongous()) { 3001 // We will ignore these here and process them when their 3002 // associated "starts humongous" region is processed. 3003 // Note that we cannot rely on their associated 3004 // "starts humongous" region to have their bit set to 1 3005 // since, due to the region chunking in the parallel region 3006 // iteration, a "continues humongous" region might be visited 3007 // before its associated "starts humongous". 3008 return false; 3009 } 3010 3011 HeapWord* start = hr->bottom(); 3012 HeapWord* limit = hr->next_top_at_mark_start(); 3013 HeapWord* end = hr->end(); 3014 3015 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(), 3016 err_msg("Preconditions not met - " 3017 "start: "PTR_FORMAT", limit: "PTR_FORMAT", " 3018 "top: "PTR_FORMAT", end: "PTR_FORMAT, 3019 start, limit, hr->top(), hr->end())); 3020 3021 assert(hr->next_marked_bytes() == 0, "Precondition"); 3022 3023 if (start == limit) { 3024 // NTAMS of this region has not been set so nothing to do. 3025 return false; 3026 } 3027 3028 // 'start' should be in the heap. 
3029 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity"); 3030 // 'end' *may* be just beyond the end of the heap (if hr is the last region) 3031 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity"); 3032 3033 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); 3034 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit); 3035 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end); 3036 3037 // If ntams is not card aligned then we bump the card bitmap index 3038 // for limit so that we get all the cards spanned by 3039 // the object ending at ntams. 3040 // Note: if this is the last region in the heap then ntams 3041 // could be actually just beyond the end of the heap; 3042 // limit_idx will then correspond to a (non-existent) card 3043 // that is also outside the heap. 3044 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) { 3045 limit_idx += 1; 3046 } 3047 3048 assert(limit_idx <= end_idx, "or else use atomics"); 3049 3050 // Aggregate the "stripe" in the count data associated with hr. 3051 uint hrs_index = hr->hrs_index(); 3052 size_t marked_bytes = 0; 3053 3054 for (uint i = 0; i < _max_worker_id; i += 1) { 3055 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i); 3056 BitMap* task_card_bm = _cm->count_card_bitmap_for(i); 3057 3058 // Fetch the marked_bytes in this region for task i and 3059 // add it to the running total for this region. 3060 marked_bytes += marked_bytes_array[hrs_index]; 3061 3062 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx) 3063 // into the global card bitmap. 3064 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 3065 3066 while (scan_idx < limit_idx) { 3067 assert(task_card_bm->at(scan_idx) == true, "should be"); 3068 _cm_card_bm->set_bit(scan_idx); 3069 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 3070 3071 // BitMap::get_next_one_offset() can handle the case when 3072 // its left_offset parameter is greater than its right_offset 3073 // parameter. It does, however, have an early exit if 3074 // left_offset == right_offset. So let's limit the value 3075 // passed in for left offset here. 3076 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 3077 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 3078 } 3079 } 3080 3081 // Update the marked bytes for this region.
3082 hr->add_to_marked_bytes(marked_bytes); 3083 3084 // Next heap region 3085 return false; 3086 } 3087 }; 3088 3089 class G1AggregateCountDataTask: public AbstractGangTask { 3090 protected: 3091 G1CollectedHeap* _g1h; 3092 ConcurrentMark* _cm; 3093 BitMap* _cm_card_bm; 3094 uint _max_worker_id; 3095 int _active_workers; 3096 3097 public: 3098 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3099 ConcurrentMark* cm, 3100 BitMap* cm_card_bm, 3101 uint max_worker_id, 3102 int n_workers) : 3103 AbstractGangTask("Count Aggregation"), 3104 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3105 _max_worker_id(max_worker_id), 3106 _active_workers(n_workers) { } 3107 3108 void work(uint worker_id) { 3109 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3110 3111 if (G1CollectedHeap::use_parallel_gc_threads()) { 3112 _g1h->heap_region_par_iterate_chunked(&cl, worker_id, 3113 _active_workers, 3114 HeapRegion::AggregateCountClaimValue); 3115 } else { 3116 _g1h->heap_region_iterate(&cl); 3117 } 3118 } 3119 }; 3120 3121 3122 void ConcurrentMark::aggregate_count_data() { 3123 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? 3124 _g1h->workers()->active_workers() : 3125 1); 3126 3127 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3128 _max_worker_id, n_workers); 3129 3130 if (G1CollectedHeap::use_parallel_gc_threads()) { 3131 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 3132 "sanity check"); 3133 _g1h->set_par_threads(n_workers); 3134 _g1h->workers()->run_task(&g1_par_agg_task); 3135 _g1h->set_par_threads(0); 3136 3137 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue), 3138 "sanity check"); 3139 _g1h->reset_heap_region_claim_values(); 3140 } else { 3141 g1_par_agg_task.work(0); 3142 } 3143 } 3144 3145 // Clear the per-worker arrays used to store the per-region counting data 3146 void ConcurrentMark::clear_all_count_data() { 3147 // Clear the global card bitmap - it will be filled during 3148 // liveness count aggregation (during remark) and the 3149 // final counting task. 3150 _card_bm.clear(); 3151 3152 // Clear the global region bitmap - it will be filled as part 3153 // of the final counting task. 
3154 _region_bm.clear(); 3155 3156 uint max_regions = _g1h->max_regions(); 3157 assert(_max_worker_id > 0, "uninitialized"); 3158 3159 for (uint i = 0; i < _max_worker_id; i += 1) { 3160 BitMap* task_card_bm = count_card_bitmap_for(i); 3161 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3162 3163 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3164 assert(marked_bytes_array != NULL, "uninitialized"); 3165 3166 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3167 task_card_bm->clear(); 3168 } 3169 } 3170 3171 void ConcurrentMark::print_stats() { 3172 if (verbose_stats()) { 3173 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3174 for (size_t i = 0; i < _active_tasks; ++i) { 3175 _tasks[i]->print_stats(); 3176 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3177 } 3178 } 3179 } 3180 3181 // abandon current marking iteration due to a Full GC 3182 void ConcurrentMark::abort() { 3183 // Clear all marks to force marking thread to do nothing 3184 _nextMarkBitMap->clearAll(); 3185 // Clear the liveness counting data 3186 clear_all_count_data(); 3187 // Empty mark stack 3188 clear_marking_state(); 3189 for (uint i = 0; i < _max_worker_id; ++i) { 3190 _tasks[i]->clear_region_fields(); 3191 } 3192 _has_aborted = true; 3193 3194 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3195 satb_mq_set.abandon_partial_marking(); 3196 // This can be called either during or outside marking, we'll read 3197 // the expected_active value from the SATB queue set. 3198 satb_mq_set.set_active_all_threads( 3199 false, /* new active value */ 3200 satb_mq_set.is_active() /* expected_active */); 3201 } 3202 3203 static void print_ms_time_info(const char* prefix, const char* name, 3204 NumberSeq& ns) { 3205 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3206 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3207 if (ns.num() > 0) { 3208 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", 3209 prefix, ns.sd(), ns.maximum()); 3210 } 3211 } 3212 3213 void ConcurrentMark::print_summary_info() { 3214 gclog_or_tty->print_cr(" Concurrent marking:"); 3215 print_ms_time_info(" ", "init marks", _init_times); 3216 print_ms_time_info(" ", "remarks", _remark_times); 3217 { 3218 print_ms_time_info(" ", "final marks", _remark_mark_times); 3219 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3220 3221 } 3222 print_ms_time_info(" ", "cleanups", _cleanup_times); 3223 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3224 _total_counting_time, 3225 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3226 (double)_cleanup_times.num() 3227 : 0.0)); 3228 if (G1ScrubRemSets) { 3229 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3230 _total_rs_scrub_time, 3231 (_cleanup_times.num() > 0 ? 
_total_rs_scrub_time * 1000.0 / 3232 (double)_cleanup_times.num() 3233 : 0.0)); 3234 } 3235 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3236 (_init_times.sum() + _remark_times.sum() + 3237 _cleanup_times.sum())/1000.0); 3238 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3239 "(%8.2f s marking).", 3240 cmThread()->vtime_accum(), 3241 cmThread()->vtime_mark_accum()); 3242 } 3243 3244 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3245 _parallel_workers->print_worker_threads_on(st); 3246 } 3247 3248 // We take a break if someone is trying to stop the world. 3249 bool ConcurrentMark::do_yield_check(uint worker_id) { 3250 if (should_yield()) { 3251 if (worker_id == 0) { 3252 _g1h->g1_policy()->record_concurrent_pause(); 3253 } 3254 cmThread()->yield(); 3255 return true; 3256 } else { 3257 return false; 3258 } 3259 } 3260 3261 bool ConcurrentMark::should_yield() { 3262 return cmThread()->should_yield(); 3263 } 3264 3265 bool ConcurrentMark::containing_card_is_marked(void* p) { 3266 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1); 3267 return _card_bm.at(offset >> CardTableModRefBS::card_shift); 3268 } 3269 3270 bool ConcurrentMark::containing_cards_are_marked(void* start, 3271 void* last) { 3272 return containing_card_is_marked(start) && 3273 containing_card_is_marked(last); 3274 } 3275 3276 #ifndef PRODUCT 3277 // for debugging purposes 3278 void ConcurrentMark::print_finger() { 3279 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3280 _heap_start, _heap_end, _finger); 3281 for (uint i = 0; i < _max_worker_id; ++i) { 3282 gclog_or_tty->print(" %u: "PTR_FORMAT, i, _tasks[i]->finger()); 3283 } 3284 gclog_or_tty->print_cr(""); 3285 } 3286 #endif 3287 3288 void CMTask::scan_object(oop obj) { 3289 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3290 3291 if (_cm->verbose_high()) { 3292 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, 3293 _worker_id, (void*) obj); 3294 } 3295 3296 size_t obj_size = obj->size(); 3297 _words_scanned += obj_size; 3298 3299 obj->oop_iterate(_cm_oop_closure); 3300 statsOnly( ++_objs_scanned ); 3301 check_limits(); 3302 } 3303 3304 // Closure for iteration over bitmaps 3305 class CMBitMapClosure : public BitMapClosure { 3306 private: 3307 // the bitmap that is being iterated over 3308 CMBitMap* _nextMarkBitMap; 3309 ConcurrentMark* _cm; 3310 CMTask* _task; 3311 3312 public: 3313 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3314 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3315 3316 bool do_bit(size_t offset) { 3317 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3318 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3319 assert( addr < _cm->finger(), "invariant"); 3320 3321 statsOnly( _task->increase_objs_found_on_bitmap() ); 3322 assert(addr >= _task->finger(), "invariant"); 3323 3324 // We move that task's local finger along. 3325 _task->move_finger_to(addr); 3326 3327 _task->scan_object(oop(addr)); 3328 // we only partially drain the local queue and global stack 3329 _task->drain_local_queue(true); 3330 _task->drain_global_stack(true); 3331 3332 // if the has_aborted flag has been raised, we need to bail out of 3333 // the iteration 3334 return !_task->has_aborted(); 3335 } 3336 }; 3337 3338 // Closure for iterating over objects, currently only used for 3339 // processing SATB buffers. 
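// Under the snapshot-at-the-beginning (SATB) write barrier, the
// pre-write value of every overwritten reference is logged into a
// per-thread buffer; completed buffers are handed to the global
// SATBMarkQueueSet. Applying this closure to such a buffer re-feeds
// the logged references to the owning task via deal_with_reference(),
// so objects reachable at the start of marking are not lost.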
3340 class CMObjectClosure : public ObjectClosure {
3341 private:
3342   CMTask* _task;
3343
3344 public:
3345   void do_object(oop obj) {
3346     _task->deal_with_reference(obj);
3347   }
3348
3349   CMObjectClosure(CMTask* task) : _task(task) { }
3350 };
3351
3352 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3353                                ConcurrentMark* cm,
3354                                CMTask* task)
3355   : _g1h(g1h), _cm(cm), _task(task) {
3356   assert(_ref_processor == NULL, "should be initialized to NULL");
3357
3358   if (G1UseConcMarkReferenceProcessing) {
3359     _ref_processor = g1h->ref_processor_cm();
3360     assert(_ref_processor != NULL, "should not be NULL");
3361   }
3362 }
3363
3364 void CMTask::setup_for_region(HeapRegion* hr) {
3365   // Separated the asserts so that we know which one fires.
3366   assert(hr != NULL,
3367          "claim_region() should have filtered out NULL regions");
3368   assert(!hr->continuesHumongous(),
3369          "claim_region() should have filtered out continues humongous regions");
3370
3371   if (_cm->verbose_low()) {
3372     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3373                            _worker_id, hr);
3374   }
3375
3376   _curr_region = hr;
3377   _finger = hr->bottom();
3378   update_region_limit();
3379 }
3380
3381 void CMTask::update_region_limit() {
3382   HeapRegion* hr = _curr_region;
3383   HeapWord* bottom = hr->bottom();
3384   HeapWord* limit = hr->next_top_at_mark_start();
3385
3386   if (limit == bottom) {
3387     if (_cm->verbose_low()) {
3388       gclog_or_tty->print_cr("[%u] found an empty region "
3389                              "["PTR_FORMAT", "PTR_FORMAT")",
3390                              _worker_id, bottom, limit);
3391     }
3392     // The region was collected underneath our feet.
3393     // We set the finger to bottom to ensure that the bitmap
3394     // iteration that will follow this will not do anything.
3395     // (this is not a condition that holds when we set the region up,
3396     // as the region is not supposed to be empty in the first place)
3397     _finger = bottom;
3398   } else if (limit >= _region_limit) {
3399     assert(limit >= _finger, "peace of mind");
3400   } else {
3401     assert(limit < _region_limit, "only way to get here");
3402     // This can happen under some pretty unusual circumstances. An
3403     // evacuation pause empties the region underneath our feet (NTAMS
3404     // at bottom). We then do some allocation in the region (NTAMS
3405     // stays at bottom), followed by the region being used as a GC
3406     // alloc region (NTAMS will move to top() and the objects
3407     // originally below it will be grayed). All objects now marked in
3408     // the region are explicitly grayed, if below the global finger,
3409     // and in fact we do not need to scan anything else. So, we simply
3410     // set _finger to be limit to ensure that the bitmap iteration
3411     // doesn't do anything.
3412     _finger = limit;
3413   }
3414
3415   _region_limit = limit;
3416 }
3417
3418 void CMTask::giveup_current_region() {
3419   assert(_curr_region != NULL, "invariant");
3420   if (_cm->verbose_low()) {
3421     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3422                            _worker_id, _curr_region);
3423   }
3424   clear_region_fields();
3425 }
3426
3427 void CMTask::clear_region_fields() {
3428   // Set these three fields to values that indicate we're not
3429   // holding on to a region.
3430 _curr_region = NULL; 3431 _finger = NULL; 3432 _region_limit = NULL; 3433 } 3434 3435 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3436 if (cm_oop_closure == NULL) { 3437 assert(_cm_oop_closure != NULL, "invariant"); 3438 } else { 3439 assert(_cm_oop_closure == NULL, "invariant"); 3440 } 3441 _cm_oop_closure = cm_oop_closure; 3442 } 3443 3444 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3445 guarantee(nextMarkBitMap != NULL, "invariant"); 3446 3447 if (_cm->verbose_low()) { 3448 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3449 } 3450 3451 _nextMarkBitMap = nextMarkBitMap; 3452 clear_region_fields(); 3453 3454 _calls = 0; 3455 _elapsed_time_ms = 0.0; 3456 _termination_time_ms = 0.0; 3457 _termination_start_time_ms = 0.0; 3458 3459 #if _MARKING_STATS_ 3460 _local_pushes = 0; 3461 _local_pops = 0; 3462 _local_max_size = 0; 3463 _objs_scanned = 0; 3464 _global_pushes = 0; 3465 _global_pops = 0; 3466 _global_max_size = 0; 3467 _global_transfers_to = 0; 3468 _global_transfers_from = 0; 3469 _regions_claimed = 0; 3470 _objs_found_on_bitmap = 0; 3471 _satb_buffers_processed = 0; 3472 _steal_attempts = 0; 3473 _steals = 0; 3474 _aborted = 0; 3475 _aborted_overflow = 0; 3476 _aborted_cm_aborted = 0; 3477 _aborted_yield = 0; 3478 _aborted_timed_out = 0; 3479 _aborted_satb = 0; 3480 _aborted_termination = 0; 3481 #endif // _MARKING_STATS_ 3482 } 3483 3484 bool CMTask::should_exit_termination() { 3485 regular_clock_call(); 3486 // This is called when we are in the termination protocol. We should 3487 // quit if, for some reason, this task wants to abort or the global 3488 // stack is not empty (this means that we can get work from it). 3489 return !_cm->mark_stack_empty() || has_aborted(); 3490 } 3491 3492 void CMTask::reached_limit() { 3493 assert(_words_scanned >= _words_scanned_limit || 3494 _refs_reached >= _refs_reached_limit , 3495 "shouldn't have been called otherwise"); 3496 regular_clock_call(); 3497 } 3498 3499 void CMTask::regular_clock_call() { 3500 if (has_aborted()) return; 3501 3502 // First, we need to recalculate the words scanned and refs reached 3503 // limits for the next clock call. 3504 recalculate_limits(); 3505 3506 // During the regular clock call we do the following 3507 3508 // (1) If an overflow has been flagged, then we abort. 3509 if (_cm->has_overflown()) { 3510 set_has_aborted(); 3511 return; 3512 } 3513 3514 // If we are not concurrent (i.e. we're doing remark) we don't need 3515 // to check anything else. The other steps are only needed during 3516 // the concurrent marking phase. 3517 if (!concurrent()) return; 3518 3519 // (2) If marking has been aborted for Full GC, then we also abort. 3520 if (_cm->has_aborted()) { 3521 set_has_aborted(); 3522 statsOnly( ++_aborted_cm_aborted ); 3523 return; 3524 } 3525 3526 double curr_time_ms = os::elapsedVTime() * 1000.0; 3527 3528 // (3) If marking stats are enabled, then we update the step history. 
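  // Note that _MARKING_STATS_ is a compile-time switch: the statsOnly()
  // uses above and the block below compile to nothing unless it is
  // defined, so this clock call stays cheap in product builds.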
3529 #if _MARKING_STATS_
3530   if (_words_scanned >= _words_scanned_limit) {
3531     ++_clock_due_to_scanning;
3532   }
3533   if (_refs_reached >= _refs_reached_limit) {
3534     ++_clock_due_to_marking;
3535   }
3536
3537   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3538   _interval_start_time_ms = curr_time_ms;
3539   _all_clock_intervals_ms.add(last_interval_ms);
3540
3541   if (_cm->verbose_medium()) {
3542     gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3543                            "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3544                            _worker_id, last_interval_ms,
3545                            _words_scanned,
3546                            (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3547                            _refs_reached,
3548                            (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3549   }
3550 #endif // _MARKING_STATS_
3551
3552   // (4) We check whether we should yield. If we have to, then we abort.
3553   if (_cm->should_yield()) {
3554     // We should yield. To do this we abort the task. The caller is
3555     // responsible for yielding.
3556     set_has_aborted();
3557     statsOnly( ++_aborted_yield );
3558     return;
3559   }
3560
3561   // (5) We check whether we've reached our time quota. If we have,
3562   // then we abort.
3563   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3564   if (elapsed_time_ms > _time_target_ms) {
3565     set_has_aborted();
3566     _has_timed_out = true;
3567     statsOnly( ++_aborted_timed_out );
3568     return;
3569   }
3570
3571   // (6) Finally, we check whether there are enough completed SATB
3572   // buffers available for processing. If there are, we abort.
3573   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3574   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3575     if (_cm->verbose_low()) {
3576       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3577                              _worker_id);
3578     }
3579     // We do need to process SATB buffers, so we'll abort and restart
3580     // the marking task to do so.
3581     set_has_aborted();
3582     statsOnly( ++_aborted_satb );
3583     return;
3584   }
3585 }
3586
3587 void CMTask::recalculate_limits() {
3588   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3589   _words_scanned_limit = _real_words_scanned_limit;
3590
3591   _real_refs_reached_limit = _refs_reached + refs_reached_period;
3592   _refs_reached_limit = _real_refs_reached_limit;
3593 }
3594
3595 void CMTask::decrease_limits() {
3596   // This is called when we believe that we're going to do an infrequent
3597   // operation which will increase the per-byte scanned cost (i.e. move
3598   // entries to/from the global stack). It basically tries to decrease the
3599   // scanning limit so that the clock is called earlier.
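  // For example (hypothetical numbers): if words_scanned_period were
  // 4096 words, the subtraction below would pull the limit back by
  // 3 * 4096 / 4 = 3072 words, leaving only a quarter of the usual
  // scanning budget before the clock fires.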
3600
3601   if (_cm->verbose_medium()) {
3602     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3603   }
3604
3605   _words_scanned_limit = _real_words_scanned_limit -
3606     3 * words_scanned_period / 4;
3607   _refs_reached_limit = _real_refs_reached_limit -
3608     3 * refs_reached_period / 4;
3609 }
3610
3611 void CMTask::move_entries_to_global_stack() {
3612   // local array where we'll store the entries that will be popped
3613   // from the local queue
3614   oop buffer[global_stack_transfer_size];
3615
3616   int n = 0;
3617   oop obj;
3618   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3619     buffer[n] = obj;
3620     ++n;
3621   }
3622
3623   if (n > 0) {
3624     // we popped at least one entry from the local queue
3625
3626     statsOnly( ++_global_transfers_to; _local_pops += n );
3627
3628     if (!_cm->mark_stack_push(buffer, n)) {
3629       if (_cm->verbose_low()) {
3630         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3631                                _worker_id);
3632       }
3633       set_has_aborted();
3634     } else {
3635       // the transfer was successful
3636
3637       if (_cm->verbose_medium()) {
3638         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3639                                _worker_id, n);
3640       }
3641       statsOnly( int tmp_size = _cm->mark_stack_size();
3642                  if (tmp_size > _global_max_size) {
3643                    _global_max_size = tmp_size;
3644                  }
3645                  _global_pushes += n );
3646     }
3647   }
3648
3649   // this operation was quite expensive, so decrease the limits
3650   decrease_limits();
3651 }
3652
3653 void CMTask::get_entries_from_global_stack() {
3654   // local array where we'll store the entries that will be popped
3655   // from the global stack.
3656   oop buffer[global_stack_transfer_size];
3657   int n;
3658   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3659   assert(n <= global_stack_transfer_size,
3660          "we should not pop more than the given limit");
3661   if (n > 0) {
3662     // yes, we did actually pop at least one entry
3663
3664     statsOnly( ++_global_transfers_from; _global_pops += n );
3665     if (_cm->verbose_medium()) {
3666       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3667                              _worker_id, n);
3668     }
3669     for (int i = 0; i < n; ++i) {
3670       bool success = _task_queue->push(buffer[i]);
3671       // We only call this when the local queue is empty or under a
3672       // given target limit. So, we do not expect this push to fail.
3673       assert(success, "invariant");
3674     }
3675
3676     statsOnly( int tmp_size = _task_queue->size();
3677                if (tmp_size > _local_max_size) {
3678                  _local_max_size = tmp_size;
3679                }
3680                _local_pushes += n );
3681   }
3682
3683   // this operation was quite expensive, so decrease the limits
3684   decrease_limits();
3685 }
3686
3687 void CMTask::drain_local_queue(bool partially) {
3688   if (has_aborted()) return;
3689
3690   // Decide what the target size is, depending on whether we're going to
3691   // drain it partially (so that other tasks can steal if they run out
3692   // of things to do) or totally (at the very end).
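  // For the partial case the target below is the smaller of a third of
  // the queue's capacity and GCDrainStackTargetSize, which deliberately
  // leaves some entries behind for other tasks to steal.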
3693   size_t target_size;
3694   if (partially) {
3695     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3696   } else {
3697     target_size = 0;
3698   }
3699
3700   if (_task_queue->size() > target_size) {
3701     if (_cm->verbose_high()) {
3702       gclog_or_tty->print_cr("[%u] draining local queue, target size = "SIZE_FORMAT,
3703                              _worker_id, target_size);
3704     }
3705
3706     oop obj;
3707     bool ret = _task_queue->pop_local(obj);
3708     while (ret) {
3709       statsOnly( ++_local_pops );
3710
3711       if (_cm->verbose_high()) {
3712         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3713                                (void*) obj);
3714       }
3715
3716       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
3717       assert(!_g1h->is_on_master_free_list(
3718                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3719
3720       scan_object(obj);
3721
3722       if (_task_queue->size() <= target_size || has_aborted()) {
3723         ret = false;
3724       } else {
3725         ret = _task_queue->pop_local(obj);
3726       }
3727     }
3728
3729     if (_cm->verbose_high()) {
3730       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
3731                              _worker_id, _task_queue->size());
3732     }
3733   }
3734 }
3735
3736 void CMTask::drain_global_stack(bool partially) {
3737   if (has_aborted()) return;
3738
3739   // We have a policy to drain the local queue before we attempt to
3740   // drain the global stack.
3741   assert(partially || _task_queue->size() == 0, "invariant");
3742
3743   // Decide what the target size is, depending on whether we're going to
3744   // drain it partially (so that other tasks can steal if they run out
3745   // of things to do) or totally (at the very end). Notice that,
3746   // because we move entries from the global stack in chunks or
3747   // because another task might be doing the same, we might in fact
3748   // drop below the target. But, this is not a problem.
3749   size_t target_size;
3750   if (partially) {
3751     target_size = _cm->partial_mark_stack_size_target();
3752   } else {
3753     target_size = 0;
3754   }
3755
3756   if (_cm->mark_stack_size() > target_size) {
3757     if (_cm->verbose_low()) {
3758       gclog_or_tty->print_cr("[%u] draining global_stack, target size "SIZE_FORMAT,
3759                              _worker_id, target_size);
3760     }
3761
3762     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3763       get_entries_from_global_stack();
3764       drain_local_queue(partially);
3765     }
3766
3767     if (_cm->verbose_low()) {
3768       gclog_or_tty->print_cr("[%u] drained global stack, size = %d",
3769                              _worker_id, _cm->mark_stack_size());
3770     }
3771   }
3772 }
3773
3774 // The SATB queue set makes several assumptions about whether to call the
3775 // par or non-par versions of its methods. This is why some of the code
3776 // below is replicated. We should really get rid of the single-threaded
3777 // version of the code to simplify things.
3778 void CMTask::drain_satb_buffers() {
3779   if (has_aborted()) return;
3780
3781   // We set this so that the regular clock knows that we're in the
3782   // middle of draining buffers and doesn't set the abort flag when it
3783   // notices that SATB buffers are available for draining. It'd be
3784   // very counter-productive if it did that. :-)
3785   _draining_satb_buffers = true;
3786
3787   CMObjectClosure oc(this);
3788   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3789   if (G1CollectedHeap::use_parallel_gc_threads()) {
3790     satb_mq_set.set_par_closure(_worker_id, &oc);
3791   } else {
3792     satb_mq_set.set_closure(&oc);
3793   }
3794
3795   // This keeps claiming and applying the closure to completed buffers
3796   // until we run out of buffers or we need to abort.
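  // Note the regular_clock_call() after every buffer below: a task can
  // spend its whole time budget in this loop, and the clock is what
  // bounds that by raising the abort flag when a limit is hit.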
3797 if (G1CollectedHeap::use_parallel_gc_threads()) { 3798 while (!has_aborted() && 3799 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) { 3800 if (_cm->verbose_medium()) { 3801 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3802 } 3803 statsOnly( ++_satb_buffers_processed ); 3804 regular_clock_call(); 3805 } 3806 } else { 3807 while (!has_aborted() && 3808 satb_mq_set.apply_closure_to_completed_buffer()) { 3809 if (_cm->verbose_medium()) { 3810 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3811 } 3812 statsOnly( ++_satb_buffers_processed ); 3813 regular_clock_call(); 3814 } 3815 } 3816 3817 if (!concurrent() && !has_aborted()) { 3818 // We should only do this during remark. 3819 if (G1CollectedHeap::use_parallel_gc_threads()) { 3820 satb_mq_set.par_iterate_closure_all_threads(_worker_id); 3821 } else { 3822 satb_mq_set.iterate_closure_all_threads(); 3823 } 3824 } 3825 3826 _draining_satb_buffers = false; 3827 3828 assert(has_aborted() || 3829 concurrent() || 3830 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3831 3832 if (G1CollectedHeap::use_parallel_gc_threads()) { 3833 satb_mq_set.set_par_closure(_worker_id, NULL); 3834 } else { 3835 satb_mq_set.set_closure(NULL); 3836 } 3837 3838 // again, this was a potentially expensive operation, decrease the 3839 // limits to get the regular clock call early 3840 decrease_limits(); 3841 } 3842 3843 void CMTask::print_stats() { 3844 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3845 _worker_id, _calls); 3846 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3847 _elapsed_time_ms, _termination_time_ms); 3848 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3849 _step_times_ms.num(), _step_times_ms.avg(), 3850 _step_times_ms.sd()); 3851 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3852 _step_times_ms.maximum(), _step_times_ms.sum()); 3853 3854 #if _MARKING_STATS_ 3855 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3856 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3857 _all_clock_intervals_ms.sd()); 3858 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3859 _all_clock_intervals_ms.maximum(), 3860 _all_clock_intervals_ms.sum()); 3861 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", 3862 _clock_due_to_scanning, _clock_due_to_marking); 3863 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", 3864 _objs_scanned, _objs_found_on_bitmap); 3865 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", 3866 _local_pushes, _local_pops, _local_max_size); 3867 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", 3868 _global_pushes, _global_pops, _global_max_size); 3869 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", 3870 _global_transfers_to,_global_transfers_from); 3871 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed); 3872 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); 3873 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", 3874 _steal_attempts, _steals); 3875 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); 3876 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", 3877 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3878 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", 3879 _aborted_timed_out, 
                         _aborted_satb, _aborted_termination);
3880 #endif // _MARKING_STATS_
3881 }
3882
3883 /*****************************************************************************
3884
3885     The do_marking_step(time_target_ms) method is the building block
3886     of the parallel marking framework. It can be called in parallel
3887     with other invocations of do_marking_step() on different tasks
3888     (but only one per task, obviously) and concurrently with the
3889     mutator threads, or during remark, hence it eliminates the need
3890     for two versions of the code. When called during remark, it will
3891     pick up from where the task left off during the concurrent marking
3892     phase. Interestingly, tasks are also claimable during evacuation
3893     pauses, since do_marking_step() ensures that it aborts before
3894     it needs to yield.
3895
3896     The data structures that it uses to do marking work are the
3897     following:
3898
3899       (1) Marking Bitmap. If there are gray objects that appear only
3900       on the bitmap (this happens either when dealing with an overflow
3901       or when the initial marking phase has simply marked the roots
3902       and didn't push them on the stack), then tasks claim heap
3903       regions whose bitmap they then scan to find gray objects. A
3904       global finger indicates where the end of the last claimed region
3905       is. A local finger indicates how far into the region a task has
3906       scanned. The two fingers are used to determine how to gray an
3907       object (i.e. whether simply marking it is OK, as it will be
3908       visited by a task in the future, or whether it also needs to be
3909       pushed on a stack).
3910
3911       (2) Local Queue. Each task has a local queue, which it can
3912       access reasonably efficiently. Other tasks can steal from
3913       it when they run out of work. Throughout the marking phase, a
3914       task attempts to keep its local queue short but not totally
3915       empty, so that entries are available for stealing by other
3916       tasks. Only when there is no more work does a task totally
3917       drain its local queue.
3918
3919       (3) Global Mark Stack. This handles local queue overflow. During
3920       marking only sets of entries are moved between it and the local
3921       queues, as access to it requires a mutex and fine-grained
3922       interaction with it could cause contention. If it
3923       overflows, then the marking phase should restart and iterate
3924       over the bitmap to identify gray objects. Throughout the marking
3925       phase, tasks attempt to keep the global mark stack at a small
3926       length but not totally empty, so that entries are available for
3927       popping by other tasks. Only when there is no more work do tasks
3928       totally drain the global mark stack.
3929
3930       (4) SATB Buffer Queue. This is where completed SATB buffers are
3931       made available. Buffers are regularly removed from this queue
3932       and scanned for roots, so that the queue doesn't get too
3933       long. During remark, all completed buffers are processed, as
3934       well as the filled-in parts of any uncompleted buffers.
3935
3936     The do_marking_step() method tries to abort when the time target
3937     has been reached. There are a few other cases when the
3938     do_marking_step() method also aborts:
3939
3940       (1) When the marking phase has been aborted (after a Full GC).
3941
3942       (2) When a global overflow (on the global stack) has been
3943       triggered. Before the task aborts, it will actually sync up with
3944       the other tasks to ensure that all the marking data structures
3945       (local queues, stacks, fingers etc.)
      are re-initialised so that
3946       when do_marking_step() completes, the marking phase can
3947       immediately restart.
3948
3949       (3) When enough completed SATB buffers are available. The
3950       do_marking_step() method only tries to drain SATB buffers right
3951       at the beginning. So, if enough buffers are available, the
3952       marking step aborts and the SATB buffers are processed at
3953       the beginning of the next invocation.
3954
3955       (4) To yield. When we have to yield then we abort and yield
3956       right at the end of do_marking_step(). This saves us from a lot
3957       of hassle, as by yielding we might allow a Full GC. If this
3958       happens then objects will be compacted underneath our feet, the
3959       heap might shrink, etc. We save checking for this by just
3960       aborting and doing the yield right at the end.
3961
3962     From the above it follows that the do_marking_step() method should
3963     be called in a loop (or, otherwise, regularly) until it completes.
3964
3965     If a marking step completes without its has_aborted() flag being
3966     true, it means it has completed the current marking phase (and
3967     also all other marking tasks have done so and have all synced up).
3968
3969     A method called regular_clock_call() is invoked "regularly" (in
3970     sub-millisecond intervals) throughout marking. It is this clock
3971     method that checks all the abort conditions which were mentioned
3972     above and decides when the task should abort. A work-based scheme
3973     is used to trigger this clock method: when the number of object
3974     words the marking phase has scanned or the number of references
3975     the marking phase has visited reach a given limit. Additional
3976     invocations of the clock method have been planted in a few other
3977     strategic places too. The initial reason for the clock method was
3978     to avoid calling vtime too regularly, as it is quite expensive. So,
3979     once it was in place, it was natural to piggy-back all the other
3980     conditions on it too and not constantly check them throughout the
3981     code.
3982
3983 *****************************************************************************/
3984
3985 void CMTask::do_marking_step(double time_target_ms,
3986                              bool do_stealing,
3987                              bool do_termination) {
3988   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
3989   assert(concurrent() == _cm->concurrent(), "they should be the same");
3990
3991   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
3992   assert(_task_queues != NULL, "invariant");
3993   assert(_task_queue != NULL, "invariant");
3994   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
3995
3996   assert(!_claimed,
3997          "only one thread should claim this task at any one time");
3998
3999   // OK, this doesn't safeguard against all possible scenarios, as it is
4000   // possible for two threads to set the _claimed flag at the same
4001   // time. But it is only for debugging purposes anyway and it will
4002   // catch most problems.
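  // A truly race-free claim would need an atomic update. A minimal
  // sketch of what that could look like (illustrative only; _claimed_int
  // is a hypothetical jint stand-in for the bool flag, and this is not
  // how the code below actually works):
  //
  //   if (Atomic::cmpxchg(1, &_claimed_int, 0) != 0) {
  //     return; // another thread already owns this task
  //   }
  //
  // Since _claimed is only a debugging aid, the plain store below is
  // deemed good enough.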
4002   _claimed = true;
4003
4004   _start_time_ms = os::elapsedVTime() * 1000.0;
4005   statsOnly( _interval_start_time_ms = _start_time_ms );
4006
4007   double diff_prediction_ms =
4008     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4009   _time_target_ms = time_target_ms - diff_prediction_ms;
4010
4011   // set up the variables that are used in the work-based scheme to
4012   // call the regular clock method
4013   _words_scanned = 0;
4014   _refs_reached = 0;
4015   recalculate_limits();
4016
4017   // clear all flags
4018   clear_has_aborted();
4019   _has_timed_out = false;
4020   _draining_satb_buffers = false;
4021
4022   ++_calls;
4023
4024   if (_cm->verbose_low()) {
4025     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4026                            "target = %1.2lfms >>>>>>>>>>",
4027                            _worker_id, _calls, _time_target_ms);
4028   }
4029
4030   // Set up the bitmap and oop closures. Anything that uses them is
4031   // eventually called from this method, so it is OK to allocate these
4032   // on this method's stack.
4033   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4034   G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
4035   set_cm_oop_closure(&cm_oop_closure);
4036
4037   if (_cm->has_overflown()) {
4038     // This can happen if the mark stack overflows during a GC pause
4039     // and this task, after a yield point, restarts. We have to abort
4040     // as we need to get into the overflow protocol which happens
4041     // right at the end of this task.
4042     set_has_aborted();
4043   }
4044
4045   // First drain any available SATB buffers. After this, we will not
4046   // look at SATB buffers before the next invocation of this method.
4047   // If enough completed SATB buffers are queued up, the regular clock
4048   // will abort this task so that it restarts.
4049   drain_satb_buffers();
4050   // ...then partially drain the local queue and the global stack
4051   drain_local_queue(true);
4052   drain_global_stack(true);
4053
4054   do {
4055     if (!has_aborted() && _curr_region != NULL) {
4056       // This means that we're already holding on to a region.
4057       assert(_finger != NULL, "if region is not NULL, then the finger "
4058              "should not be NULL either");
4059
4060       // We might have restarted this task after an evacuation pause
4061       // which might have evacuated the region we're holding on to
4062       // underneath our feet. Let's read its limit again to make sure
4063       // that we do not iterate over a region of the heap that
4064       // contains garbage (update_region_limit() will also move
4065       // _finger to the start of the region if it is found empty).
4066       update_region_limit();
4067       // We will start from _finger not from the start of the region,
4068       // as we might be restarting this task after aborting half-way
4069       // through scanning this region. In this case, _finger points to
4070       // the address where we last found a marked object. If this is a
4071       // fresh region, _finger points to start().
4072       MemRegion mr = MemRegion(_finger, _region_limit);
4073
4074       if (_cm->verbose_low()) {
4075         gclog_or_tty->print_cr("[%u] we're scanning part "
4076                                "["PTR_FORMAT", "PTR_FORMAT") "
4077                                "of region "PTR_FORMAT,
4078                                _worker_id, _finger, _region_limit, _curr_region);
4079       }
4080
4081       // Let's iterate over the bitmap of the part of the
4082       // region that is left.
4083       if (mr.is_empty() || _nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4084         // We successfully completed iterating over the region. Now,
4085         // let's give up the region.
4086 giveup_current_region(); 4087 regular_clock_call(); 4088 } else { 4089 assert(has_aborted(), "currently the only way to do so"); 4090 // The only way to abort the bitmap iteration is to return 4091 // false from the do_bit() method. However, inside the 4092 // do_bit() method we move the _finger to point to the 4093 // object currently being looked at. So, if we bail out, we 4094 // have definitely set _finger to something non-null. 4095 assert(_finger != NULL, "invariant"); 4096 4097 // Region iteration was actually aborted. So now _finger 4098 // points to the address of the object we last scanned. If we 4099 // leave it there, when we restart this task, we will rescan 4100 // the object. It is easy to avoid this. We move the finger by 4101 // enough to point to the next possible object header (the 4102 // bitmap knows by how much we need to move it as it knows its 4103 // granularity). 4104 assert(_finger < _region_limit, "invariant"); 4105 HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger); 4106 // Check if bitmap iteration was aborted while scanning the last object 4107 if (new_finger >= _region_limit) { 4108 giveup_current_region(); 4109 } else { 4110 move_finger_to(new_finger); 4111 } 4112 } 4113 } 4114 // At this point we have either completed iterating over the 4115 // region we were holding on to, or we have aborted. 4116 4117 // We then partially drain the local queue and the global stack. 4118 // (Do we really need this?) 4119 drain_local_queue(true); 4120 drain_global_stack(true); 4121 4122 // Read the note on the claim_region() method on why it might 4123 // return NULL with potentially more regions available for 4124 // claiming and why we have to check out_of_regions() to determine 4125 // whether we're done or not. 4126 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4127 // We are going to try to claim a new region. We should have 4128 // given up on the previous one. 4129 // Separated the asserts so that we know which one fires. 4130 assert(_curr_region == NULL, "invariant"); 4131 assert(_finger == NULL, "invariant"); 4132 assert(_region_limit == NULL, "invariant"); 4133 if (_cm->verbose_low()) { 4134 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id); 4135 } 4136 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 4137 if (claimed_region != NULL) { 4138 // Yes, we managed to claim one 4139 statsOnly( ++_regions_claimed ); 4140 4141 if (_cm->verbose_low()) { 4142 gclog_or_tty->print_cr("[%u] we successfully claimed " 4143 "region "PTR_FORMAT, 4144 _worker_id, claimed_region); 4145 } 4146 4147 setup_for_region(claimed_region); 4148 assert(_curr_region == claimed_region, "invariant"); 4149 } 4150 // It is important to call the regular clock here. It might take 4151 // a while to claim a region if, for example, we hit a large 4152 // block of empty regions. So we need to call the regular clock 4153 // method once round the loop to make sure it's called 4154 // frequently enough. 4155 regular_clock_call(); 4156 } 4157 4158 if (!has_aborted() && _curr_region == NULL) { 4159 assert(_cm->out_of_regions(), 4160 "at this point we should be out of regions"); 4161 } 4162 } while ( _curr_region != NULL && !has_aborted()); 4163 4164 if (!has_aborted()) { 4165 // We cannot check whether the global stack is empty, since other 4166 // tasks might be pushing objects to it concurrently. 
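    // out_of_regions(), on the other hand, is stable here: the global
    // finger only moves forward, so once it has passed the end of the
    // heap no task can claim a fresh region.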
4167     assert(_cm->out_of_regions(),
4168            "at this point we should be out of regions");
4169
4170     if (_cm->verbose_low()) {
4171       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4172     }
4173
4174     // Try to reduce the number of available SATB buffers so that
4175     // remark has less work to do.
4176     drain_satb_buffers();
4177   }
4178
4179   // Since we've done everything else, we can now totally drain the
4180   // local queue and global stack.
4181   drain_local_queue(false);
4182   drain_global_stack(false);
4183
4184   // Attempt at work stealing from other tasks' queues.
4185   if (do_stealing && !has_aborted()) {
4186     // We have not aborted. This means that we have finished all that
4187     // we could. Let's try to do some stealing...
4188
4189     // We cannot check whether the global stack is empty, since other
4190     // tasks might be pushing objects to it concurrently.
4191     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4192            "only way to reach here");
4193
4194     if (_cm->verbose_low()) {
4195       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4196     }
4197
4198     while (!has_aborted()) {
4199       oop obj;
4200       statsOnly( ++_steal_attempts );
4201
4202       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4203         if (_cm->verbose_medium()) {
4204           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4205                                  _worker_id, (void*) obj);
4206         }
4207
4208         statsOnly( ++_steals );
4209
4210         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4211                "any stolen object should be marked");
4212         scan_object(obj);
4213
4214         // And since we're towards the end, let's totally drain the
4215         // local queue and global stack.
4216         drain_local_queue(false);
4217         drain_global_stack(false);
4218       } else {
4219         break;
4220       }
4221     }
4222   }
4223
4224   // If we are about to wrap up and go into termination, check if we
4225   // should raise the overflow flag.
4226   if (do_termination && !has_aborted()) {
4227     if (_cm->force_overflow()->should_force()) {
4228       _cm->set_has_overflown();
4229       regular_clock_call();
4230     }
4231   }
4232
4233   // We still haven't aborted. Now, let's try to get into the
4234   // termination protocol.
4235   if (do_termination && !has_aborted()) {
4236     // We cannot check whether the global stack is empty, since other
4237     // tasks might be concurrently pushing objects on it.
4238     // Separated the asserts so that we know which one fires.
4239     assert(_cm->out_of_regions(), "only way to reach here");
4240     assert(_task_queue->size() == 0, "only way to reach here");
4241
4242     if (_cm->verbose_low()) {
4243       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4244     }
4245
4246     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4247     // The CMTask class also extends the TerminatorTerminator class,
4248     // hence its should_exit_termination() method will also decide
4249     // whether to exit the termination protocol or not.
4250     bool finished = _cm->terminator()->offer_termination(this);
4251     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4252     _termination_time_ms +=
4253       termination_end_time_ms - _termination_start_time_ms;
4254
4255     if (finished) {
4256       // We're all done.
4257
4258       if (_worker_id == 0) {
4259         // let's allow task 0 to do this
4260         if (concurrent()) {
4261           assert(_cm->concurrent_marking_in_progress(), "invariant");
4262           // we need to set this to false before the next
4263           // safepoint. This way we ensure that the marking phase
4264           // doesn't observe any more heap expansions.
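          // Restricting this update to worker 0 gives the flag a single
          // writer; all tasks have terminated at this point, so nothing
          // can race with it.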
4265           _cm->clear_concurrent_marking_in_progress();
4266         }
4267       }
4268
4269       // We can now guarantee that the global stack is empty, since
4270       // all other tasks have finished. We separated the guarantees so
4271       // that, if a condition is false, we can immediately find out
4272       // which one.
4273       guarantee(_cm->out_of_regions(), "only way to reach here");
4274       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4275       guarantee(_task_queue->size() == 0, "only way to reach here");
4276       guarantee(!_cm->has_overflown(), "only way to reach here");
4277       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4278
4279       if (_cm->verbose_low()) {
4280         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4281       }
4282     } else {
4283       // Apparently there's more work to do. Let's abort this task. Its
4284       // caller will restart it and we can hopefully find more things to do.
4285
4286       if (_cm->verbose_low()) {
4287         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4288                                _worker_id);
4289       }
4290
4291       set_has_aborted();
4292       statsOnly( ++_aborted_termination );
4293     }
4294   }
4295
4296   // Mainly for debugging purposes to make sure that a pointer to the
4297   // closure which was stack-allocated in this frame doesn't
4298   // escape it by accident.
4299   set_cm_oop_closure(NULL);
4300   double end_time_ms = os::elapsedVTime() * 1000.0;
4301   double elapsed_time_ms = end_time_ms - _start_time_ms;
4302   // Update the step history.
4303   _step_times_ms.add(elapsed_time_ms);
4304
4305   if (has_aborted()) {
4306     // The task was aborted for some reason.
4307
4308     statsOnly( ++_aborted );
4309
4310     if (_has_timed_out) {
4311       double diff_ms = elapsed_time_ms - _time_target_ms;
4312       // Keep statistics of how well we did with respect to hitting
4313       // our target only if we actually timed out (if we aborted for
4314       // other reasons, then the results might get skewed).
4315       _marking_step_diffs_ms.add(diff_ms);
4316     }
4317
4318     if (_cm->has_overflown()) {
4319       // This is the interesting one. We aborted because a global
4320       // overflow was raised. This means we have to restart the
4321       // marking phase and start iterating over regions. However, in
4322       // order to do this we have to make sure that all tasks stop
4323       // what they are doing and re-initialise in a safe manner. We
4324       // will achieve this with the use of two barrier sync points.
4325
4326       if (_cm->verbose_low()) {
4327         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4328       }
4329
4330       _cm->enter_first_sync_barrier(_worker_id);
4331       // When we exit this sync barrier we know that all tasks have
4332       // stopped doing marking work. So, it's now safe to
4333       // re-initialise our data structures. At the end of this method,
4334       // task 0 will clear the global data structures.
4335
4336       statsOnly( ++_aborted_overflow );
4337
4338       // We clear the local state of this task...
4339       clear_region_fields();
4340
4341       // ...and enter the second barrier.
4342       _cm->enter_second_sync_barrier(_worker_id);
4343       // At this point everything has been re-initialised and we're
4344       // ready to restart.
4345 } 4346 4347 if (_cm->verbose_low()) { 4348 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4349 "elapsed = %1.2lfms <<<<<<<<<<", 4350 _worker_id, _time_target_ms, elapsed_time_ms); 4351 if (_cm->has_aborted()) { 4352 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4353 _worker_id); 4354 } 4355 } 4356 } else { 4357 if (_cm->verbose_low()) { 4358 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4359 "elapsed = %1.2lfms <<<<<<<<<<", 4360 _worker_id, _time_target_ms, elapsed_time_ms); 4361 } 4362 } 4363 4364 _claimed = false; 4365 } 4366 4367 CMTask::CMTask(uint worker_id, 4368 ConcurrentMark* cm, 4369 size_t* marked_bytes, 4370 BitMap* card_bm, 4371 CMTaskQueue* task_queue, 4372 CMTaskQueueSet* task_queues) 4373 : _g1h(G1CollectedHeap::heap()), 4374 _worker_id(worker_id), _cm(cm), 4375 _claimed(false), 4376 _nextMarkBitMap(NULL), _hash_seed(17), 4377 _task_queue(task_queue), 4378 _task_queues(task_queues), 4379 _cm_oop_closure(NULL), 4380 _marked_bytes_array(marked_bytes), 4381 _card_bm(card_bm) { 4382 guarantee(task_queue != NULL, "invariant"); 4383 guarantee(task_queues != NULL, "invariant"); 4384 4385 statsOnly( _clock_due_to_scanning = 0; 4386 _clock_due_to_marking = 0 ); 4387 4388 _marking_step_diffs_ms.add(0.5); 4389 } 4390 4391 // These are formatting macros that are used below to ensure 4392 // consistent formatting. The *_H_* versions are used to format the 4393 // header for a particular value and they should be kept consistent 4394 // with the corresponding macro. Also note that most of the macros add 4395 // the necessary white space (as a prefix) which makes them a bit 4396 // easier to compose. 4397 4398 // All the output lines are prefixed with this string to be able to 4399 // identify them easily in a large log file. 4400 #define G1PPRL_LINE_PREFIX "###" 4401 4402 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT 4403 #ifdef _LP64 4404 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 4405 #else // _LP64 4406 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 4407 #endif // _LP64 4408 4409 // For per-region info 4410 #define G1PPRL_TYPE_FORMAT " %-4s" 4411 #define G1PPRL_TYPE_H_FORMAT " %4s" 4412 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) 4413 #define G1PPRL_BYTE_H_FORMAT " %9s" 4414 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 4415 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 4416 4417 // For summary info 4418 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT 4419 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT 4420 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" 4421 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" 4422 4423 G1PrintRegionLivenessInfoClosure:: 4424 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) 4425 : _out(out), 4426 _total_used_bytes(0), _total_capacity_bytes(0), 4427 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4428 _hum_used_bytes(0), _hum_capacity_bytes(0), 4429 _hum_prev_live_bytes(0), _hum_next_live_bytes(0) { 4430 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4431 MemRegion g1_committed = g1h->g1_committed(); 4432 MemRegion g1_reserved = g1h->g1_reserved(); 4433 double now = os::elapsedTime(); 4434 4435 // Print the header of the output. 
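// The header produced below has this general shape (one "###"-prefixed
// line per row; column widths and actual values elided):
//
//   ### PHASE <phase-name> @ <time>
//   ### HEAP committed: <addr>-<addr> reserved: <addr>-<addr> region-size: <bytes>
//   ###
//   ###  type address-range  used  prev-live  next-live  gc-eff
//   ###                    (bytes)   (bytes)    (bytes) (bytes/ms)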
4436   _out->cr();
4437   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4438   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4439                  G1PPRL_SUM_ADDR_FORMAT("committed")
4440                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4441                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4442                  g1_committed.start(), g1_committed.end(),
4443                  g1_reserved.start(), g1_reserved.end(),
4444                  HeapRegion::GrainBytes);
4445   _out->print_cr(G1PPRL_LINE_PREFIX);
4446   _out->print_cr(G1PPRL_LINE_PREFIX
4447                  G1PPRL_TYPE_H_FORMAT
4448                  G1PPRL_ADDR_BASE_H_FORMAT
4449                  G1PPRL_BYTE_H_FORMAT
4450                  G1PPRL_BYTE_H_FORMAT
4451                  G1PPRL_BYTE_H_FORMAT
4452                  G1PPRL_DOUBLE_H_FORMAT,
4453                  "type", "address-range",
4454                  "used", "prev-live", "next-live", "gc-eff");
4455   _out->print_cr(G1PPRL_LINE_PREFIX
4456                  G1PPRL_TYPE_H_FORMAT
4457                  G1PPRL_ADDR_BASE_H_FORMAT
4458                  G1PPRL_BYTE_H_FORMAT
4459                  G1PPRL_BYTE_H_FORMAT
4460                  G1PPRL_BYTE_H_FORMAT
4461                  G1PPRL_DOUBLE_H_FORMAT,
4462                  "", "",
4463                  "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
4464 }
4465
4466 // It takes as a parameter a reference to one of the _hum_* fields,
4467 // deduces the corresponding value for a region in a humongous region
4468 // series (either the region size, or what's left if the _hum_* field
4469 // is < the region size), and updates the _hum_* field accordingly.
4470 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4471   size_t bytes = 0;
4472   // The > 0 check is to deal with the prev and next live bytes which
4473   // could be 0.
4474   if (*hum_bytes > 0) {
4475     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4476     *hum_bytes -= bytes;
4477   }
4478   return bytes;
4479 }
4480
4481 // It deduces the values for a region in a humongous region series
4482 // from the _hum_* fields and updates those accordingly. It assumes
4483 // that the _hum_* fields have already been set up from the "starts
4484 // humongous" region and that we visit the regions in address order.
4485 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4486                                                      size_t* capacity_bytes,
4487                                                      size_t* prev_live_bytes,
4488                                                      size_t* next_live_bytes) {
4489   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4490   *used_bytes = get_hum_bytes(&_hum_used_bytes);
4491   *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
4492   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4493   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4494 }
4495
4496 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4497   const char* type = "";
4498   HeapWord* bottom = r->bottom();
4499   HeapWord* end = r->end();
4500   size_t capacity_bytes = r->capacity();
4501   size_t used_bytes = r->used();
4502   size_t prev_live_bytes = r->live_bytes();
4503   size_t next_live_bytes = r->next_live_bytes();
4504   double gc_eff = r->gc_efficiency();
4505   if (r->used() == 0) {
4506     type = "FREE";
4507   } else if (r->is_survivor()) {
4508     type = "SURV";
4509   } else if (r->is_young()) {
4510     type = "EDEN";
4511   } else if (r->startsHumongous()) {
4512     type = "HUMS";
4513
4514     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4515            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4516            "they should have been zeroed after the last time we used them");
4517     // Set up the _hum_* fields.
4518 _hum_capacity_bytes = capacity_bytes; 4519 _hum_used_bytes = used_bytes; 4520 _hum_prev_live_bytes = prev_live_bytes; 4521 _hum_next_live_bytes = next_live_bytes; 4522 get_hum_bytes(&used_bytes, &capacity_bytes, 4523 &prev_live_bytes, &next_live_bytes); 4524 end = bottom + HeapRegion::GrainWords; 4525 } else if (r->continuesHumongous()) { 4526 type = "HUMC"; 4527 get_hum_bytes(&used_bytes, &capacity_bytes, 4528 &prev_live_bytes, &next_live_bytes); 4529 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4530 } else { 4531 type = "OLD"; 4532 } 4533 4534 _total_used_bytes += used_bytes; 4535 _total_capacity_bytes += capacity_bytes; 4536 _total_prev_live_bytes += prev_live_bytes; 4537 _total_next_live_bytes += next_live_bytes; 4538 4539 // Print a line for this particular region. 4540 _out->print_cr(G1PPRL_LINE_PREFIX 4541 G1PPRL_TYPE_FORMAT 4542 G1PPRL_ADDR_BASE_FORMAT 4543 G1PPRL_BYTE_FORMAT 4544 G1PPRL_BYTE_FORMAT 4545 G1PPRL_BYTE_FORMAT 4546 G1PPRL_DOUBLE_FORMAT, 4547 type, bottom, end, 4548 used_bytes, prev_live_bytes, next_live_bytes, gc_eff); 4549 4550 return false; 4551 } 4552 4553 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4554 // Print the footer of the output. 4555 _out->print_cr(G1PPRL_LINE_PREFIX); 4556 _out->print_cr(G1PPRL_LINE_PREFIX 4557 " SUMMARY" 4558 G1PPRL_SUM_MB_FORMAT("capacity") 4559 G1PPRL_SUM_MB_PERC_FORMAT("used") 4560 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4561 G1PPRL_SUM_MB_PERC_FORMAT("next-live"), 4562 bytes_to_mb(_total_capacity_bytes), 4563 bytes_to_mb(_total_used_bytes), 4564 perc(_total_used_bytes, _total_capacity_bytes), 4565 bytes_to_mb(_total_prev_live_bytes), 4566 perc(_total_prev_live_bytes, _total_capacity_bytes), 4567 bytes_to_mb(_total_next_live_bytes), 4568 perc(_total_next_live_bytes, _total_capacity_bytes)); 4569 _out->cr(); 4570 }
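// A sketch of how this closure is driven. The G1PrintRegionLivenessInfo
// flag and heap_region_iterate() exist in this codebase, but treat the
// exact call site and phase string as illustrative:
//
//   if (G1PrintRegionLivenessInfo) {
//     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//     G1CollectedHeap::heap()->heap_region_iterate(&cl);
//   } // the destructor prints the SUMMARY footer when cl goes out of scope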