/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
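  // Each bitmap bit covers (1 << _shifter) heap words, so rounding addr up
  // to that granularity ensures heapWordToOffset() below maps it onto the
  // first bit that could correspond to an object at or after addr.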
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return heap_size / mark_distance();
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

void CMBitMap::clearAll() {
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  G1CollectedHeap::heap()->heap_region_iterate(&cl);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // Clamp the range to the portion of the heap this bitmap covers.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  // Clamp the range to the portion of the heap this bitmap covers.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false)
{
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads = 0;
    _max_parallel_marking_threads = 0;
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
        (double) os::processor_count();
      double sleep_factor =
        (1.0 - marking_task_overhead) / marking_task_overhead;

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor = sleep_factor;
      _marking_task_overhead = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
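      // (scale_parallel_threads() yields roughly one marking thread for
      // every four parallel GC threads.)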
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
                                             _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
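    // Only values explicitly given on the command line are checked here;
    // ergonomically derived defaults are assumed to be consistent already.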
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // a different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  g1h->heap_region_iterate(&cl);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that a Full GC or an evacuation pause could occur while
 * it is suspended. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};
// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
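    // scan_finished() below also notifies any thread blocked in
    // wait_until_scan_finished() via RootRegionScan_lock.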
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
                  G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
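// The "expected" region and card bitmaps it fills in are compared against
// the actual counting data by VerifyLiveObjectDataHRClosure below.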
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
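    // (The reverse case, an actual bit set while the expected bit is
    // clear, is tolerated.)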
1623 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); 1624 1625 bool expected = _exp_region_bm->at(index); 1626 bool actual = _region_bm->at(index); 1627 if (expected && !actual) { 1628 if (_verbose) { 1629 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: " 1630 "expected: %s, actual: %s", 1631 hr->hrm_index(), 1632 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1633 } 1634 failures += 1; 1635 } 1636 1637 // Verify that the card bit maps for the cards spanned by the current 1638 // region match. We have an error if we have a set bit in the expected 1639 // bit map and the corresponding bit in the actual bitmap is not set. 1640 1641 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1642 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1643 1644 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1645 expected = _exp_card_bm->at(i); 1646 actual = _card_bm->at(i); 1647 1648 if (expected && !actual) { 1649 if (_verbose) { 1650 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1651 "expected: %s, actual: %s", 1652 hr->hrm_index(), i, 1653 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1654 } 1655 failures += 1; 1656 } 1657 } 1658 1659 if (failures > 0 && _verbose) { 1660 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1661 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1662 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1663 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1664 } 1665 1666 _failures += failures; 1667 1668 // We could stop iteration over the heap when we 1669 // find the first violating region by returning true. 1670 return false; 1671 } 1672 }; 1673 1674 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1675 protected: 1676 G1CollectedHeap* _g1h; 1677 ConcurrentMark* _cm; 1678 BitMap* _actual_region_bm; 1679 BitMap* _actual_card_bm; 1680 1681 uint _n_workers; 1682 1683 BitMap* _expected_region_bm; 1684 BitMap* _expected_card_bm; 1685 1686 int _failures; 1687 bool _verbose; 1688 1689 HeapRegionClaimer _hrclaimer; 1690 1691 public: 1692 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1693 BitMap* region_bm, BitMap* card_bm, 1694 BitMap* expected_region_bm, BitMap* expected_card_bm) 1695 : AbstractGangTask("G1 verify final counting"), 1696 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1697 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1698 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1699 _failures(0), _verbose(false), 1700 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1701 assert(VerifyDuringGC, "don't call this otherwise"); 1702 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1703 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1704 1705 _verbose = _cm->verbose_medium(); 1706 } 1707 1708 void work(uint worker_id) { 1709 assert(worker_id < _n_workers, "invariant"); 1710 1711 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1712 _actual_region_bm, _actual_card_bm, 1713 _expected_region_bm, 1714 _expected_card_bm, 1715 _verbose); 1716 1717 if (G1CollectedHeap::use_parallel_gc_threads()) { 1718 _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer); 1719 } else { 1720 _g1h->heap_region_iterate(&verify_cl); 1721 } 1722 1723 Atomic::add(verify_cl.failures(), &_failures); 1724 } 1725 1726 int failures() const { return _failures; } 1727 }; 1728 1729 // Closure that finalizes the liveness counting data. 
1730 // Used during the cleanup pause.
1731 // Sets the bits corresponding to the interval [NTAMS, top)
1732 // (which contains the implicitly live objects) in the
1733 // card liveness bitmap. Also sets the bit, in the region
1734 // liveness bitmap, for each region containing live data.
1735
1736 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1737  public:
1738   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1739                               BitMap* region_bm,
1740                               BitMap* card_bm) :
1741     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1742
1743   bool doHeapRegion(HeapRegion* hr) {
1744
1745     if (hr->is_continues_humongous()) {
1746       // We will ignore these here and process them when their
1747       // associated "starts humongous" region is processed (see
1748       // set_bit_for_heap_region()). Note that we cannot rely on their
1749       // associated "starts humongous" region to have their bit set to
1750       // 1 since, due to the region chunking in the parallel region
1751       // iteration, a "continues humongous" region might be visited
1752       // before its associated "starts humongous".
1753       return false;
1754     }
1755
1756     HeapWord* ntams = hr->next_top_at_mark_start();
1757     HeapWord* top   = hr->top();
1758
1759     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1760
1761     // Mark the allocated-since-marking portion...
1762     if (ntams < top) {
1763       // This definitely means the region has live objects.
1764       set_bit_for_region(hr);
1765
1766       // Now set the bits in the card bitmap for [ntams, top)
1767       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1768       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1769
1770       // Note: if we're looking at the last region in the heap, top
1771       // could actually be just beyond the end of the heap; end_idx
1772       // will then correspond to a (non-existent) card that is also
1773       // just beyond the heap.
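      // Illustrative example (the addresses and the 512-byte card size
      // are assumed for this sketch, not taken from the code): with
      // ntams == 0x40000000 and top == 0x40000230, start_idx is the card
      // covering 0x40000000 and end_idx the card covering 0x40000200.
      // Since [ntams, top) extends into that last, partially covered
      // card, end_idx is bumped by one below so that the half-open range
      // [start_idx, end_idx) includes it.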
1774       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1775         // end of the range is not card aligned - increment to cover
1776         // all the cards spanned by the range
1777         end_idx += 1;
1778       }
1779
1780       assert(end_idx <= _card_bm->size(),
1781              err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1782                      end_idx, _card_bm->size()));
1783       assert(start_idx < _card_bm->size(),
1784              err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
1785                      start_idx, _card_bm->size()));
1786
1787       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1788     }
1789
1790     // Set the bit for the region if it contains live data
1791     if (hr->next_marked_bytes() > 0) {
1792       set_bit_for_region(hr);
1793     }
1794
1795     return false;
1796   }
1797 };
1798
1799 class G1ParFinalCountTask: public AbstractGangTask {
1800  protected:
1801   G1CollectedHeap* _g1h;
1802   ConcurrentMark* _cm;
1803   BitMap* _actual_region_bm;
1804   BitMap* _actual_card_bm;
1805
1806   uint    _n_workers;
1807   HeapRegionClaimer _hrclaimer;
1808
1809  public:
1810   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1811     : AbstractGangTask("G1 final counting"),
1812       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1813       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1814       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1815   }
1816
1817   void work(uint worker_id) {
1818     assert(worker_id < _n_workers, "invariant");
1819
1820     FinalCountDataUpdateClosure final_update_cl(_g1h,
1821                                                 _actual_region_bm,
1822                                                 _actual_card_bm);
1823
1824     if (G1CollectedHeap::use_parallel_gc_threads()) {
1825       _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
1826     } else {
1827       _g1h->heap_region_iterate(&final_update_cl);
1828     }
1829   }
1830 };
1831
1832 class G1ParNoteEndTask;
1833
1834 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1835   G1CollectedHeap* _g1;
1836   size_t _max_live_bytes;
1837   uint _regions_claimed;
1838   size_t _freed_bytes;
1839   FreeRegionList* _local_cleanup_list;
1840   HeapRegionSetCount _old_regions_removed;
1841   HeapRegionSetCount _humongous_regions_removed;
1842   HRRSCleanupTask* _hrrs_cleanup_task;
1843   double _claimed_region_time;
1844   double _max_region_time;
1845
1846  public:
1847   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1848                              FreeRegionList* local_cleanup_list,
1849                              HRRSCleanupTask* hrrs_cleanup_task) :
1850     _g1(g1),
1851     _max_live_bytes(0), _regions_claimed(0),
1852     _freed_bytes(0),
1853     _claimed_region_time(0.0), _max_region_time(0.0),
1854     _local_cleanup_list(local_cleanup_list),
1855     _old_regions_removed(),
1856     _humongous_regions_removed(),
1857     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1858
1859   size_t freed_bytes() { return _freed_bytes; }
1860   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1861   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1862
1863   bool doHeapRegion(HeapRegion *hr) {
1864     if (hr->is_continues_humongous()) {
1865       return false;
1866     }
1867     // Regions were claimed via a HeapRegionClaimer in the FinalCount
1868     // task; this task uses its own claimer, so all regions are claimable again here.
1869 _g1->reset_gc_time_stamps(hr); 1870 double start = os::elapsedTime(); 1871 _regions_claimed++; 1872 hr->note_end_of_marking(); 1873 _max_live_bytes += hr->max_live_bytes(); 1874 1875 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1876 _freed_bytes += hr->used(); 1877 hr->set_containing_set(NULL); 1878 if (hr->is_humongous()) { 1879 assert(hr->is_starts_humongous(), "we should only see starts humongous"); 1880 _humongous_regions_removed.increment(1u, hr->capacity()); 1881 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1882 } else { 1883 _old_regions_removed.increment(1u, hr->capacity()); 1884 _g1->free_region(hr, _local_cleanup_list, true); 1885 } 1886 } else { 1887 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1888 } 1889 1890 double region_time = (os::elapsedTime() - start); 1891 _claimed_region_time += region_time; 1892 if (region_time > _max_region_time) { 1893 _max_region_time = region_time; 1894 } 1895 return false; 1896 } 1897 1898 size_t max_live_bytes() { return _max_live_bytes; } 1899 uint regions_claimed() { return _regions_claimed; } 1900 double claimed_region_time_sec() { return _claimed_region_time; } 1901 double max_region_time_sec() { return _max_region_time; } 1902 }; 1903 1904 class G1ParNoteEndTask: public AbstractGangTask { 1905 friend class G1NoteEndOfConcMarkClosure; 1906 1907 protected: 1908 G1CollectedHeap* _g1h; 1909 size_t _max_live_bytes; 1910 size_t _freed_bytes; 1911 FreeRegionList* _cleanup_list; 1912 HeapRegionClaimer _hrclaimer; 1913 1914 public: 1915 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1916 AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { 1917 } 1918 1919 void work(uint worker_id) { 1920 double start = os::elapsedTime(); 1921 FreeRegionList local_cleanup_list("Local Cleanup List"); 1922 HRRSCleanupTask hrrs_cleanup_task; 1923 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1924 &hrrs_cleanup_task); 1925 if (G1CollectedHeap::use_parallel_gc_threads()) { 1926 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); 1927 } else { 1928 _g1h->heap_region_iterate(&g1_note_end); 1929 } 1930 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1931 1932 // Now update the lists 1933 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1934 { 1935 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1936 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1937 _max_live_bytes += g1_note_end.max_live_bytes(); 1938 _freed_bytes += g1_note_end.freed_bytes(); 1939 1940 // If we iterate over the global cleanup list at the end of 1941 // cleanup to do this printing we will not guarantee to only 1942 // generate output for the newly-reclaimed regions (the list 1943 // might not be empty at the beginning of cleanup; we might 1944 // still be working on its previous contents). So we do the 1945 // printing here, before we append the new regions to the global 1946 // cleanup list. 
1947 1948 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1949 if (hr_printer->is_active()) { 1950 FreeRegionListIterator iter(&local_cleanup_list); 1951 while (iter.more_available()) { 1952 HeapRegion* hr = iter.get_next(); 1953 hr_printer->cleanup(hr); 1954 } 1955 } 1956 1957 _cleanup_list->add_ordered(&local_cleanup_list); 1958 assert(local_cleanup_list.is_empty(), "post-condition"); 1959 1960 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1961 } 1962 } 1963 size_t max_live_bytes() { return _max_live_bytes; } 1964 size_t freed_bytes() { return _freed_bytes; } 1965 }; 1966 1967 class G1ParScrubRemSetTask: public AbstractGangTask { 1968 protected: 1969 G1RemSet* _g1rs; 1970 BitMap* _region_bm; 1971 BitMap* _card_bm; 1972 HeapRegionClaimer _hrclaimer; 1973 1974 public: 1975 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1976 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1977 } 1978 1979 void work(uint worker_id) { 1980 if (G1CollectedHeap::use_parallel_gc_threads()) { 1981 _g1rs->scrub_par(_region_bm, _card_bm, worker_id, &_hrclaimer); 1982 } else { 1983 _g1rs->scrub(_region_bm, _card_bm); 1984 } 1985 } 1986 1987 }; 1988 1989 void ConcurrentMark::cleanup() { 1990 // world is stopped at this checkpoint 1991 assert(SafepointSynchronize::is_at_safepoint(), 1992 "world should be stopped"); 1993 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1994 1995 // If a full collection has happened, we shouldn't do this. 1996 if (has_aborted()) { 1997 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1998 return; 1999 } 2000 2001 g1h->verify_region_sets_optional(); 2002 2003 if (VerifyDuringGC) { 2004 HandleMark hm; // handle scope 2005 Universe::heap()->prepare_for_verify(); 2006 Universe::verify(VerifyOption_G1UsePrevMarking, 2007 " VerifyDuringGC:(before)"); 2008 } 2009 g1h->check_bitmaps("Cleanup Start"); 2010 2011 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 2012 g1p->record_concurrent_mark_cleanup_start(); 2013 2014 double start = os::elapsedTime(); 2015 2016 HeapRegionRemSet::reset_for_cleanup_tasks(); 2017 2018 uint n_workers; 2019 2020 // Do counting once more with the world stopped for good measure. 2021 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 2022 2023 if (G1CollectedHeap::use_parallel_gc_threads()) { 2024 g1h->set_par_threads(); 2025 n_workers = g1h->n_par_threads(); 2026 assert(g1h->n_par_threads() == n_workers, 2027 "Should not have been reset"); 2028 g1h->workers()->run_task(&g1_par_count_task); 2029 // Done with the parallel phase so reset to 0. 2030 g1h->set_par_threads(0); 2031 } else { 2032 n_workers = 1; 2033 g1_par_count_task.work(0); 2034 } 2035 2036 if (VerifyDuringGC) { 2037 // Verify that the counting data accumulated during marking matches 2038 // that calculated by walking the marking bitmap. 2039 2040 // Bitmaps to hold expected values 2041 BitMap expected_region_bm(_region_bm.size(), true); 2042 BitMap expected_card_bm(_card_bm.size(), true); 2043 2044 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 2045 &_region_bm, 2046 &_card_bm, 2047 &expected_region_bm, 2048 &expected_card_bm); 2049 2050 if (G1CollectedHeap::use_parallel_gc_threads()) { 2051 g1h->set_par_threads((int)n_workers); 2052 g1h->workers()->run_task(&g1_par_verify_task); 2053 // Done with the parallel phase so reset to 0. 
2054       g1h->set_par_threads(0);
2055     } else {
2056       g1_par_verify_task.work(0);
2057     }
2058
2059     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2060   }
2061
2062   size_t start_used_bytes = g1h->used();
2063   g1h->set_marking_complete();
2064
2065   double count_end = os::elapsedTime();
2066   double this_final_counting_time = (count_end - start);
2067   _total_counting_time += this_final_counting_time;
2068
2069   if (G1PrintRegionLivenessInfo) {
2070     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2071     _g1h->heap_region_iterate(&cl);
2072   }
2073
2074   // Install newly created mark bitmap as "prev".
2075   swapMarkBitMaps();
2076
2077   g1h->reset_gc_time_stamp();
2078
2079   // Note end of marking in all heap regions.
2080   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
2081   if (G1CollectedHeap::use_parallel_gc_threads()) {
2082     g1h->set_par_threads((int)n_workers);
2083     g1h->workers()->run_task(&g1_par_note_end_task);
2084     g1h->set_par_threads(0);
2085   } else {
2086     g1_par_note_end_task.work(0);
2087   }
2088   g1h->check_gc_time_stamps();
2089
2090   if (!cleanup_list_is_empty()) {
2091     // The cleanup list is not empty, so we'll have to process it
2092     // concurrently. Notify anyone else that might be wanting free
2093     // regions that there will be more free regions coming soon.
2094     g1h->set_free_regions_coming();
2095   }
2096
2097   // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
2098   // call below, since it affects the metric by which we sort the heap regions.
2099   if (G1ScrubRemSets) {
2100     double rs_scrub_start = os::elapsedTime();
2101     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
2102     if (G1CollectedHeap::use_parallel_gc_threads()) {
2103       g1h->set_par_threads((int)n_workers);
2104       g1h->workers()->run_task(&g1_par_scrub_rs_task);
2105       g1h->set_par_threads(0);
2106     } else {
2107       g1_par_scrub_rs_task.work(0);
2108     }
2109
2110     double rs_scrub_end = os::elapsedTime();
2111     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2112     _total_rs_scrub_time += this_rs_scrub_time;
2113   }
2114
2115   // This will also free any regions totally full of garbage objects,
2116   // and sort the regions.
2117   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2118
2119   // Statistics.
2120   double end = os::elapsedTime();
2121   _cleanup_times.add((end - start) * 1000.0);
2122
2123   if (G1Log::fine()) {
2124     g1h->print_size_transition(gclog_or_tty,
2125                                start_used_bytes,
2126                                g1h->used(),
2127                                g1h->capacity());
2128   }
2129
2130   // Cleanup will have freed any regions completely full of garbage.
2131   // Update the soft reference policy with the new heap occupancy.
2132   Universe::update_heap_info_at_gc();
2133
2134   if (VerifyDuringGC) {
2135     HandleMark hm;  // handle scope
2136     Universe::heap()->prepare_for_verify();
2137     Universe::verify(VerifyOption_G1UsePrevMarking,
2138                      " VerifyDuringGC:(after)");
2139   }
2140
2141   g1h->check_bitmaps("Cleanup End");
2142
2143   g1h->verify_region_sets_optional();
2144
2145   // We need to make this a "collection" so any collection pause that
2146   // races with it goes around and waits for completeCleanup to finish.
2147   g1h->increment_total_collections();
2148
2149   // Clean out dead classes and update Metaspace sizes.
2150   if (ClassUnloadingWithConcurrentMark) {
2151     ClassLoaderDataGraph::purge();
2152   }
2153   MetaspaceGC::compute_new_size();
2154
2155   // We reclaimed old regions so we should calculate the sizes to make
2156   // sure we update the old gen/space data.
2157   g1h->g1mm()->update_sizes();
2158
2159   g1h->trace_heap_after_concurrent_cycle();
2160 }
2161
2162 void ConcurrentMark::completeCleanup() {
2163   if (has_aborted()) return;
2164
2165   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2166
2167   _cleanup_list.verify_optional();
2168   FreeRegionList tmp_free_list("Tmp Free List");
2169
2170   if (G1ConcRegionFreeingVerbose) {
2171     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2172                            "cleanup list has %u entries",
2173                            _cleanup_list.length());
2174   }
2175
2176   // No one else should be accessing the _cleanup_list at this point,
2177   // so it is not necessary to take any locks.
2178   while (!_cleanup_list.is_empty()) {
2179     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2180     assert(hr != NULL, "Got NULL from a non-empty list");
2181     hr->par_clear();
2182     tmp_free_list.add_ordered(hr);
2183
2184     // Instead of adding one region at a time to the secondary_free_list,
2185     // we accumulate them in the local list and move them a few at a
2186     // time. This also cuts down on the number of notify_all() calls
2187     // we do during this process. We'll also append the local list when
2188     // _cleanup_list is empty (which means we just removed the last
2189     // region from the _cleanup_list).
2190     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2191         _cleanup_list.is_empty()) {
2192       if (G1ConcRegionFreeingVerbose) {
2193         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2194                                "appending %u entries to the secondary_free_list, "
2195                                "cleanup list still has %u entries",
2196                                tmp_free_list.length(),
2197                                _cleanup_list.length());
2198       }
2199
2200       {
2201         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2202         g1h->secondary_free_list_add(&tmp_free_list);
2203         SecondaryFreeList_lock->notify_all();
2204       }
2205
2206       if (G1StressConcRegionFreeing) {
2207         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2208           os::sleep(Thread::current(), (jlong) 1, false);
2209         }
2210       }
2211     }
2212   }
2213   assert(tmp_free_list.is_empty(), "post-condition");
2214 }
2215
2216 // Supporting Object and Oop closures for reference discovery
2217 // and processing during marking
2218
2219 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2220   HeapWord* addr = (HeapWord*)obj;
2221   return addr != NULL &&
2222          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2223 }
2224
2225 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2226 // Uses the CMTask associated with a worker thread (for serial reference
2227 // processing the CMTask for worker 0 is used) to preserve (mark) and
2228 // trace referent objects.
2229 //
2230 // Using the CMTask and embedded local queues avoids having the worker
2231 // threads operating on the global mark stack. This reduces the risk
2232 // of overflowing the stack - which we would rather avoid at this late
2233 // stage. Also using the tasks' local queues removes the potential
2234 // for the workers to interfere with each other, which could occur if
2235 // they were operating on the global stack.
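//
// Usage sketch (illustrative, mirroring weakRefsWork() below): a matched
// pair of these closures is handed to the reference processor, e.g.
//
//   G1CMKeepAliveAndDrainClosure keep_alive(cm, cm->task(0), true /* is_serial */);
//   G1CMDrainMarkingStackClosure drain(cm, cm->task(0), true /* is_serial */);
//   rp->process_discovered_references(&is_alive, &keep_alive, &drain,
//                                     executor, gc_timer, gc_id);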
2236
2237 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2238   ConcurrentMark* _cm;
2239   CMTask* _task;
2240   int _ref_counter_limit;
2241   int _ref_counter;
2242   bool _is_serial;
2243  public:
2244   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2245     _cm(cm), _task(task), _is_serial(is_serial),
2246     _ref_counter_limit(G1RefProcDrainInterval) {
2247     assert(_ref_counter_limit > 0, "sanity");
2248     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2249     _ref_counter = _ref_counter_limit;
2250   }
2251
2252   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2253   virtual void do_oop(      oop* p) { do_oop_work(p); }
2254
2255   template <class T> void do_oop_work(T* p) {
2256     if (!_cm->has_overflown()) {
2257       oop obj = oopDesc::load_decode_heap_oop(p);
2258       if (_cm->verbose_high()) {
2259         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2260                                "*"PTR_FORMAT" = "PTR_FORMAT,
2261                                _task->worker_id(), p2i(p), p2i((void*) obj));
2262       }
2263
2264       _task->deal_with_reference(obj);
2265       _ref_counter--;
2266
2267       if (_ref_counter == 0) {
2268         // We have dealt with _ref_counter_limit references, pushing them
2269         // and objects reachable from them on to the local stack (and
2270         // possibly the global stack). Call CMTask::do_marking_step() to
2271         // process these entries.
2272         //
2273         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2274         // there's nothing more to do (i.e. we're done with the entries that
2275         // were pushed as a result of the CMTask::deal_with_reference() calls
2276         // above) or we overflow.
2277         //
2278         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2279         // flag while there may still be some work to do. (See the comment at
2280         // the beginning of CMTask::do_marking_step() for those conditions -
2281         // one of which is reaching the specified time target.) It is only
2282         // when CMTask::do_marking_step() returns without setting the
2283         // has_aborted() flag that the marking step has completed.
2284         do {
2285           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2286           _task->do_marking_step(mark_step_duration_ms,
2287                                  false      /* do_termination */,
2288                                  _is_serial);
2289         } while (_task->has_aborted() && !_cm->has_overflown());
2290         _ref_counter = _ref_counter_limit;
2291       }
2292     } else {
2293       if (_cm->verbose_high()) {
2294         gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2295       }
2296     }
2297   }
2298 };
2299
2300 // 'Drain' oop closure used by both serial and parallel reference processing.
2301 // Uses the CMTask associated with a given worker thread (for serial
2302 // reference processing the CMTask for worker 0 is used). Calls the
2303 // do_marking_step routine, with an unbelievably large timeout value,
2304 // to drain the marking data structures of the remaining entries
2305 // added by the 'keep alive' oop closure above.
2306
2307 class G1CMDrainMarkingStackClosure: public VoidClosure {
2308   ConcurrentMark* _cm;
2309   CMTask* _task;
2310   bool _is_serial;
2311  public:
2312   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2313     _cm(cm), _task(task), _is_serial(is_serial) {
2314     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2315   }
2316
2317   void do_void() {
2318     do {
2319       if (_cm->verbose_high()) {
2320         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2321                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2322       }
2323
2324       // We call CMTask::do_marking_step() to completely drain the local
2325       // and global marking stacks of entries pushed by the 'keep alive'
2326       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2327       //
2328       // CMTask::do_marking_step() is called in a loop, which we'll exit
2329       // if there's nothing more to do (i.e. we've completely drained the
2330       // entries that were pushed as a result of applying the 'keep alive'
2331       // closure to the entries on the discovered ref lists) or we overflow
2332       // the global marking stack.
2333       //
2334       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2335       // flag while there may still be some work to do. (See the comment at
2336       // the beginning of CMTask::do_marking_step() for those conditions -
2337       // one of which is reaching the specified time target.) It is only
2338       // when CMTask::do_marking_step() returns without setting the
2339       // has_aborted() flag that the marking step has completed.
2340
2341       _task->do_marking_step(1000000000.0 /* something very large */,
2342                              true         /* do_termination */,
2343                              _is_serial);
2344     } while (_task->has_aborted() && !_cm->has_overflown());
2345   }
2346 };
2347
2348 // Implementation of AbstractRefProcTaskExecutor for parallel
2349 // reference processing at the end of G1 concurrent marking
2350
2351 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2352  private:
2353   G1CollectedHeap* _g1h;
2354   ConcurrentMark*  _cm;
2355   WorkGang*        _workers;
2356   int              _active_workers;
2357
2358  public:
2359   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2360                           ConcurrentMark* cm,
2361                           WorkGang* workers,
2362                           int n_workers) :
2363     _g1h(g1h), _cm(cm),
2364     _workers(workers), _active_workers(n_workers) { }
2365
2366   // Executes the given task using concurrent marking worker threads.
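  // (Each override below wraps the task in a gang-task proxy, sets the
  // concurrency level and the number of parallel threads, and runs the
  // proxy on the work gang; see the definitions that follow.)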
2367 virtual void execute(ProcessTask& task); 2368 virtual void execute(EnqueueTask& task); 2369 }; 2370 2371 class G1CMRefProcTaskProxy: public AbstractGangTask { 2372 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2373 ProcessTask& _proc_task; 2374 G1CollectedHeap* _g1h; 2375 ConcurrentMark* _cm; 2376 2377 public: 2378 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2379 G1CollectedHeap* g1h, 2380 ConcurrentMark* cm) : 2381 AbstractGangTask("Process reference objects in parallel"), 2382 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2383 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2384 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2385 } 2386 2387 virtual void work(uint worker_id) { 2388 ResourceMark rm; 2389 HandleMark hm; 2390 CMTask* task = _cm->task(worker_id); 2391 G1CMIsAliveClosure g1_is_alive(_g1h); 2392 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2393 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2394 2395 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2396 } 2397 }; 2398 2399 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2400 assert(_workers != NULL, "Need parallel worker threads."); 2401 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2402 2403 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2404 2405 // We need to reset the concurrency level before each 2406 // proxy task execution, so that the termination protocol 2407 // and overflow handling in CMTask::do_marking_step() knows 2408 // how many workers to wait for. 2409 _cm->set_concurrency(_active_workers); 2410 _g1h->set_par_threads(_active_workers); 2411 _workers->run_task(&proc_task_proxy); 2412 _g1h->set_par_threads(0); 2413 } 2414 2415 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2416 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2417 EnqueueTask& _enq_task; 2418 2419 public: 2420 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2421 AbstractGangTask("Enqueue reference objects in parallel"), 2422 _enq_task(enq_task) { } 2423 2424 virtual void work(uint worker_id) { 2425 _enq_task.work(worker_id); 2426 } 2427 }; 2428 2429 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2430 assert(_workers != NULL, "Need parallel worker threads."); 2431 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2432 2433 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2434 2435 // Not strictly necessary but... 2436 // 2437 // We need to reset the concurrency level before each 2438 // proxy task execution, so that the termination protocol 2439 // and overflow handling in CMTask::do_marking_step() knows 2440 // how many workers to wait for. 2441 _cm->set_concurrency(_active_workers); 2442 _g1h->set_par_threads(_active_workers); 2443 _workers->run_task(&enq_task_proxy); 2444 _g1h->set_par_threads(0); 2445 } 2446 2447 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2448 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2449 } 2450 2451 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2452 if (has_overflown()) { 2453 // Skip processing the discovered references if we have 2454 // overflown the global marking stack. Reference objects 2455 // only get discovered once so it is OK to not 2456 // de-populate the discovered reference lists. 
We could have,
2457 // but the only benefit would be that, when marking restarts,
2458 // fewer reference objects are discovered.
2459     return;
2460   }
2461
2462   ResourceMark rm;
2463   HandleMark   hm;
2464
2465   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2466
2467   // Is alive closure.
2468   G1CMIsAliveClosure g1_is_alive(g1h);
2469
2470   // Inner scope to exclude the cleaning of the string and symbol
2471   // tables from the displayed time.
2472   {
2473     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2474
2475     ReferenceProcessor* rp = g1h->ref_processor_cm();
2476
2477     // See the comment in G1CollectedHeap::ref_processing_init()
2478     // about how reference processing currently works in G1.
2479
2480     // Set the soft reference policy
2481     rp->setup_policy(clear_all_soft_refs);
2482     assert(_markStack.isEmpty(), "mark stack should be empty");
2483
2484     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2485     // in serial reference processing. Note these closures are also
2486     // used for serially processing (by the current thread) the
2487     // JNI references during parallel reference processing.
2488     //
2489     // These closures do not need to synchronize with the worker
2490     // threads involved in parallel reference processing as these
2491     // instances are executed serially by the current thread (e.g.
2492     // reference processing is not multi-threaded and is thus
2493     // performed by the current thread instead of a gang worker).
2494     //
2495     // The gang tasks involved in parallel reference processing create
2496     // their own instances of these closures, which do their own
2497     // synchronization among themselves.
2498     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2499     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2500
2501     // We need at least one active thread. If reference processing
2502     // is not multi-threaded we use the current (VMThread) thread,
2503     // otherwise we use the work gang from the G1CollectedHeap and
2504     // we utilize all the worker threads we can.
2505     bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2506     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2507     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2508
2509     // Parallel processing task executor.
2510     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2511                                               g1h->workers(), active_workers);
2512     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2513
2514     // Set the concurrency level. The phase was already set prior to
2515     // executing the remark task.
2516     set_concurrency(active_workers);
2517
2518     // Set the degree of MT processing here. If the discovery was done MT,
2519     // the number of threads involved during discovery could differ from
2520     // the number of active workers. This is OK as long as the discovered
2521     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2522     rp->set_active_mt_degree(active_workers);
2523
2524     // Process the weak references.
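    // (Note: when processing_is_mt is false, 'executor' is NULL and the
    // reference processor runs the keep-alive and drain closures
    // serially in the current thread.)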
2525 const ReferenceProcessorStats& stats = 2526 rp->process_discovered_references(&g1_is_alive, 2527 &g1_keep_alive, 2528 &g1_drain_mark_stack, 2529 executor, 2530 g1h->gc_timer_cm(), 2531 concurrent_gc_id()); 2532 g1h->gc_tracer_cm()->report_gc_reference_stats(stats); 2533 2534 // The do_oop work routines of the keep_alive and drain_marking_stack 2535 // oop closures will set the has_overflown flag if we overflow the 2536 // global marking stack. 2537 2538 assert(_markStack.overflow() || _markStack.isEmpty(), 2539 "mark stack should be empty (unless it overflowed)"); 2540 2541 if (_markStack.overflow()) { 2542 // This should have been done already when we tried to push an 2543 // entry on to the global mark stack. But let's do it again. 2544 set_has_overflown(); 2545 } 2546 2547 assert(rp->num_q() == active_workers, "why not"); 2548 2549 rp->enqueue_discovered_references(executor); 2550 2551 rp->verify_no_references_recorded(); 2552 assert(!rp->discovery_enabled(), "Post condition"); 2553 } 2554 2555 if (has_overflown()) { 2556 // We can not trust g1_is_alive if the marking stack overflowed 2557 return; 2558 } 2559 2560 assert(_markStack.isEmpty(), "Marking should have completed"); 2561 2562 // Unload Klasses, String, Symbols, Code Cache, etc. 2563 { 2564 G1CMTraceTime trace("Unloading", G1Log::finer()); 2565 2566 if (ClassUnloadingWithConcurrentMark) { 2567 bool purged_classes; 2568 2569 { 2570 G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest()); 2571 purged_classes = SystemDictionary::do_unloading(&g1_is_alive); 2572 } 2573 2574 { 2575 G1CMTraceTime trace("Parallel Unloading", G1Log::finest()); 2576 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2577 } 2578 } 2579 2580 if (G1StringDedup::is_enabled()) { 2581 G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest()); 2582 G1StringDedup::unlink(&g1_is_alive); 2583 } 2584 } 2585 } 2586 2587 void ConcurrentMark::swapMarkBitMaps() { 2588 CMBitMapRO* temp = _prevMarkBitMap; 2589 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2590 _nextMarkBitMap = (CMBitMap*) temp; 2591 } 2592 2593 class CMObjectClosure; 2594 2595 // Closure for iterating over objects, currently only used for 2596 // processing SATB buffers. 
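// (The SATB buffers hold the objects recorded by the pre-write barrier,
// i.e. the old values of overwritten reference fields. Feeding each
// recorded object to CMTask::deal_with_reference() below preserves the
// snapshot-at-the-beginning invariant.)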
2597 class CMObjectClosure : public ObjectClosure {
2598  private:
2599   CMTask* _task;
2600
2601  public:
2602   void do_object(oop obj) {
2603     _task->deal_with_reference(obj);
2604   }
2605
2606   CMObjectClosure(CMTask* task) : _task(task) { }
2607 };
2608
2609 class G1RemarkThreadsClosure : public ThreadClosure {
2610   CMObjectClosure _cm_obj;
2611   G1CMOopClosure _cm_cl;
2612   MarkingCodeBlobClosure _code_cl;
2613   int _thread_parity;
2614   bool _is_par;
2615
2616  public:
2617   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
2618     _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2619     _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
2620
2621   void do_thread(Thread* thread) {
2622     if (thread->is_Java_thread()) {
2623       if (thread->claim_oops_do(_is_par, _thread_parity)) {
2624         JavaThread* jt = (JavaThread*)thread;
2625
2626         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2627         // however, the oops reachable from nmethods have very complex lifecycles:
2628         // * Alive if on the stack of an executing method
2629         // * Weakly reachable otherwise
2630         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2631         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2632         jt->nmethods_do(&_code_cl);
2633
2634         jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
2635       }
2636     } else if (thread->is_VM_thread()) {
2637       if (thread->claim_oops_do(_is_par, _thread_parity)) {
2638         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
2639       }
2640     }
2641   }
2642 };
2643
2644 class CMRemarkTask: public AbstractGangTask {
2645  private:
2646   ConcurrentMark* _cm;
2647   bool            _is_serial;
2648  public:
2649   void work(uint worker_id) {
2650     // Since all available tasks are actually started, we should
2651     // only proceed if we're supposed to be active.
2652     if (worker_id < _cm->active_tasks()) {
2653       CMTask* task = _cm->task(worker_id);
2654       task->record_start_time();
2655       {
2656         ResourceMark rm;
2657         HandleMark hm;
2658
2659         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
2660         Threads::threads_do(&threads_f);
2661       }
2662
2663       do {
2664         task->do_marking_step(1000000000.0 /* something very large */,
2665                               true         /* do_termination       */,
2666                               _is_serial);
2667       } while (task->has_aborted() && !_cm->has_overflown());
2668       // If we overflow, then we do not want to restart. We instead
2669       // want to abort remark and do concurrent marking again.
2670       task->record_end_time();
2671     }
2672   }
2673
2674   CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2675     AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2676     _cm->terminator()->reset_for_reuse(active_workers);
2677   }
2678 };
2679
2680 void ConcurrentMark::checkpointRootsFinalWork() {
2681   ResourceMark rm;
2682   HandleMark   hm;
2683   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2684
2685   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2686
2687   g1h->ensure_parsability(false);
2688
2689   if (G1CollectedHeap::use_parallel_gc_threads()) {
2690     G1CollectedHeap::StrongRootsScope srs(g1h);
2691     // this is remark, so we'll use up all active threads
2692     uint active_workers = g1h->workers()->active_workers();
2693     if (active_workers == 0) {
2694       assert(active_workers > 0, "Should have been set earlier");
2695       active_workers = (uint) ParallelGCThreads;
2696       g1h->workers()->set_active_workers(active_workers);
2697     }
2698     set_concurrency_and_phase(active_workers, false /* concurrent */);
2699     // Leave _parallel_marking_threads at its
2700     // value originally calculated in the ConcurrentMark
2701     // constructor and pass values of the active workers
2702     // through the gang in the task.
2703
2704     CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2705     // We will start all available threads, even if we decide that the
2706     // active_workers will be fewer. The extra ones will just bail out
2707     // immediately.
2708     g1h->set_par_threads(active_workers);
2709     g1h->workers()->run_task(&remarkTask);
2710     g1h->set_par_threads(0);
2711   } else {
2712     G1CollectedHeap::StrongRootsScope srs(g1h);
2713     uint active_workers = 1;
2714     set_concurrency_and_phase(active_workers, false /* concurrent */);
2715
2716     // Note - if there's no work gang then the VMThread will be
2717     // the thread to execute the remark - serially. We have
2718     // to pass true for the is_serial parameter so that
2719     // CMTask::do_marking_step() doesn't enter the sync
2720     // barriers in the event of an overflow. Doing so will
2721     // cause an assert that the current thread is not a
2722     // concurrent GC thread.
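    // (With is_serial == true the marking step still drains the marking
    // stacks; it only skips the overflow barrier synchronization, which
    // assumes gang-worker callers.)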
2723 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/); 2724 remarkTask.work(0); 2725 } 2726 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2727 guarantee(has_overflown() || 2728 satb_mq_set.completed_buffers_num() == 0, 2729 err_msg("Invariant: has_overflown = %s, num buffers = %d", 2730 BOOL_TO_STR(has_overflown()), 2731 satb_mq_set.completed_buffers_num())); 2732 2733 print_stats(); 2734 } 2735 2736 #ifndef PRODUCT 2737 2738 class PrintReachableOopClosure: public OopClosure { 2739 private: 2740 G1CollectedHeap* _g1h; 2741 outputStream* _out; 2742 VerifyOption _vo; 2743 bool _all; 2744 2745 public: 2746 PrintReachableOopClosure(outputStream* out, 2747 VerifyOption vo, 2748 bool all) : 2749 _g1h(G1CollectedHeap::heap()), 2750 _out(out), _vo(vo), _all(all) { } 2751 2752 void do_oop(narrowOop* p) { do_oop_work(p); } 2753 void do_oop( oop* p) { do_oop_work(p); } 2754 2755 template <class T> void do_oop_work(T* p) { 2756 oop obj = oopDesc::load_decode_heap_oop(p); 2757 const char* str = NULL; 2758 const char* str2 = ""; 2759 2760 if (obj == NULL) { 2761 str = ""; 2762 } else if (!_g1h->is_in_g1_reserved(obj)) { 2763 str = " O"; 2764 } else { 2765 HeapRegion* hr = _g1h->heap_region_containing(obj); 2766 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); 2767 bool marked = _g1h->is_marked(obj, _vo); 2768 2769 if (over_tams) { 2770 str = " >"; 2771 if (marked) { 2772 str2 = " AND MARKED"; 2773 } 2774 } else if (marked) { 2775 str = " M"; 2776 } else { 2777 str = " NOT"; 2778 } 2779 } 2780 2781 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", 2782 p2i(p), p2i((void*) obj), str, str2); 2783 } 2784 }; 2785 2786 class PrintReachableObjectClosure : public ObjectClosure { 2787 private: 2788 G1CollectedHeap* _g1h; 2789 outputStream* _out; 2790 VerifyOption _vo; 2791 bool _all; 2792 HeapRegion* _hr; 2793 2794 public: 2795 PrintReachableObjectClosure(outputStream* out, 2796 VerifyOption vo, 2797 bool all, 2798 HeapRegion* hr) : 2799 _g1h(G1CollectedHeap::heap()), 2800 _out(out), _vo(vo), _all(all), _hr(hr) { } 2801 2802 void do_object(oop o) { 2803 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo); 2804 bool marked = _g1h->is_marked(o, _vo); 2805 bool print_it = _all || over_tams || marked; 2806 2807 if (print_it) { 2808 _out->print_cr(" "PTR_FORMAT"%s", 2809 p2i((void *)o), (over_tams) ? " >" : (marked) ? 
" M" : ""); 2810 PrintReachableOopClosure oopCl(_out, _vo, _all); 2811 o->oop_iterate_no_header(&oopCl); 2812 } 2813 } 2814 }; 2815 2816 class PrintReachableRegionClosure : public HeapRegionClosure { 2817 private: 2818 G1CollectedHeap* _g1h; 2819 outputStream* _out; 2820 VerifyOption _vo; 2821 bool _all; 2822 2823 public: 2824 bool doHeapRegion(HeapRegion* hr) { 2825 HeapWord* b = hr->bottom(); 2826 HeapWord* e = hr->end(); 2827 HeapWord* t = hr->top(); 2828 HeapWord* p = _g1h->top_at_mark_start(hr, _vo); 2829 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2830 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p)); 2831 _out->cr(); 2832 2833 HeapWord* from = b; 2834 HeapWord* to = t; 2835 2836 if (to > from) { 2837 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to)); 2838 _out->cr(); 2839 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2840 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2841 _out->cr(); 2842 } 2843 2844 return false; 2845 } 2846 2847 PrintReachableRegionClosure(outputStream* out, 2848 VerifyOption vo, 2849 bool all) : 2850 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } 2851 }; 2852 2853 void ConcurrentMark::print_reachable(const char* str, 2854 VerifyOption vo, 2855 bool all) { 2856 gclog_or_tty->cr(); 2857 gclog_or_tty->print_cr("== Doing heap dump... "); 2858 2859 if (G1PrintReachableBaseFile == NULL) { 2860 gclog_or_tty->print_cr(" #### error: no base file defined"); 2861 return; 2862 } 2863 2864 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2865 (JVM_MAXPATHLEN - 1)) { 2866 gclog_or_tty->print_cr(" #### error: file name too long"); 2867 return; 2868 } 2869 2870 char file_name[JVM_MAXPATHLEN]; 2871 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2872 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2873 2874 fileStream fout(file_name); 2875 if (!fout.is_open()) { 2876 gclog_or_tty->print_cr(" #### error: could not open file"); 2877 return; 2878 } 2879 2880 outputStream* out = &fout; 2881 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); 2882 out->cr(); 2883 2884 out->print_cr("--- ITERATING OVER REGIONS"); 2885 out->cr(); 2886 PrintReachableRegionClosure rcl(out, vo, all); 2887 _g1h->heap_region_iterate(&rcl); 2888 out->cr(); 2889 2890 gclog_or_tty->print_cr(" done"); 2891 gclog_or_tty->flush(); 2892 } 2893 2894 #endif // PRODUCT 2895 2896 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2897 // Note we are overriding the read-only view of the prev map here, via 2898 // the cast. 2899 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2900 } 2901 2902 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2903 _nextMarkBitMap->clearRange(mr); 2904 } 2905 2906 HeapRegion* 2907 ConcurrentMark::claim_region(uint worker_id) { 2908 // "checkpoint" the finger 2909 HeapWord* finger = _finger; 2910 2911 // _heap_end will not change underneath our feet; it only changes at 2912 // yield points. 2913 while (finger < _heap_end) { 2914 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2915 2916 // Note on how this code handles humongous regions. In the 2917 // normal case the finger will reach the start of a "starts 2918 // humongous" (SH) region. Its end will either be the end of the 2919 // last "continues humongous" (CH) region in the sequence, or the 2920 // standard end of the SH region (if the SH is the only region in 2921 // the sequence). That way claim_region() will skip over the CH 2922 // regions. 
However, there is a subtle race between a CM thread
2923 // executing this method and a mutator thread doing a humongous
2924 // object allocation. The two are not mutually exclusive as the CM
2925 // thread does not need to hold the Heap_lock when it gets
2926 // here. So there is a chance that claim_region() will come across
2927 // a free region that's in the process of becoming a SH or a CH
2928 // region. In the former case, it will either
2929 //   a) Miss the update to the region's end, in which case it will
2930 //      visit every subsequent CH region, will find their bitmaps
2931 //      empty, and do nothing, or
2932 //   b) Observe the update of the region's end (in which case
2933 //      it will skip the subsequent CH regions).
2934 // If it comes across a region that suddenly becomes CH, the
2935 // scenario will be similar to b). So, the race between
2936 // claim_region() and a humongous object allocation might force us
2937 // to do a bit of unnecessary work (due to some unnecessary bitmap
2938 // iterations) but it should not introduce any correctness issues.
2939     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2940
2941     // Above heap_region_containing_raw may return NULL as we always scan
2942     // and claim until the end of the heap. In this case, just jump to the next region.
2943     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2944
2945     // Is the gap between reading the finger and doing the CAS too long?
2946     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2947     if (res == finger && curr_region != NULL) {
2948       // we succeeded
2949       HeapWord*   bottom        = curr_region->bottom();
2950       HeapWord*   limit         = curr_region->next_top_at_mark_start();
2951
2952       if (verbose_low()) {
2953         gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2954                                "["PTR_FORMAT", "PTR_FORMAT"), "
2955                                "limit = "PTR_FORMAT,
2956                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2957       }
2958
2959       // notice that _finger == end cannot be guaranteed here since
2960       // someone else might have moved the finger even further
2961       assert(_finger >= end, "the finger should have moved forward");
2962
2963       if (verbose_low()) {
2964         gclog_or_tty->print_cr("[%u] we were successful with region = "
2965                                PTR_FORMAT, worker_id, p2i(curr_region));
2966       }
2967
2968       if (limit > bottom) {
2969         if (verbose_low()) {
2970           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2971                                  "returning it ", worker_id, p2i(curr_region));
2972         }
2973         return curr_region;
2974       } else {
2975         assert(limit == bottom,
2976                "the region limit should be at bottom");
2977         if (verbose_low()) {
2978           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2979                                  "returning NULL", worker_id, p2i(curr_region));
2980         }
2981         // we return NULL and the caller should try calling
2982         // claim_region() again.
2983 return NULL; 2984 } 2985 } else { 2986 assert(_finger > finger, "the finger should have moved forward"); 2987 if (verbose_low()) { 2988 if (curr_region == NULL) { 2989 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 2990 "global finger = "PTR_FORMAT", " 2991 "our finger = "PTR_FORMAT, 2992 worker_id, p2i(_finger), p2i(finger)); 2993 } else { 2994 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2995 "global finger = "PTR_FORMAT", " 2996 "our finger = "PTR_FORMAT, 2997 worker_id, p2i(_finger), p2i(finger)); 2998 } 2999 } 3000 3001 // read it again 3002 finger = _finger; 3003 } 3004 } 3005 3006 return NULL; 3007 } 3008 3009 #ifndef PRODUCT 3010 enum VerifyNoCSetOopsPhase { 3011 VerifyNoCSetOopsStack, 3012 VerifyNoCSetOopsQueues, 3013 VerifyNoCSetOopsSATBCompleted, 3014 VerifyNoCSetOopsSATBThread 3015 }; 3016 3017 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 3018 private: 3019 G1CollectedHeap* _g1h; 3020 VerifyNoCSetOopsPhase _phase; 3021 int _info; 3022 3023 const char* phase_str() { 3024 switch (_phase) { 3025 case VerifyNoCSetOopsStack: return "Stack"; 3026 case VerifyNoCSetOopsQueues: return "Queue"; 3027 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 3028 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 3029 default: ShouldNotReachHere(); 3030 } 3031 return NULL; 3032 } 3033 3034 void do_object_work(oop obj) { 3035 guarantee(!_g1h->obj_in_cs(obj), 3036 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 3037 p2i((void*) obj), phase_str(), _info)); 3038 } 3039 3040 public: 3041 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 3042 3043 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 3044 _phase = phase; 3045 _info = info; 3046 } 3047 3048 virtual void do_oop(oop* p) { 3049 oop obj = oopDesc::load_decode_heap_oop(p); 3050 do_object_work(obj); 3051 } 3052 3053 virtual void do_oop(narrowOop* p) { 3054 // We should not come across narrow oops while scanning marking 3055 // stacks and SATB buffers. 
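    // (Narrow oops exist only as compressed reference fields inside heap
    // objects; the entries pushed on the marking stacks and recorded in
    // SATB buffers are always full-width oops.)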
3056 ShouldNotReachHere(); 3057 } 3058 3059 virtual void do_object(oop obj) { 3060 do_object_work(obj); 3061 } 3062 }; 3063 3064 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 3065 bool verify_enqueued_buffers, 3066 bool verify_thread_buffers, 3067 bool verify_fingers) { 3068 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 3069 if (!G1CollectedHeap::heap()->mark_in_progress()) { 3070 return; 3071 } 3072 3073 VerifyNoCSetOopsClosure cl; 3074 3075 if (verify_stacks) { 3076 // Verify entries on the global mark stack 3077 cl.set_phase(VerifyNoCSetOopsStack); 3078 _markStack.oops_do(&cl); 3079 3080 // Verify entries on the task queues 3081 for (uint i = 0; i < _max_worker_id; i += 1) { 3082 cl.set_phase(VerifyNoCSetOopsQueues, i); 3083 CMTaskQueue* queue = _task_queues->queue(i); 3084 queue->oops_do(&cl); 3085 } 3086 } 3087 3088 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 3089 3090 // Verify entries on the enqueued SATB buffers 3091 if (verify_enqueued_buffers) { 3092 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 3093 satb_qs.iterate_completed_buffers_read_only(&cl); 3094 } 3095 3096 // Verify entries on the per-thread SATB buffers 3097 if (verify_thread_buffers) { 3098 cl.set_phase(VerifyNoCSetOopsSATBThread); 3099 satb_qs.iterate_thread_buffers_read_only(&cl); 3100 } 3101 3102 if (verify_fingers) { 3103 // Verify the global finger 3104 HeapWord* global_finger = finger(); 3105 if (global_finger != NULL && global_finger < _heap_end) { 3106 // The global finger always points to a heap region boundary. We 3107 // use heap_region_containing_raw() to get the containing region 3108 // given that the global finger could be pointing to a free region 3109 // which subsequently becomes continues humongous. If that 3110 // happens, heap_region_containing() will return the bottom of the 3111 // corresponding starts humongous region and the check below will 3112 // not hold any more. 3113 // Since we always iterate over all regions, we might get a NULL HeapRegion 3114 // here. 3115 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 3116 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 3117 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 3118 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 3119 } 3120 3121 // Verify the task fingers 3122 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 3123 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 3124 CMTask* task = _tasks[i]; 3125 HeapWord* task_finger = task->finger(); 3126 if (task_finger != NULL && task_finger < _heap_end) { 3127 // See above note on the global finger verification. 3128 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 3129 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 3130 !task_hr->in_collection_set(), 3131 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 3132 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 3133 } 3134 } 3135 } 3136 } 3137 #endif // PRODUCT 3138 3139 // Aggregate the counting data that was constructed concurrently 3140 // with marking. 
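//
// In effect (illustrative summary of the closure below): for the region
// with index i,
//
//   hr->marked_bytes   +=  sum over workers w of marked_bytes_array(w)[i]
//   global card bitmap |=  union over workers w of task_card_bm(w),
//                          restricted to the cards of [bottom, ntams)
//
// where the per-worker arrays and bitmaps are the ones filled in during
// concurrent marking.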
3141 class AggregateCountDataHRClosure: public HeapRegionClosure {
3142   G1CollectedHeap* _g1h;
3143   ConcurrentMark* _cm;
3144   CardTableModRefBS* _ct_bs;
3145   BitMap* _cm_card_bm;
3146   uint _max_worker_id;
3147
3148  public:
3149   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3150                               BitMap* cm_card_bm,
3151                               uint max_worker_id) :
3152     _g1h(g1h), _cm(g1h->concurrent_mark()),
3153     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3154     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3155
3156   bool doHeapRegion(HeapRegion* hr) {
3157     if (hr->is_continues_humongous()) {
3158       // We will ignore these here and process them when their
3159       // associated "starts humongous" region is processed.
3160       // Note that we cannot rely on their associated
3161       // "starts humongous" region to have their bit set to 1
3162       // since, due to the region chunking in the parallel region
3163       // iteration, a "continues humongous" region might be visited
3164       // before its associated "starts humongous".
3165       return false;
3166     }
3167
3168     HeapWord* start = hr->bottom();
3169     HeapWord* limit = hr->next_top_at_mark_start();
3170     HeapWord* end = hr->end();
3171
3172     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3173            err_msg("Preconditions not met - "
3174                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3175                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
3176                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3177
3178     assert(hr->next_marked_bytes() == 0, "Precondition");
3179
3180     if (start == limit) {
3181       // NTAMS of this region has not been set so nothing to do.
3182       return false;
3183     }
3184
3185     // 'start' should be in the heap.
3186     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3187     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3188     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3189
3190     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3191     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3192     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3193
3194     // If ntams is not card aligned then we bump the card bitmap index
3195     // for limit so that we get all the cards spanned by
3196     // the object ending at ntams.
3197     // Note: if this is the last region in the heap then ntams
3198     // could actually be just beyond the end of the heap;
3199     // limit_idx will then correspond to a (non-existent) card
3200     // that is also outside the heap.
3201     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3202       limit_idx += 1;
3203     }
3204
3205     assert(limit_idx <= end_idx, "or else use atomics");
3206
3207     // Aggregate the "stripe" in the count data associated with hr.
3208     uint hrm_index = hr->hrm_index();
3209     size_t marked_bytes = 0;
3210
3211     for (uint i = 0; i < _max_worker_id; i += 1) {
3212       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3213       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3214
3215       // Fetch the marked_bytes in this region for task i and
3216       // add it to the running total for this region.
3217       marked_bytes += marked_bytes_array[hrm_index];
3218
3219       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3220       // into the global card bitmap.
3221 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 3222 3223 while (scan_idx < limit_idx) { 3224 assert(task_card_bm->at(scan_idx) == true, "should be"); 3225 _cm_card_bm->set_bit(scan_idx); 3226 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 3227 3228 // BitMap::get_next_one_offset() can handle the case when 3229 // its left_offset parameter is greater than its right_offset 3230 // parameter. It does, however, have an early exit if 3231 // left_offset == right_offset. So let's limit the value 3232 // passed in for left offset here. 3233 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 3234 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 3235 } 3236 } 3237 3238 // Update the marked bytes for this region. 3239 hr->add_to_marked_bytes(marked_bytes); 3240 3241 // Next heap region 3242 return false; 3243 } 3244 }; 3245 3246 class G1AggregateCountDataTask: public AbstractGangTask { 3247 protected: 3248 G1CollectedHeap* _g1h; 3249 ConcurrentMark* _cm; 3250 BitMap* _cm_card_bm; 3251 uint _max_worker_id; 3252 int _active_workers; 3253 HeapRegionClaimer _hrclaimer; 3254 3255 public: 3256 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3257 ConcurrentMark* cm, 3258 BitMap* cm_card_bm, 3259 uint max_worker_id, 3260 int n_workers) : 3261 AbstractGangTask("Count Aggregation"), 3262 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3263 _max_worker_id(max_worker_id), 3264 _active_workers(n_workers), 3265 _hrclaimer(_active_workers) { 3266 } 3267 3268 void work(uint worker_id) { 3269 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3270 3271 if (G1CollectedHeap::use_parallel_gc_threads()) { 3272 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 3273 } else { 3274 _g1h->heap_region_iterate(&cl); 3275 } 3276 } 3277 }; 3278 3279 3280 void ConcurrentMark::aggregate_count_data() { 3281 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? 3282 _g1h->workers()->active_workers() : 3283 1); 3284 3285 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3286 _max_worker_id, n_workers); 3287 3288 if (G1CollectedHeap::use_parallel_gc_threads()) { 3289 _g1h->set_par_threads(n_workers); 3290 _g1h->workers()->run_task(&g1_par_agg_task); 3291 _g1h->set_par_threads(0); 3292 } else { 3293 g1_par_agg_task.work(0); 3294 } 3295 _g1h->allocation_context_stats().update_at_remark(); 3296 } 3297 3298 // Clear the per-worker arrays used to store the per-region counting data 3299 void ConcurrentMark::clear_all_count_data() { 3300 // Clear the global card bitmap - it will be filled during 3301 // liveness count aggregation (during remark) and the 3302 // final counting task. 3303 _card_bm.clear(); 3304 3305 // Clear the global region bitmap - it will be filled as part 3306 // of the final counting task. 
3307 _region_bm.clear(); 3308 3309 uint max_regions = _g1h->max_regions(); 3310 assert(_max_worker_id > 0, "uninitialized"); 3311 3312 for (uint i = 0; i < _max_worker_id; i += 1) { 3313 BitMap* task_card_bm = count_card_bitmap_for(i); 3314 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3315 3316 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3317 assert(marked_bytes_array != NULL, "uninitialized"); 3318 3319 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3320 task_card_bm->clear(); 3321 } 3322 } 3323 3324 void ConcurrentMark::print_stats() { 3325 if (verbose_stats()) { 3326 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3327 for (size_t i = 0; i < _active_tasks; ++i) { 3328 _tasks[i]->print_stats(); 3329 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3330 } 3331 } 3332 } 3333 3334 // abandon current marking iteration due to a Full GC 3335 void ConcurrentMark::abort() { 3336 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3337 // concurrent bitmap clearing. 3338 _nextMarkBitMap->clearAll(); 3339 3340 // Note we cannot clear the previous marking bitmap here 3341 // since VerifyDuringGC verifies the objects marked during 3342 // a full GC against the previous bitmap. 3343 3344 // Clear the liveness counting data 3345 clear_all_count_data(); 3346 // Empty mark stack 3347 reset_marking_state(); 3348 for (uint i = 0; i < _max_worker_id; ++i) { 3349 _tasks[i]->clear_region_fields(); 3350 } 3351 _first_overflow_barrier_sync.abort(); 3352 _second_overflow_barrier_sync.abort(); 3353 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); 3354 if (!gc_id.is_undefined()) { 3355 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance 3356 // to detect that it was aborted. Only keep track of the first GC id that we aborted. 3357 _aborted_gc_id = gc_id; 3358 } 3359 _has_aborted = true; 3360 3361 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3362 satb_mq_set.abandon_partial_marking(); 3363 // This can be called either during or outside marking, we'll read 3364 // the expected_active value from the SATB queue set. 3365 satb_mq_set.set_active_all_threads( 3366 false, /* new active value */ 3367 satb_mq_set.is_active() /* expected_active */); 3368 3369 _g1h->trace_heap_after_concurrent_cycle(); 3370 _g1h->register_concurrent_cycle_end(); 3371 } 3372 3373 const GCId& ConcurrentMark::concurrent_gc_id() { 3374 if (has_aborted()) { 3375 return _aborted_gc_id; 3376 } 3377 return _g1h->gc_tracer_cm()->gc_id(); 3378 } 3379 3380 static void print_ms_time_info(const char* prefix, const char* name, 3381 NumberSeq& ns) { 3382 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3383 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3384 if (ns.num() > 0) { 3385 gclog_or_tty->print_cr("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 3386 prefix, ns.sd(), ns.maximum()); 3387 } 3388 } 3389 3390 void ConcurrentMark::print_summary_info() { 3391 gclog_or_tty->print_cr(" Concurrent marking:"); 3392 print_ms_time_info(" ", "init marks", _init_times); 3393 print_ms_time_info(" ", "remarks", _remark_times); 3394 { 3395 print_ms_time_info(" ", "final marks", _remark_mark_times); 3396 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3397 3398 } 3399 print_ms_time_info(" ", "cleanups", _cleanup_times); 3400 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3401 _total_counting_time, 3402 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3403 (double)_cleanup_times.num() 3404 : 0.0)); 3405 if (G1ScrubRemSets) { 3406 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3407 _total_rs_scrub_time, 3408 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3409 (double)_cleanup_times.num() 3410 : 0.0)); 3411 } 3412 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3413 (_init_times.sum() + _remark_times.sum() + 3414 _cleanup_times.sum())/1000.0); 3415 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3416 "(%8.2f s marking).", 3417 cmThread()->vtime_accum(), 3418 cmThread()->vtime_mark_accum()); 3419 } 3420 3421 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3422 if (use_parallel_marking_threads()) { 3423 _parallel_workers->print_worker_threads_on(st); 3424 } 3425 } 3426 3427 void ConcurrentMark::print_on_error(outputStream* st) const { 3428 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3429 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3430 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3431 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3432 } 3433 3434 // We take a break if someone is trying to stop the world. 
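// If a yield is requested, worker 0 first lets the policy record the
// concurrent pause, then the worker waits in SuspendibleThreadSet::yield()
// and we return true so the caller knows that marking was interrupted.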
3435 bool ConcurrentMark::do_yield_check(uint worker_id) { 3436 if (SuspendibleThreadSet::should_yield()) { 3437 if (worker_id == 0) { 3438 _g1h->g1_policy()->record_concurrent_pause(); 3439 } 3440 SuspendibleThreadSet::yield(); 3441 return true; 3442 } else { 3443 return false; 3444 } 3445 } 3446 3447 #ifndef PRODUCT 3448 // for debugging purposes 3449 void ConcurrentMark::print_finger() { 3450 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3451 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3452 for (uint i = 0; i < _max_worker_id; ++i) { 3453 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3454 } 3455 gclog_or_tty->cr(); 3456 } 3457 #endif 3458 3459 void CMTask::scan_object(oop obj) { 3460 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3461 3462 if (_cm->verbose_high()) { 3463 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, 3464 _worker_id, p2i((void*) obj)); 3465 } 3466 3467 size_t obj_size = obj->size(); 3468 _words_scanned += obj_size; 3469 3470 obj->oop_iterate(_cm_oop_closure); 3471 statsOnly( ++_objs_scanned ); 3472 check_limits(); 3473 } 3474 3475 // Closure for iteration over bitmaps 3476 class CMBitMapClosure : public BitMapClosure { 3477 private: 3478 // the bitmap that is being iterated over 3479 CMBitMap* _nextMarkBitMap; 3480 ConcurrentMark* _cm; 3481 CMTask* _task; 3482 3483 public: 3484 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3485 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3486 3487 bool do_bit(size_t offset) { 3488 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3489 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3490 assert( addr < _cm->finger(), "invariant"); 3491 3492 statsOnly( _task->increase_objs_found_on_bitmap() ); 3493 assert(addr >= _task->finger(), "invariant"); 3494 3495 // We move that task's local finger along. 
3496 _task->move_finger_to(addr);
3497 
3498 _task->scan_object(oop(addr));
3499 // we only partially drain the local queue and global stack
3500 _task->drain_local_queue(true);
3501 _task->drain_global_stack(true);
3502 
3503 // if the has_aborted flag has been raised, we need to bail out of
3504 // the iteration
3505 return !_task->has_aborted();
3506 }
3507 };
3508 
3509 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3510 ConcurrentMark* cm,
3511 CMTask* task)
3512 : _g1h(g1h), _cm(cm), _task(task) {
3513 assert(_ref_processor == NULL, "should be initialized to NULL");
3514 
3515 if (G1UseConcMarkReferenceProcessing) {
3516 _ref_processor = g1h->ref_processor_cm();
3517 assert(_ref_processor != NULL, "should not be NULL");
3518 }
3519 }
3520 
3521 void CMTask::setup_for_region(HeapRegion* hr) {
3522 assert(hr != NULL,
3523 "claim_region() should have filtered out NULL regions");
3524 assert(!hr->is_continues_humongous(),
3525 "claim_region() should have filtered out continues humongous regions");
3526 
3527 if (_cm->verbose_low()) {
3528 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3529 _worker_id, p2i(hr));
3530 }
3531 
3532 _curr_region = hr;
3533 _finger = hr->bottom();
3534 update_region_limit();
3535 }
3536 
3537 void CMTask::update_region_limit() {
3538 HeapRegion* hr = _curr_region;
3539 HeapWord* bottom = hr->bottom();
3540 HeapWord* limit = hr->next_top_at_mark_start();
3541 
3542 if (limit == bottom) {
3543 if (_cm->verbose_low()) {
3544 gclog_or_tty->print_cr("[%u] found an empty region "
3545 "["PTR_FORMAT", "PTR_FORMAT")",
3546 _worker_id, p2i(bottom), p2i(limit));
3547 }
3548 // The region was collected underneath our feet.
3549 // We set the finger to bottom to ensure that the bitmap
3550 // iteration that will follow this will not do anything.
3551 // (this is not a condition that holds when we set the region up,
3552 // as the region is not supposed to be empty in the first place)
3553 _finger = bottom;
3554 } else if (limit >= _region_limit) {
3555 assert(limit >= _finger, "peace of mind");
3556 } else {
3557 assert(limit < _region_limit, "only way to get here");
3558 // This can happen under some pretty unusual circumstances. An
3559 // evacuation pause empties the region underneath our feet (NTAMS
3560 // at bottom). We then do some allocation in the region (NTAMS
3561 // stays at bottom), followed by the region being used as a GC
3562 // alloc region (NTAMS will move to top() and the objects
3563 // originally below it will be grayed). All objects now marked in
3564 // the region are explicitly grayed, if below the global finger,
3565 // and in fact we do not need to scan anything else. So, we simply
3566 // set _finger to be limit to ensure that the bitmap iteration
3567 // doesn't do anything.
3568 _finger = limit;
3569 }
3570 
3571 _region_limit = limit;
3572 }
3573 
3574 void CMTask::giveup_current_region() {
3575 assert(_curr_region != NULL, "invariant");
3576 if (_cm->verbose_low()) {
3577 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3578 _worker_id, p2i(_curr_region));
3579 }
3580 clear_region_fields();
3581 }
3582 
3583 void CMTask::clear_region_fields() {
3584 // Values for these three fields that indicate that we're not
3585 // holding on to a region.
3586 _curr_region = NULL; 3587 _finger = NULL; 3588 _region_limit = NULL; 3589 } 3590 3591 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3592 if (cm_oop_closure == NULL) { 3593 assert(_cm_oop_closure != NULL, "invariant"); 3594 } else { 3595 assert(_cm_oop_closure == NULL, "invariant"); 3596 } 3597 _cm_oop_closure = cm_oop_closure; 3598 } 3599 3600 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3601 guarantee(nextMarkBitMap != NULL, "invariant"); 3602 3603 if (_cm->verbose_low()) { 3604 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3605 } 3606 3607 _nextMarkBitMap = nextMarkBitMap; 3608 clear_region_fields(); 3609 3610 _calls = 0; 3611 _elapsed_time_ms = 0.0; 3612 _termination_time_ms = 0.0; 3613 _termination_start_time_ms = 0.0; 3614 3615 #if _MARKING_STATS_ 3616 _local_pushes = 0; 3617 _local_pops = 0; 3618 _local_max_size = 0; 3619 _objs_scanned = 0; 3620 _global_pushes = 0; 3621 _global_pops = 0; 3622 _global_max_size = 0; 3623 _global_transfers_to = 0; 3624 _global_transfers_from = 0; 3625 _regions_claimed = 0; 3626 _objs_found_on_bitmap = 0; 3627 _satb_buffers_processed = 0; 3628 _steal_attempts = 0; 3629 _steals = 0; 3630 _aborted = 0; 3631 _aborted_overflow = 0; 3632 _aborted_cm_aborted = 0; 3633 _aborted_yield = 0; 3634 _aborted_timed_out = 0; 3635 _aborted_satb = 0; 3636 _aborted_termination = 0; 3637 #endif // _MARKING_STATS_ 3638 } 3639 3640 bool CMTask::should_exit_termination() { 3641 regular_clock_call(); 3642 // This is called when we are in the termination protocol. We should 3643 // quit if, for some reason, this task wants to abort or the global 3644 // stack is not empty (this means that we can get work from it). 3645 return !_cm->mark_stack_empty() || has_aborted(); 3646 } 3647 3648 void CMTask::reached_limit() { 3649 assert(_words_scanned >= _words_scanned_limit || 3650 _refs_reached >= _refs_reached_limit , 3651 "shouldn't have been called otherwise"); 3652 regular_clock_call(); 3653 } 3654 3655 void CMTask::regular_clock_call() { 3656 if (has_aborted()) return; 3657 3658 // First, we need to recalculate the words scanned and refs reached 3659 // limits for the next clock call. 3660 recalculate_limits(); 3661 3662 // During the regular clock call we do the following 3663 3664 // (1) If an overflow has been flagged, then we abort. 3665 if (_cm->has_overflown()) { 3666 set_has_aborted(); 3667 return; 3668 } 3669 3670 // If we are not concurrent (i.e. we're doing remark) we don't need 3671 // to check anything else. The other steps are only needed during 3672 // the concurrent marking phase. 3673 if (!concurrent()) return; 3674 3675 // (2) If marking has been aborted for Full GC, then we also abort. 3676 if (_cm->has_aborted()) { 3677 set_has_aborted(); 3678 statsOnly( ++_aborted_cm_aborted ); 3679 return; 3680 } 3681 3682 double curr_time_ms = os::elapsedVTime() * 1000.0; 3683 3684 // (3) If marking stats are enabled, then we update the step history. 
3685 #if _MARKING_STATS_
3686 if (_words_scanned >= _words_scanned_limit) {
3687 ++_clock_due_to_scanning;
3688 }
3689 if (_refs_reached >= _refs_reached_limit) {
3690 ++_clock_due_to_marking;
3691 }
3692 
3693 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3694 _interval_start_time_ms = curr_time_ms;
3695 _all_clock_intervals_ms.add(last_interval_ms);
3696 
3697 if (_cm->verbose_medium()) {
3698 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3699 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3700 _worker_id, last_interval_ms,
3701 _words_scanned,
3702 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3703 _refs_reached,
3704 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3705 }
3706 #endif // _MARKING_STATS_
3707 
3708 // (4) We check whether we should yield. If we have to, then we abort.
3709 if (SuspendibleThreadSet::should_yield()) {
3710 // We should yield. To do this we abort the task. The caller is
3711 // responsible for yielding.
3712 set_has_aborted();
3713 statsOnly( ++_aborted_yield );
3714 return;
3715 }
3716 
3717 // (5) We check whether we've reached our time quota. If we have,
3718 // then we abort.
3719 double elapsed_time_ms = curr_time_ms - _start_time_ms;
3720 if (elapsed_time_ms > _time_target_ms) {
3721 set_has_aborted();
3722 _has_timed_out = true;
3723 statsOnly( ++_aborted_timed_out );
3724 return;
3725 }
3726 
3727 // (6) Finally, we check whether there are enough completed SATB
3728 // buffers available for processing. If there are, we abort.
3729 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3730 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3731 if (_cm->verbose_low()) {
3732 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3733 _worker_id);
3734 }
3735 // we do need to process SATB buffers, so we'll abort and restart
3736 // the marking task to do so
3737 set_has_aborted();
3738 statsOnly( ++_aborted_satb );
3739 return;
3740 }
3741 }
3742 
3743 void CMTask::recalculate_limits() {
3744 _real_words_scanned_limit = _words_scanned + words_scanned_period;
3745 _words_scanned_limit = _real_words_scanned_limit;
3746 
3747 _real_refs_reached_limit = _refs_reached + refs_reached_period;
3748 _refs_reached_limit = _real_refs_reached_limit;
3749 }
3750 
3751 void CMTask::decrease_limits() {
3752 // This is called when we believe that we're going to do an infrequent
3753 // operation which will increase the per-byte scanned cost (i.e. move
3754 // entries to/from the global stack). It basically tries to decrease the
3755 // scanning limit so that the clock is called earlier.
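// Concretely, the assignments below pull each limit back to
// real_limit - 3/4 * period, so at most about a quarter of the usual
// work period remains before regular_clock_call() is triggered again.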
3756 3757 if (_cm->verbose_medium()) { 3758 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id); 3759 } 3760 3761 _words_scanned_limit = _real_words_scanned_limit - 3762 3 * words_scanned_period / 4; 3763 _refs_reached_limit = _real_refs_reached_limit - 3764 3 * refs_reached_period / 4; 3765 } 3766 3767 void CMTask::move_entries_to_global_stack() { 3768 // local array where we'll store the entries that will be popped 3769 // from the local queue 3770 oop buffer[global_stack_transfer_size]; 3771 3772 int n = 0; 3773 oop obj; 3774 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 3775 buffer[n] = obj; 3776 ++n; 3777 } 3778 3779 if (n > 0) { 3780 // we popped at least one entry from the local queue 3781 3782 statsOnly( ++_global_transfers_to; _local_pops += n ); 3783 3784 if (!_cm->mark_stack_push(buffer, n)) { 3785 if (_cm->verbose_low()) { 3786 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow", 3787 _worker_id); 3788 } 3789 set_has_aborted(); 3790 } else { 3791 // the transfer was successful 3792 3793 if (_cm->verbose_medium()) { 3794 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack", 3795 _worker_id, n); 3796 } 3797 statsOnly( int tmp_size = _cm->mark_stack_size(); 3798 if (tmp_size > _global_max_size) { 3799 _global_max_size = tmp_size; 3800 } 3801 _global_pushes += n ); 3802 } 3803 } 3804 3805 // this operation was quite expensive, so decrease the limits 3806 decrease_limits(); 3807 } 3808 3809 void CMTask::get_entries_from_global_stack() { 3810 // local array where we'll store the entries that will be popped 3811 // from the global stack. 3812 oop buffer[global_stack_transfer_size]; 3813 int n; 3814 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 3815 assert(n <= global_stack_transfer_size, 3816 "we should not pop more than the given limit"); 3817 if (n > 0) { 3818 // yes, we did actually pop at least one entry 3819 3820 statsOnly( ++_global_transfers_from; _global_pops += n ); 3821 if (_cm->verbose_medium()) { 3822 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack", 3823 _worker_id, n); 3824 } 3825 for (int i = 0; i < n; ++i) { 3826 bool success = _task_queue->push(buffer[i]); 3827 // We only call this when the local queue is empty or under a 3828 // given target limit. So, we do not expect this push to fail. 3829 assert(success, "invariant"); 3830 } 3831 3832 statsOnly( int tmp_size = _task_queue->size(); 3833 if (tmp_size > _local_max_size) { 3834 _local_max_size = tmp_size; 3835 } 3836 _local_pushes += n ); 3837 } 3838 3839 // this operation was quite expensive, so decrease the limits 3840 decrease_limits(); 3841 } 3842 3843 void CMTask::drain_local_queue(bool partially) { 3844 if (has_aborted()) return; 3845 3846 // Decide what the target size is, depending whether we're going to 3847 // drain it partially (so that other tasks can steal if they run out 3848 // of things to do) or totally (at the very end). 
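// In the partial case the target below is a third of the queue's
// capacity (capped by GCDrainStackTargetSize); a total drain targets
// an empty queue.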
3849 size_t target_size; 3850 if (partially) { 3851 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 3852 } else { 3853 target_size = 0; 3854 } 3855 3856 if (_task_queue->size() > target_size) { 3857 if (_cm->verbose_high()) { 3858 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT, 3859 _worker_id, target_size); 3860 } 3861 3862 oop obj; 3863 bool ret = _task_queue->pop_local(obj); 3864 while (ret) { 3865 statsOnly( ++_local_pops ); 3866 3867 if (_cm->verbose_high()) { 3868 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id, 3869 p2i((void*) obj)); 3870 } 3871 3872 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 3873 assert(!_g1h->is_on_master_free_list( 3874 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 3875 3876 scan_object(obj); 3877 3878 if (_task_queue->size() <= target_size || has_aborted()) { 3879 ret = false; 3880 } else { 3881 ret = _task_queue->pop_local(obj); 3882 } 3883 } 3884 3885 if (_cm->verbose_high()) { 3886 gclog_or_tty->print_cr("[%u] drained local queue, size = %u", 3887 _worker_id, _task_queue->size()); 3888 } 3889 } 3890 } 3891 3892 void CMTask::drain_global_stack(bool partially) { 3893 if (has_aborted()) return; 3894 3895 // We have a policy to drain the local queue before we attempt to 3896 // drain the global stack. 3897 assert(partially || _task_queue->size() == 0, "invariant"); 3898 3899 // Decide what the target size is, depending whether we're going to 3900 // drain it partially (so that other tasks can steal if they run out 3901 // of things to do) or totally (at the very end). Notice that, 3902 // because we move entries from the global stack in chunks or 3903 // because another task might be doing the same, we might in fact 3904 // drop below the target. But, this is not a problem. 3905 size_t target_size; 3906 if (partially) { 3907 target_size = _cm->partial_mark_stack_size_target(); 3908 } else { 3909 target_size = 0; 3910 } 3911 3912 if (_cm->mark_stack_size() > target_size) { 3913 if (_cm->verbose_low()) { 3914 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT, 3915 _worker_id, target_size); 3916 } 3917 3918 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 3919 get_entries_from_global_stack(); 3920 drain_local_queue(partially); 3921 } 3922 3923 if (_cm->verbose_low()) { 3924 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT, 3925 _worker_id, _cm->mark_stack_size()); 3926 } 3927 } 3928 } 3929 3930 // SATB Queue has several assumptions on whether to call the par or 3931 // non-par versions of the methods. this is why some of the code is 3932 // replicated. We should really get rid of the single-threaded version 3933 // of the code to simplify things. 3934 void CMTask::drain_satb_buffers() { 3935 if (has_aborted()) return; 3936 3937 // We set this so that the regular clock knows that we're in the 3938 // middle of draining buffers and doesn't set the abort flag when it 3939 // notices that SATB buffers are available for draining. It'd be 3940 // very counter productive if it did that. 
:-) 3941 _draining_satb_buffers = true; 3942 3943 CMObjectClosure oc(this); 3944 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3945 if (G1CollectedHeap::use_parallel_gc_threads()) { 3946 satb_mq_set.set_par_closure(_worker_id, &oc); 3947 } else { 3948 satb_mq_set.set_closure(&oc); 3949 } 3950 3951 // This keeps claiming and applying the closure to completed buffers 3952 // until we run out of buffers or we need to abort. 3953 if (G1CollectedHeap::use_parallel_gc_threads()) { 3954 while (!has_aborted() && 3955 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) { 3956 if (_cm->verbose_medium()) { 3957 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3958 } 3959 statsOnly( ++_satb_buffers_processed ); 3960 regular_clock_call(); 3961 } 3962 } else { 3963 while (!has_aborted() && 3964 satb_mq_set.apply_closure_to_completed_buffer()) { 3965 if (_cm->verbose_medium()) { 3966 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3967 } 3968 statsOnly( ++_satb_buffers_processed ); 3969 regular_clock_call(); 3970 } 3971 } 3972 3973 _draining_satb_buffers = false; 3974 3975 assert(has_aborted() || 3976 concurrent() || 3977 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3978 3979 if (G1CollectedHeap::use_parallel_gc_threads()) { 3980 satb_mq_set.set_par_closure(_worker_id, NULL); 3981 } else { 3982 satb_mq_set.set_closure(NULL); 3983 } 3984 3985 // again, this was a potentially expensive operation, decrease the 3986 // limits to get the regular clock call early 3987 decrease_limits(); 3988 } 3989 3990 void CMTask::print_stats() { 3991 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3992 _worker_id, _calls); 3993 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3994 _elapsed_time_ms, _termination_time_ms); 3995 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3996 _step_times_ms.num(), _step_times_ms.avg(), 3997 _step_times_ms.sd()); 3998 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3999 _step_times_ms.maximum(), _step_times_ms.sum()); 4000 4001 #if _MARKING_STATS_ 4002 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 4003 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 4004 _all_clock_intervals_ms.sd()); 4005 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 4006 _all_clock_intervals_ms.maximum(), 4007 _all_clock_intervals_ms.sum()); 4008 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", 4009 _clock_due_to_scanning, _clock_due_to_marking); 4010 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", 4011 _objs_scanned, _objs_found_on_bitmap); 4012 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", 4013 _local_pushes, _local_pops, _local_max_size); 4014 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", 4015 _global_pushes, _global_pops, _global_max_size); 4016 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", 4017 _global_transfers_to,_global_transfers_from); 4018 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed); 4019 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); 4020 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", 4021 _steal_attempts, _steals); 4022 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); 4023 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", 
4024 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 4025 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", 4026 _aborted_timed_out, _aborted_satb, _aborted_termination); 4027 #endif // _MARKING_STATS_ 4028 } 4029 4030 /***************************************************************************** 4031 4032 The do_marking_step(time_target_ms, ...) method is the building 4033 block of the parallel marking framework. It can be called in parallel 4034 with other invocations of do_marking_step() on different tasks 4035 (but only one per task, obviously) and concurrently with the 4036 mutator threads, or during remark, hence it eliminates the need 4037 for two versions of the code. When called during remark, it will 4038 pick up from where the task left off during the concurrent marking 4039 phase. Interestingly, tasks are also claimable during evacuation 4040 pauses too, since do_marking_step() ensures that it aborts before 4041 it needs to yield. 4042 4043 The data structures that it uses to do marking work are the 4044 following: 4045 4046 (1) Marking Bitmap. If there are gray objects that appear only 4047 on the bitmap (this happens either when dealing with an overflow 4048 or when the initial marking phase has simply marked the roots 4049 and didn't push them on the stack), then tasks claim heap 4050 regions whose bitmap they then scan to find gray objects. A 4051 global finger indicates where the end of the last claimed region 4052 is. A local finger indicates how far into the region a task has 4053 scanned. The two fingers are used to determine how to gray an 4054 object (i.e. whether simply marking it is OK, as it will be 4055 visited by a task in the future, or whether it needs to be also 4056 pushed on a stack). 4057 4058 (2) Local Queue. The local queue of the task which is accessed 4059 reasonably efficiently by the task. Other tasks can steal from 4060 it when they run out of work. Throughout the marking phase, a 4061 task attempts to keep its local queue short but not totally 4062 empty, so that entries are available for stealing by other 4063 tasks. Only when there is no more work, a task will totally 4064 drain its local queue. 4065 4066 (3) Global Mark Stack. This handles local queue overflow. During 4067 marking only sets of entries are moved between it and the local 4068 queues, as access to it requires a mutex and more fine-grain 4069 interaction with it which might cause contention. If it 4070 overflows, then the marking phase should restart and iterate 4071 over the bitmap to identify gray objects. Throughout the marking 4072 phase, tasks attempt to keep the global mark stack at a small 4073 length but not totally empty, so that entries are available for 4074 popping by other tasks. Only when there is no more work, tasks 4075 will totally drain the global mark stack. 4076 4077 (4) SATB Buffer Queue. This is where completed SATB buffers are 4078 made available. Buffers are regularly removed from this queue 4079 and scanned for roots, so that the queue doesn't get too 4080 long. During remark, all completed buffers are processed, as 4081 well as the filled in parts of any uncompleted buffers. 4082 4083 The do_marking_step() method tries to abort when the time target 4084 has been reached. There are a few other cases when the 4085 do_marking_step() method also aborts: 4086 4087 (1) When the marking phase has been aborted (after a Full GC). 4088 4089 (2) When a global overflow (on the global stack) has been 4090 triggered. 
Before the task aborts, it will actually sync up with
4091 the other tasks to ensure that all the marking data structures
4092 (local queues, stacks, fingers etc.) are re-initialized so that
4093 when do_marking_step() completes, the marking phase can
4094 immediately restart.
4095 
4096 (3) When enough completed SATB buffers are available. The
4097 do_marking_step() method only tries to drain SATB buffers right
4098 at the beginning. So, if enough buffers are available, the
4099 marking step aborts and the SATB buffers are processed at
4100 the beginning of the next invocation.
4101 
4102 (4) To yield. When we have to yield, we abort and yield
4103 right at the end of do_marking_step(). This saves us from a lot
4104 of hassle as, by yielding, we might allow a Full GC. If this
4105 happens then objects will be compacted underneath our feet, the
4106 heap might shrink, etc. We save checking for this by just
4107 aborting and doing the yield right at the end.
4108 
4109 From the above it follows that the do_marking_step() method should
4110 be called in a loop (or, otherwise, regularly) until it completes.
4111 
4112 If a marking step completes without its has_aborted() flag being
4113 true, it means it has completed the current marking phase (and
4114 also all other marking tasks have done so and have all synced up).
4115 
4116 A method called regular_clock_call() is invoked "regularly" (in
4117 sub-ms intervals) throughout marking. It is this clock method that
4118 checks all the abort conditions which were mentioned above and
4119 decides when the task should abort. A work-based scheme is used to
4120 trigger this clock method: when the number of object words the
4121 marking phase has scanned or the number of references the marking
4122 phase has visited reaches a given limit. Additional invocations of
4123 the clock method have been planted in a few other strategic places
4124 too. The initial reason for the clock method was to avoid calling
4125 vtime too regularly, as it is quite expensive. So, once it was in
4126 place, it was natural to piggy-back all the other conditions on it
4127 too and not constantly check them throughout the code.
4128 
4129 If do_termination is true then do_marking_step will enter its
4130 termination protocol.
4131 
4132 The value of is_serial must be true when do_marking_step is being
4133 called serially (i.e. by the VMThread) and do_marking_step should
4134 skip any synchronization in the termination and overflow code.
4135 Examples include the serial remark code and the serial reference
4136 processing closures.
4137 
4138 The value of is_serial must be false when do_marking_step is
4139 being called by any of the worker threads in a work gang.
4140 Examples include the concurrent marking code (CMMarkingTask),
4141 the MT remark code, and the MT reference processing closures.
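As an illustrative sketch only (the real drivers are the concurrent
marking and remark code, which add their own yield/sleep logic between
steps), the calling pattern described above looks roughly like:

  do {
    task->do_marking_step(target_ms,
                          true  /* do_termination */,
                          false /* is_serial */);
    // handle yielding / overflow between the steps as needed
  } while (task->has_aborted() && !cm->has_aborted());

Here task, cm and target_ms stand for a CMTask, the ConcurrentMark
instance and the time target; they are placeholders, not names from
this file.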
4142 
4143 *****************************************************************************/
4144 
4145 void CMTask::do_marking_step(double time_target_ms,
4146 bool do_termination,
4147 bool is_serial) {
4148 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4149 assert(concurrent() == _cm->concurrent(), "they should be the same");
4150 
4151 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4152 assert(_task_queues != NULL, "invariant");
4153 assert(_task_queue != NULL, "invariant");
4154 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4155 
4156 assert(!_claimed,
4157 "only one thread should claim this task at any one time");
4158 
4159 // OK, this doesn't safeguard against all possible scenarios, as it is
4160 // possible for two threads to set the _claimed flag at the same
4161 // time. But it is only for debugging purposes anyway and it will
4162 // catch most problems.
4163 _claimed = true;
4164 
4165 _start_time_ms = os::elapsedVTime() * 1000.0;
4166 statsOnly( _interval_start_time_ms = _start_time_ms );
4167 
4168 // If do_stealing is true then do_marking_step will attempt to
4169 // steal work from the other CMTasks. It only makes sense to
4170 // enable stealing when the termination protocol is enabled
4171 // and do_marking_step() is not being called serially.
4172 bool do_stealing = do_termination && !is_serial;
4173 
4174 double diff_prediction_ms =
4175 g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4176 _time_target_ms = time_target_ms - diff_prediction_ms;
4177 
4178 // set up the variables that are used in the work-based scheme to
4179 // call the regular clock method
4180 _words_scanned = 0;
4181 _refs_reached = 0;
4182 recalculate_limits();
4183 
4184 // clear all flags
4185 clear_has_aborted();
4186 _has_timed_out = false;
4187 _draining_satb_buffers = false;
4188 
4189 ++_calls;
4190 
4191 if (_cm->verbose_low()) {
4192 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
4193 "target = %1.2lfms >>>>>>>>>>",
4194 _worker_id, _calls, _time_target_ms);
4195 }
4196 
4197 // Set up the bitmap and oop closures. Anything that uses them is
4198 // eventually called from this method, so it is OK to allocate these
4199 // statically.
4200 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4201 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
4202 set_cm_oop_closure(&cm_oop_closure);
4203 
4204 if (_cm->has_overflown()) {
4205 // This can happen if the mark stack overflows during a GC pause
4206 // and this task, after a yield point, restarts. We have to abort
4207 // as we need to get into the overflow protocol which happens
4208 // right at the end of this task.
4209 set_has_aborted();
4210 }
4211 
4212 // First drain any available SATB buffers. After this, we will not
4213 // look at SATB buffers before the next invocation of this method.
4214 // If enough completed SATB buffers are queued up, the regular clock
4215 // will abort this task so that it restarts.
4216 drain_satb_buffers();
4217 // ...then partially drain the local queue and the global stack
4218 drain_local_queue(true);
4219 drain_global_stack(true);
4220 
4221 do {
4222 if (!has_aborted() && _curr_region != NULL) {
4223 // This means that we're already holding on to a region.
4224 assert(_finger != NULL, "if region is not NULL, then the finger "
4225 "should not be NULL either");
4226 
4227 // We might have restarted this task after an evacuation pause
4228 // which might have evacuated the region we're holding on to
4229 // underneath our feet.
Let's read its limit again to make sure 4230 // that we do not iterate over a region of the heap that 4231 // contains garbage (update_region_limit() will also move 4232 // _finger to the start of the region if it is found empty). 4233 update_region_limit(); 4234 // We will start from _finger not from the start of the region, 4235 // as we might be restarting this task after aborting half-way 4236 // through scanning this region. In this case, _finger points to 4237 // the address where we last found a marked object. If this is a 4238 // fresh region, _finger points to start(). 4239 MemRegion mr = MemRegion(_finger, _region_limit); 4240 4241 if (_cm->verbose_low()) { 4242 gclog_or_tty->print_cr("[%u] we're scanning part " 4243 "["PTR_FORMAT", "PTR_FORMAT") " 4244 "of region "HR_FORMAT, 4245 _worker_id, p2i(_finger), p2i(_region_limit), 4246 HR_FORMAT_PARAMS(_curr_region)); 4247 } 4248 4249 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 4250 "humongous regions should go around loop once only"); 4251 4252 // Some special cases: 4253 // If the memory region is empty, we can just give up the region. 4254 // If the current region is humongous then we only need to check 4255 // the bitmap for the bit associated with the start of the object, 4256 // scan the object if it's live, and give up the region. 4257 // Otherwise, let's iterate over the bitmap of the part of the region 4258 // that is left. 4259 // If the iteration is successful, give up the region. 4260 if (mr.is_empty()) { 4261 giveup_current_region(); 4262 regular_clock_call(); 4263 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 4264 if (_nextMarkBitMap->isMarked(mr.start())) { 4265 // The object is marked - apply the closure 4266 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4267 bitmap_closure.do_bit(offset); 4268 } 4269 // Even if this task aborted while scanning the humongous object 4270 // we can (and should) give up the current region. 4271 giveup_current_region(); 4272 regular_clock_call(); 4273 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4274 giveup_current_region(); 4275 regular_clock_call(); 4276 } else { 4277 assert(has_aborted(), "currently the only way to do so"); 4278 // The only way to abort the bitmap iteration is to return 4279 // false from the do_bit() method. However, inside the 4280 // do_bit() method we move the _finger to point to the 4281 // object currently being looked at. So, if we bail out, we 4282 // have definitely set _finger to something non-null. 4283 assert(_finger != NULL, "invariant"); 4284 4285 // Region iteration was actually aborted. So now _finger 4286 // points to the address of the object we last scanned. If we 4287 // leave it there, when we restart this task, we will rescan 4288 // the object. It is easy to avoid this. We move the finger by 4289 // enough to point to the next possible object header (the 4290 // bitmap knows by how much we need to move it as it knows its 4291 // granularity). 4292 assert(_finger < _region_limit, "invariant"); 4293 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4294 // Check if bitmap iteration was aborted while scanning the last object 4295 if (new_finger >= _region_limit) { 4296 giveup_current_region(); 4297 } else { 4298 move_finger_to(new_finger); 4299 } 4300 } 4301 } 4302 // At this point we have either completed iterating over the 4303 // region we were holding on to, or we have aborted. 
4304 
4305 // We then partially drain the local queue and the global stack.
4306 // (Do we really need this?)
4307 drain_local_queue(true);
4308 drain_global_stack(true);
4309 
4310 // Read the note on the claim_region() method on why it might
4311 // return NULL with potentially more regions available for
4312 // claiming and why we have to check out_of_regions() to determine
4313 // whether we're done or not.
4314 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4315 // We are going to try to claim a new region. We should have
4316 // given up on the previous one.
4317 // Separated the asserts so that we know which one fires.
4318 assert(_curr_region == NULL, "invariant");
4319 assert(_finger == NULL, "invariant");
4320 assert(_region_limit == NULL, "invariant");
4321 if (_cm->verbose_low()) {
4322 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4323 }
4324 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4325 if (claimed_region != NULL) {
4326 // Yes, we managed to claim one
4327 statsOnly( ++_regions_claimed );
4328 
4329 if (_cm->verbose_low()) {
4330 gclog_or_tty->print_cr("[%u] we successfully claimed "
4331 "region "PTR_FORMAT,
4332 _worker_id, p2i(claimed_region));
4333 }
4334 
4335 setup_for_region(claimed_region);
4336 assert(_curr_region == claimed_region, "invariant");
4337 }
4338 // It is important to call the regular clock here. It might take
4339 // a while to claim a region if, for example, we hit a large
4340 // block of empty regions. So we need to call the regular clock
4341 // method once round the loop to make sure it's called
4342 // frequently enough.
4343 regular_clock_call();
4344 }
4345 
4346 if (!has_aborted() && _curr_region == NULL) {
4347 assert(_cm->out_of_regions(),
4348 "at this point we should be out of regions");
4349 }
4350 } while ( _curr_region != NULL && !has_aborted());
4351 
4352 if (!has_aborted()) {
4353 // We cannot check whether the global stack is empty, since other
4354 // tasks might be pushing objects to it concurrently.
4355 assert(_cm->out_of_regions(),
4356 "at this point we should be out of regions");
4357 
4358 if (_cm->verbose_low()) {
4359 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4360 }
4361 
4362 // Try to reduce the number of available SATB buffers so that
4363 // remark has less work to do.
4364 drain_satb_buffers();
4365 }
4366 
4367 // Since we've done everything else, we can now totally drain the
4368 // local queue and global stack.
4369 drain_local_queue(false);
4370 drain_global_stack(false);
4371 
4372 // Attempt at work stealing from other tasks' queues.
4373 if (do_stealing && !has_aborted()) {
4374 // We have not aborted. This means that we have finished all that
4375 // we could. Let's try to do some stealing...
4376 
4377 // We cannot check whether the global stack is empty, since other
4378 // tasks might be pushing objects to it concurrently.
4379 assert(_cm->out_of_regions() && _task_queue->size() == 0, 4380 "only way to reach here"); 4381 4382 if (_cm->verbose_low()) { 4383 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id); 4384 } 4385 4386 while (!has_aborted()) { 4387 oop obj; 4388 statsOnly( ++_steal_attempts ); 4389 4390 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 4391 if (_cm->verbose_medium()) { 4392 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully", 4393 _worker_id, p2i((void*) obj)); 4394 } 4395 4396 statsOnly( ++_steals ); 4397 4398 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 4399 "any stolen object should be marked"); 4400 scan_object(obj); 4401 4402 // And since we're towards the end, let's totally drain the 4403 // local queue and global stack. 4404 drain_local_queue(false); 4405 drain_global_stack(false); 4406 } else { 4407 break; 4408 } 4409 } 4410 } 4411 4412 // If we are about to wrap up and go into termination, check if we 4413 // should raise the overflow flag. 4414 if (do_termination && !has_aborted()) { 4415 if (_cm->force_overflow()->should_force()) { 4416 _cm->set_has_overflown(); 4417 regular_clock_call(); 4418 } 4419 } 4420 4421 // We still haven't aborted. Now, let's try to get into the 4422 // termination protocol. 4423 if (do_termination && !has_aborted()) { 4424 // We cannot check whether the global stack is empty, since other 4425 // tasks might be concurrently pushing objects on it. 4426 // Separated the asserts so that we know which one fires. 4427 assert(_cm->out_of_regions(), "only way to reach here"); 4428 assert(_task_queue->size() == 0, "only way to reach here"); 4429 4430 if (_cm->verbose_low()) { 4431 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); 4432 } 4433 4434 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 4435 4436 // The CMTask class also extends the TerminatorTerminator class, 4437 // hence its should_exit_termination() method will also decide 4438 // whether to exit the termination protocol or not. 4439 bool finished = (is_serial || 4440 _cm->terminator()->offer_termination(this)); 4441 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 4442 _termination_time_ms += 4443 termination_end_time_ms - _termination_start_time_ms; 4444 4445 if (finished) { 4446 // We're all done. 4447 4448 if (_worker_id == 0) { 4449 // let's allow task 0 to do this 4450 if (concurrent()) { 4451 assert(_cm->concurrent_marking_in_progress(), "invariant"); 4452 // we need to set this to false before the next 4453 // safepoint. This way we ensure that the marking phase 4454 // doesn't observe any more heap expansions. 4455 _cm->clear_concurrent_marking_in_progress(); 4456 } 4457 } 4458 4459 // We can now guarantee that the global stack is empty, since 4460 // all other tasks have finished. We separated the guarantees so 4461 // that, if a condition is false, we can immediately find out 4462 // which one. 4463 guarantee(_cm->out_of_regions(), "only way to reach here"); 4464 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 4465 guarantee(_task_queue->size() == 0, "only way to reach here"); 4466 guarantee(!_cm->has_overflown(), "only way to reach here"); 4467 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 4468 4469 if (_cm->verbose_low()) { 4470 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); 4471 } 4472 } else { 4473 // Apparently there's more work to do. Let's abort this task. It 4474 // will restart it and we can hopefully find more things to do. 
4475 4476 if (_cm->verbose_low()) { 4477 gclog_or_tty->print_cr("[%u] apparently there is more work to do", 4478 _worker_id); 4479 } 4480 4481 set_has_aborted(); 4482 statsOnly( ++_aborted_termination ); 4483 } 4484 } 4485 4486 // Mainly for debugging purposes to make sure that a pointer to the 4487 // closure which was statically allocated in this frame doesn't 4488 // escape it by accident. 4489 set_cm_oop_closure(NULL); 4490 double end_time_ms = os::elapsedVTime() * 1000.0; 4491 double elapsed_time_ms = end_time_ms - _start_time_ms; 4492 // Update the step history. 4493 _step_times_ms.add(elapsed_time_ms); 4494 4495 if (has_aborted()) { 4496 // The task was aborted for some reason. 4497 4498 statsOnly( ++_aborted ); 4499 4500 if (_has_timed_out) { 4501 double diff_ms = elapsed_time_ms - _time_target_ms; 4502 // Keep statistics of how well we did with respect to hitting 4503 // our target only if we actually timed out (if we aborted for 4504 // other reasons, then the results might get skewed). 4505 _marking_step_diffs_ms.add(diff_ms); 4506 } 4507 4508 if (_cm->has_overflown()) { 4509 // This is the interesting one. We aborted because a global 4510 // overflow was raised. This means we have to restart the 4511 // marking phase and start iterating over regions. However, in 4512 // order to do this we have to make sure that all tasks stop 4513 // what they are doing and re-initialize in a safe manner. We 4514 // will achieve this with the use of two barrier sync points. 4515 4516 if (_cm->verbose_low()) { 4517 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4518 } 4519 4520 if (!is_serial) { 4521 // We only need to enter the sync barrier if being called 4522 // from a parallel context 4523 _cm->enter_first_sync_barrier(_worker_id); 4524 4525 // When we exit this sync barrier we know that all tasks have 4526 // stopped doing marking work. So, it's now safe to 4527 // re-initialize our data structures. At the end of this method, 4528 // task 0 will clear the global data structures. 4529 } 4530 4531 statsOnly( ++_aborted_overflow ); 4532 4533 // We clear the local state of this task... 4534 clear_region_fields(); 4535 4536 if (!is_serial) { 4537 // ...and enter the second barrier. 4538 _cm->enter_second_sync_barrier(_worker_id); 4539 } 4540 // At this point, if we're during the concurrent phase of 4541 // marking, everything has been re-initialized and we're 4542 // ready to restart. 
4543 } 4544 4545 if (_cm->verbose_low()) { 4546 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4547 "elapsed = %1.2lfms <<<<<<<<<<", 4548 _worker_id, _time_target_ms, elapsed_time_ms); 4549 if (_cm->has_aborted()) { 4550 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4551 _worker_id); 4552 } 4553 } 4554 } else { 4555 if (_cm->verbose_low()) { 4556 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4557 "elapsed = %1.2lfms <<<<<<<<<<", 4558 _worker_id, _time_target_ms, elapsed_time_ms); 4559 } 4560 } 4561 4562 _claimed = false; 4563 } 4564 4565 CMTask::CMTask(uint worker_id, 4566 ConcurrentMark* cm, 4567 size_t* marked_bytes, 4568 BitMap* card_bm, 4569 CMTaskQueue* task_queue, 4570 CMTaskQueueSet* task_queues) 4571 : _g1h(G1CollectedHeap::heap()), 4572 _worker_id(worker_id), _cm(cm), 4573 _claimed(false), 4574 _nextMarkBitMap(NULL), _hash_seed(17), 4575 _task_queue(task_queue), 4576 _task_queues(task_queues), 4577 _cm_oop_closure(NULL), 4578 _marked_bytes_array(marked_bytes), 4579 _card_bm(card_bm) { 4580 guarantee(task_queue != NULL, "invariant"); 4581 guarantee(task_queues != NULL, "invariant"); 4582 4583 statsOnly( _clock_due_to_scanning = 0; 4584 _clock_due_to_marking = 0 ); 4585 4586 _marking_step_diffs_ms.add(0.5); 4587 } 4588 4589 // These are formatting macros that are used below to ensure 4590 // consistent formatting. The *_H_* versions are used to format the 4591 // header for a particular value and they should be kept consistent 4592 // with the corresponding macro. Also note that most of the macros add 4593 // the necessary white space (as a prefix) which makes them a bit 4594 // easier to compose. 4595 4596 // All the output lines are prefixed with this string to be able to 4597 // identify them easily in a large log file. 4598 #define G1PPRL_LINE_PREFIX "###" 4599 4600 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT 4601 #ifdef _LP64 4602 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 4603 #else // _LP64 4604 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 4605 #endif // _LP64 4606 4607 // For per-region info 4608 #define G1PPRL_TYPE_FORMAT " %-4s" 4609 #define G1PPRL_TYPE_H_FORMAT " %4s" 4610 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) 4611 #define G1PPRL_BYTE_H_FORMAT " %9s" 4612 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 4613 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 4614 4615 // For summary info 4616 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT 4617 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT 4618 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" 4619 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" 4620 4621 G1PrintRegionLivenessInfoClosure:: 4622 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) 4623 : _out(out), 4624 _total_used_bytes(0), _total_capacity_bytes(0), 4625 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4626 _hum_used_bytes(0), _hum_capacity_bytes(0), 4627 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 4628 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 4629 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4630 MemRegion g1_reserved = g1h->g1_reserved(); 4631 double now = os::elapsedTime(); 4632 4633 // Print the header of the output. 
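// Illustrative shape of the output produced below (values and spacing
// are examples only; the real layout comes from the G1PPRL_* macros and
// the phase name is supplied by the caller):
//
//   ### PHASE Post-Marking @ 12.345
//   ### HEAP  reserved: 0x...-0x...  region-size: 1048576
//   ###
//   ###  type  address-range  used  prev-live  next-live  gc-eff  remset  code-roots
//   ###        (bytes)  (bytes)  (bytes)  (bytes/ms)  (bytes)  (bytes)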
4634 _out->cr();
4635 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4636 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4637 G1PPRL_SUM_ADDR_FORMAT("reserved")
4638 G1PPRL_SUM_BYTE_FORMAT("region-size"),
4639 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4640 HeapRegion::GrainBytes);
4641 _out->print_cr(G1PPRL_LINE_PREFIX);
4642 _out->print_cr(G1PPRL_LINE_PREFIX
4643 G1PPRL_TYPE_H_FORMAT
4644 G1PPRL_ADDR_BASE_H_FORMAT
4645 G1PPRL_BYTE_H_FORMAT
4646 G1PPRL_BYTE_H_FORMAT
4647 G1PPRL_BYTE_H_FORMAT
4648 G1PPRL_DOUBLE_H_FORMAT
4649 G1PPRL_BYTE_H_FORMAT
4650 G1PPRL_BYTE_H_FORMAT,
4651 "type", "address-range",
4652 "used", "prev-live", "next-live", "gc-eff",
4653 "remset", "code-roots");
4654 _out->print_cr(G1PPRL_LINE_PREFIX
4655 G1PPRL_TYPE_H_FORMAT
4656 G1PPRL_ADDR_BASE_H_FORMAT
4657 G1PPRL_BYTE_H_FORMAT
4658 G1PPRL_BYTE_H_FORMAT
4659 G1PPRL_BYTE_H_FORMAT
4660 G1PPRL_DOUBLE_H_FORMAT
4661 G1PPRL_BYTE_H_FORMAT
4662 G1PPRL_BYTE_H_FORMAT,
4663 "", "",
4664 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4665 "(bytes)", "(bytes)");
4666 }
4667 
4668 // It takes as a parameter a reference to one of the _hum_* fields,
4669 // deduces the corresponding value for a region in a humongous region
4670 // series (either the region size, or what's left if the _hum_* field
4671 // is < the region size), and updates the _hum_* field accordingly.
4672 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4673 size_t bytes = 0;
4674 // The > 0 check is to deal with the prev and next live bytes which
4675 // could be 0.
4676 if (*hum_bytes > 0) {
4677 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4678 *hum_bytes -= bytes;
4679 }
4680 return bytes;
4681 }
4682 
4683 // It deduces the values for a region in a humongous region series
4684 // from the _hum_* fields and updates those accordingly. It assumes
4685 // that the _hum_* fields have already been set up from the "starts
4686 // humongous" region and that we visit the regions in address order.
4687 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4688 size_t* capacity_bytes,
4689 size_t* prev_live_bytes,
4690 size_t* next_live_bytes) {
4691 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4692 *used_bytes = get_hum_bytes(&_hum_used_bytes);
4693 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
4694 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4695 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4696 }
4697 
4698 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4699 const char* type = r->get_type_str();
4700 HeapWord* bottom = r->bottom();
4701 HeapWord* end = r->end();
4702 size_t capacity_bytes = r->capacity();
4703 size_t used_bytes = r->used();
4704 size_t prev_live_bytes = r->live_bytes();
4705 size_t next_live_bytes = r->next_live_bytes();
4706 double gc_eff = r->gc_efficiency();
4707 size_t remset_bytes = r->rem_set()->mem_size();
4708 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
4709 
4710 if (r->is_starts_humongous()) {
4711 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4712 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4713 "they should have been zeroed after the last time we used them");
4714 // Set up the _hum_* fields.
4715 _hum_capacity_bytes = capacity_bytes; 4716 _hum_used_bytes = used_bytes; 4717 _hum_prev_live_bytes = prev_live_bytes; 4718 _hum_next_live_bytes = next_live_bytes; 4719 get_hum_bytes(&used_bytes, &capacity_bytes, 4720 &prev_live_bytes, &next_live_bytes); 4721 end = bottom + HeapRegion::GrainWords; 4722 } else if (r->is_continues_humongous()) { 4723 get_hum_bytes(&used_bytes, &capacity_bytes, 4724 &prev_live_bytes, &next_live_bytes); 4725 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4726 } 4727 4728 _total_used_bytes += used_bytes; 4729 _total_capacity_bytes += capacity_bytes; 4730 _total_prev_live_bytes += prev_live_bytes; 4731 _total_next_live_bytes += next_live_bytes; 4732 _total_remset_bytes += remset_bytes; 4733 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4734 4735 // Print a line for this particular region. 4736 _out->print_cr(G1PPRL_LINE_PREFIX 4737 G1PPRL_TYPE_FORMAT 4738 G1PPRL_ADDR_BASE_FORMAT 4739 G1PPRL_BYTE_FORMAT 4740 G1PPRL_BYTE_FORMAT 4741 G1PPRL_BYTE_FORMAT 4742 G1PPRL_DOUBLE_FORMAT 4743 G1PPRL_BYTE_FORMAT 4744 G1PPRL_BYTE_FORMAT, 4745 type, p2i(bottom), p2i(end), 4746 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4747 remset_bytes, strong_code_roots_bytes); 4748 4749 return false; 4750 } 4751 4752 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4753 // add static memory usages to remembered set sizes 4754 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4755 // Print the footer of the output. 4756 _out->print_cr(G1PPRL_LINE_PREFIX); 4757 _out->print_cr(G1PPRL_LINE_PREFIX 4758 " SUMMARY" 4759 G1PPRL_SUM_MB_FORMAT("capacity") 4760 G1PPRL_SUM_MB_PERC_FORMAT("used") 4761 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4762 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4763 G1PPRL_SUM_MB_FORMAT("remset") 4764 G1PPRL_SUM_MB_FORMAT("code-roots"), 4765 bytes_to_mb(_total_capacity_bytes), 4766 bytes_to_mb(_total_used_bytes), 4767 perc(_total_used_bytes, _total_capacity_bytes), 4768 bytes_to_mb(_total_prev_live_bytes), 4769 perc(_total_prev_live_bytes, _total_capacity_bytes), 4770 bytes_to_mb(_total_next_live_bytes), 4771 perc(_total_next_live_bytes, _total_capacity_bytes), 4772 bytes_to_mb(_total_remset_bytes), 4773 bytes_to_mb(_total_strong_code_roots_bytes)); 4774 _out->cr(); 4775 }