/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return heap_size / mark_distance();
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}
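// A worked instance of the sizing math above (illustrative only, assuming
// the usual 64-bit defaults): with MinObjAlignmentInBytes == 8 and
// BitsPerByte == 8, mark_distance() is 64, i.e. one bitmap bit covers
// 64 bytes of heap, so
//
//   CMBitMap::compute_size(1*G) == 1*G / 64 == 16*M
//
// and the two marking bitmaps together cost roughly 1/32 of the heap size.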
void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;  // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

void CMBitMap::clearAll() {
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  G1CollectedHeap::heap()->heap_region_iterate(&cl);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // Clip the given range to the part of the heap this bitmap covers.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  // Clip the given range to the part of the heap this bitmap covers.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}
bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if the marking stack overflowed while marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT "K to " SIZE_FORMAT "K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}
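// Both par_push() and par_adjoin_arr() above use the same lock-free
// "claim, then fill" pattern: CAS the top index forward to reserve one or
// more slots, and only the CAS winner writes into the slots it reserved.
// A minimal sketch of the pattern (illustrative only; 'top', 'n', 'slots'
// and 'values' are hypothetical names, not part of this file):
//
//   jint top;
//   do {
//     top = _index;                          // read the current top
//     if (top + n > _capacity) return;       // would overflow: give up
//   } while (Atomic::cmpxchg(top + n, &_index, top) != top);
//   // this thread now exclusively owns slots [top, top + n)
//   for (int i = 0; i < n; i++) slots[top + i] = values[i];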
void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}
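// The intended consumer-side protocol for the claiming code below is a
// simple "claim until empty" loop, as CMRootRegionScanTask::work() does
// later in this file (sketch, illustrative only):
//
//   HeapRegion* hr = root_regions->claim_next();
//   while (hr != NULL) {
//     // ... scan the root region hr ...
//     hr = root_regions->claim_next();
//   }
//
// claim_next() hands each survivor region to exactly one worker and
// returns NULL once all survivors are claimed or an abort was requested.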
HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}
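// Worked instances of the scaling above (illustrative only): roughly one
// concurrent marking thread is requested per four parallel GC threads,
// with a floor of one:
//
//   scale_parallel_threads(1)  == 1   // (1 + 2) / 4 == 0, clamped to 1
//   scale_parallel_threads(8)  == 2   // (8 + 2) / 4 == 2
//   scale_parallel_threads(13) == 3   // (13 + 2) / 4 == 3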
ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads = 0;
    _max_parallel_marking_threads = 0;
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
        (double) os::processor_count();
      double sleep_factor =
        (1.0 - marking_task_overhead) / marking_task_overhead;

      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor = sleep_factor;
      _marking_task_overhead = marking_task_overhead;
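      // A worked instance of the math above (illustrative only, assuming
      // G1MarkingOverheadPercent=10, MaxGCPauseMillis=200,
      // GCPauseIntervalMillis=1000 and an 8-CPU machine):
      //   marking_overhead      = 0.10
      //   overall_cm_overhead   = 200 * 0.10 / 1000  = 0.02
      //   cpu_ratio             = 1 / 8              = 0.125
      //   marking_thread_num    = ceil(0.02 / 0.125) = 1
      //   marking_task_overhead = 0.02 / 1 * 8       = 0.16
      //   sleep_factor          = (1 - 0.16) / 0.16  = 5.25
      // i.e. one marking thread that sleeps 5.25x as long as it works.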
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor = 0.0;
      _marking_task_overhead = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}
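// A short note on the biasing just set up in the constructor (illustrative
// only): _heap_bottom_card_num lets the per-worker counting card bitmaps
// index cards from 0 rather than from the card number of address 0. For
// example, with the usual 512-byte cards (card_shift == 9) and a heap
// reserved at 0x700000000:
//
//   _heap_bottom_card_num = 0x700000000 >> 9 = 0x3800000
//
// so the card covering heap address 'addr' gets the bitmap index
// (addr >> card_shift) - _heap_bottom_card_num.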
void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty(); // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread is still regarded as
  // being in the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  g1h->heap_region_iterate(&cl);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow count will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}
/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};
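// The sleep in CMConcurrentMarkingTask::work() above is how the
// G1MarkingOverheadPercent throttling takes effect: after each marking
// step ends, the worker sleeps for elapsed_vtime * sleep_factor. A worked
// instance (illustrative only): with the sleep_factor of 5.25 computed in
// the constructor example earlier, a marking step that consumed 10ms of
// vtime is followed by a ~52ms sleep, which keeps this worker's duty
// cycle near the requested 16% (its marking_task_overhead).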
// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}
void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};
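// Typical use of the helper above is a scoped timer around a remark
// sub-phase, as checkpointRootsFinal() does below:
//
//   {
//     G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
//     aggregate_count_data();
//   }
//
// The doit_and_prepend() trick emits a leading space before the timed
// message so it lines up with the other GC detail output.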
void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm; // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm; // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm; // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., one with at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};
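// The closures below repeatedly convert an address range [start, end)
// into a card-bitmap index range. A worked instance (illustrative only,
// assuming the usual 512-byte cards, card_shift == 9, and a heap whose
// bottom card has bitmap index 0): an object spanning [0x1200, 0x1700)
// covers
//
//   start_idx = 0x1200 >> 9 = 9
//   end_idx   = 0x1700 >> 9 = 11; since 0x1700 is not card aligned,
//               end_idx is bumped to 12 to cover the partial last card
//
// so cards [9, 12) get set. If the object ended exactly on a card
// boundary, no bump would be needed.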
// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;     // Region BM to be verified
  BitMap* _card_bm;       // Card BM to be verified
  bool _verbose;          // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have failed to account for some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
1620 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); 1621 1622 bool expected = _exp_region_bm->at(index); 1623 bool actual = _region_bm->at(index); 1624 if (expected && !actual) { 1625 if (_verbose) { 1626 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: " 1627 "expected: %s, actual: %s", 1628 hr->hrm_index(), 1629 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1630 } 1631 failures += 1; 1632 } 1633 1634 // Verify that the card bit maps for the cards spanned by the current 1635 // region match. We have an error if we have a set bit in the expected 1636 // bit map and the corresponding bit in the actual bitmap is not set. 1637 1638 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1639 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1640 1641 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1642 expected = _exp_card_bm->at(i); 1643 actual = _card_bm->at(i); 1644 1645 if (expected && !actual) { 1646 if (_verbose) { 1647 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1648 "expected: %s, actual: %s", 1649 hr->hrm_index(), i, 1650 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1651 } 1652 failures += 1; 1653 } 1654 } 1655 1656 if (failures > 0 && _verbose) { 1657 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1658 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1659 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1660 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1661 } 1662 1663 _failures += failures; 1664 1665 // We could stop iteration over the heap when we 1666 // find the first violating region by returning true. 1667 return false; 1668 } 1669 }; 1670 1671 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1672 protected: 1673 G1CollectedHeap* _g1h; 1674 ConcurrentMark* _cm; 1675 BitMap* _actual_region_bm; 1676 BitMap* _actual_card_bm; 1677 1678 uint _n_workers; 1679 1680 BitMap* _expected_region_bm; 1681 BitMap* _expected_card_bm; 1682 1683 int _failures; 1684 bool _verbose; 1685 1686 public: 1687 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1688 BitMap* region_bm, BitMap* card_bm, 1689 BitMap* expected_region_bm, BitMap* expected_card_bm) 1690 : AbstractGangTask("G1 verify final counting"), 1691 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1692 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1693 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1694 _failures(0), _verbose(false), 1695 _n_workers(0) { 1696 assert(VerifyDuringGC, "don't call this otherwise"); 1697 1698 // Use the value already set as the number of active threads 1699 // in the call to run_task(). 
1700 if (G1CollectedHeap::use_parallel_gc_threads()) { 1701 assert( _g1h->workers()->active_workers() > 0, 1702 "Should have been previously set"); 1703 _n_workers = _g1h->workers()->active_workers(); 1704 } else { 1705 _n_workers = 1; 1706 } 1707 1708 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1709 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1710 1711 _verbose = _cm->verbose_medium(); 1712 } 1713 1714 void work(uint worker_id) { 1715 assert(worker_id < _n_workers, "invariant"); 1716 1717 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1718 _actual_region_bm, _actual_card_bm, 1719 _expected_region_bm, 1720 _expected_card_bm, 1721 _verbose); 1722 1723 if (G1CollectedHeap::use_parallel_gc_threads()) { 1724 _g1h->heap_region_par_iterate_chunked(&verify_cl, 1725 worker_id, 1726 _n_workers, 1727 HeapRegion::VerifyCountClaimValue); 1728 } else { 1729 _g1h->heap_region_iterate(&verify_cl); 1730 } 1731 1732 Atomic::add(verify_cl.failures(), &_failures); 1733 } 1734 1735 int failures() const { return _failures; } 1736 }; 1737 1738 // Closure that finalizes the liveness counting data. 1739 // Used during the cleanup pause. 1740 // Sets the bits corresponding to the interval [NTAMS, top] 1741 // (which contains the implicitly live objects) in the 1742 // card liveness bitmap. Also sets the bit for each region, 1743 // containing live data, in the region liveness bitmap. 1744 1745 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1746 public: 1747 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1748 BitMap* region_bm, 1749 BitMap* card_bm) : 1750 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1751 1752 bool doHeapRegion(HeapRegion* hr) { 1753 1754 if (hr->is_continues_humongous()) { 1755 // We will ignore these here and process them when their 1756 // associated "starts humongous" region is processed (see 1757 // set_bit_for_heap_region()). Note that we cannot rely on their 1758 // associated "starts humongous" region to have their bit set to 1759 // 1 since, due to the region chunking in the parallel region 1760 // iteration, a "continues humongous" region might be visited 1761 // before its associated "starts humongous". 1762 return false; 1763 } 1764 1765 HeapWord* ntams = hr->next_top_at_mark_start(); 1766 HeapWord* top = hr->top(); 1767 1768 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1769 1770 // Mark the allocated-since-marking portion... 1771 if (ntams < top) { 1772 // This definitely means the region has live objects. 1773 set_bit_for_region(hr); 1774 1775 // Now set the bits in the card bitmap for [ntams, top) 1776 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1777 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1778 1779 // Note: if we're looking at the last region in heap - top 1780 // could be actually just beyond the end of the heap; end_idx 1781 // will then correspond to a (non-existent) card that is also 1782 // just beyond the heap. 
1783 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1784 // end of object is not card aligned - increment to cover 1785 // all the cards spanned by the object 1786 end_idx += 1; 1787 } 1788 1789 assert(end_idx <= _card_bm->size(), 1790 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1791 end_idx, _card_bm->size())); 1792 assert(start_idx < _card_bm->size(), 1793 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1794 start_idx, _card_bm->size())); 1795 1796 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1797 } 1798 1799 // Set the bit for the region if it contains live data 1800 if (hr->next_marked_bytes() > 0) { 1801 set_bit_for_region(hr); 1802 } 1803 1804 return false; 1805 } 1806 }; 1807 1808 class G1ParFinalCountTask: public AbstractGangTask { 1809 protected: 1810 G1CollectedHeap* _g1h; 1811 ConcurrentMark* _cm; 1812 BitMap* _actual_region_bm; 1813 BitMap* _actual_card_bm; 1814 1815 uint _n_workers; 1816 1817 public: 1818 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1819 : AbstractGangTask("G1 final counting"), 1820 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1821 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1822 _n_workers(0) { 1823 // Use the value already set as the number of active threads 1824 // in the call to run_task(). 1825 if (G1CollectedHeap::use_parallel_gc_threads()) { 1826 assert( _g1h->workers()->active_workers() > 0, 1827 "Should have been previously set"); 1828 _n_workers = _g1h->workers()->active_workers(); 1829 } else { 1830 _n_workers = 1; 1831 } 1832 } 1833 1834 void work(uint worker_id) { 1835 assert(worker_id < _n_workers, "invariant"); 1836 1837 FinalCountDataUpdateClosure final_update_cl(_g1h, 1838 _actual_region_bm, 1839 _actual_card_bm); 1840 1841 if (G1CollectedHeap::use_parallel_gc_threads()) { 1842 _g1h->heap_region_par_iterate_chunked(&final_update_cl, 1843 worker_id, 1844 _n_workers, 1845 HeapRegion::FinalCountClaimValue); 1846 } else { 1847 _g1h->heap_region_iterate(&final_update_cl); 1848 } 1849 } 1850 }; 1851 1852 class G1ParNoteEndTask; 1853 1854 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1855 G1CollectedHeap* _g1; 1856 size_t _max_live_bytes; 1857 uint _regions_claimed; 1858 size_t _freed_bytes; 1859 FreeRegionList* _local_cleanup_list; 1860 HeapRegionSetCount _old_regions_removed; 1861 HeapRegionSetCount _humongous_regions_removed; 1862 HRRSCleanupTask* _hrrs_cleanup_task; 1863 double _claimed_region_time; 1864 double _max_region_time; 1865 1866 public: 1867 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1868 FreeRegionList* local_cleanup_list, 1869 HRRSCleanupTask* hrrs_cleanup_task) : 1870 _g1(g1), 1871 _max_live_bytes(0), _regions_claimed(0), 1872 _freed_bytes(0), 1873 _claimed_region_time(0.0), _max_region_time(0.0), 1874 _local_cleanup_list(local_cleanup_list), 1875 _old_regions_removed(), 1876 _humongous_regions_removed(), 1877 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1878 1879 size_t freed_bytes() { return _freed_bytes; } 1880 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } 1881 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } 1882 1883 bool doHeapRegion(HeapRegion *hr) { 1884 if (hr->is_continues_humongous()) { 1885 return false; 1886 } 1887 // We use a claim value of zero here because all regions 1888 // were claimed with value 1 in the FinalCount task. 
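// Descriptive note (added): for a non-humongous region the code below
// records the end of marking, accumulates the region's maximum live
// bytes, and then either frees the region (when it is used but has no
// live data and is not young, taking the humongous or old path as
// appropriate) or lets its remembered set do per-region cleanup work.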
1889 _g1->reset_gc_time_stamps(hr); 1890 double start = os::elapsedTime(); 1891 _regions_claimed++; 1892 hr->note_end_of_marking(); 1893 _max_live_bytes += hr->max_live_bytes(); 1894 1895 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1896 _freed_bytes += hr->used(); 1897 hr->set_containing_set(NULL); 1898 if (hr->is_humongous()) { 1899 assert(hr->is_starts_humongous(), "we should only see starts humongous"); 1900 _humongous_regions_removed.increment(1u, hr->capacity()); 1901 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1902 } else { 1903 _old_regions_removed.increment(1u, hr->capacity()); 1904 _g1->free_region(hr, _local_cleanup_list, true); 1905 } 1906 } else { 1907 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1908 } 1909 1910 double region_time = (os::elapsedTime() - start); 1911 _claimed_region_time += region_time; 1912 if (region_time > _max_region_time) { 1913 _max_region_time = region_time; 1914 } 1915 return false; 1916 } 1917 1918 size_t max_live_bytes() { return _max_live_bytes; } 1919 uint regions_claimed() { return _regions_claimed; } 1920 double claimed_region_time_sec() { return _claimed_region_time; } 1921 double max_region_time_sec() { return _max_region_time; } 1922 }; 1923 1924 class G1ParNoteEndTask: public AbstractGangTask { 1925 friend class G1NoteEndOfConcMarkClosure; 1926 1927 protected: 1928 G1CollectedHeap* _g1h; 1929 size_t _max_live_bytes; 1930 size_t _freed_bytes; 1931 FreeRegionList* _cleanup_list; 1932 1933 public: 1934 G1ParNoteEndTask(G1CollectedHeap* g1h, 1935 FreeRegionList* cleanup_list) : 1936 AbstractGangTask("G1 note end"), _g1h(g1h), 1937 _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { } 1938 1939 void work(uint worker_id) { 1940 double start = os::elapsedTime(); 1941 FreeRegionList local_cleanup_list("Local Cleanup List"); 1942 HRRSCleanupTask hrrs_cleanup_task; 1943 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1944 &hrrs_cleanup_task); 1945 if (G1CollectedHeap::use_parallel_gc_threads()) { 1946 _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id, 1947 _g1h->workers()->active_workers(), 1948 HeapRegion::NoteEndClaimValue); 1949 } else { 1950 _g1h->heap_region_iterate(&g1_note_end); 1951 } 1952 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1953 1954 // Now update the lists 1955 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1956 { 1957 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1958 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1959 _max_live_bytes += g1_note_end.max_live_bytes(); 1960 _freed_bytes += g1_note_end.freed_bytes(); 1961 1962 // If we iterate over the global cleanup list at the end of 1963 // cleanup to do this printing we will not guarantee to only 1964 // generate output for the newly-reclaimed regions (the list 1965 // might not be empty at the beginning of cleanup; we might 1966 // still be working on its previous contents). So we do the 1967 // printing here, before we append the new regions to the global 1968 // cleanup list. 
1969 1970 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1971 if (hr_printer->is_active()) { 1972 FreeRegionListIterator iter(&local_cleanup_list); 1973 while (iter.more_available()) { 1974 HeapRegion* hr = iter.get_next(); 1975 hr_printer->cleanup(hr); 1976 } 1977 } 1978 1979 _cleanup_list->add_ordered(&local_cleanup_list); 1980 assert(local_cleanup_list.is_empty(), "post-condition"); 1981 1982 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1983 } 1984 } 1985 size_t max_live_bytes() { return _max_live_bytes; } 1986 size_t freed_bytes() { return _freed_bytes; } 1987 }; 1988 1989 class G1ParScrubRemSetTask: public AbstractGangTask { 1990 protected: 1991 G1RemSet* _g1rs; 1992 BitMap* _region_bm; 1993 BitMap* _card_bm; 1994 public: 1995 G1ParScrubRemSetTask(G1CollectedHeap* g1h, 1996 BitMap* region_bm, BitMap* card_bm) : 1997 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), 1998 _region_bm(region_bm), _card_bm(card_bm) { } 1999 2000 void work(uint worker_id) { 2001 if (G1CollectedHeap::use_parallel_gc_threads()) { 2002 _g1rs->scrub_par(_region_bm, _card_bm, worker_id, 2003 HeapRegion::ScrubRemSetClaimValue); 2004 } else { 2005 _g1rs->scrub(_region_bm, _card_bm); 2006 } 2007 } 2008 2009 }; 2010 2011 void ConcurrentMark::cleanup() { 2012 // world is stopped at this checkpoint 2013 assert(SafepointSynchronize::is_at_safepoint(), 2014 "world should be stopped"); 2015 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2016 2017 // If a full collection has happened, we shouldn't do this. 2018 if (has_aborted()) { 2019 g1h->set_marking_complete(); // So bitmap clearing isn't confused 2020 return; 2021 } 2022 2023 g1h->verify_region_sets_optional(); 2024 2025 if (VerifyDuringGC) { 2026 HandleMark hm; // handle scope 2027 Universe::heap()->prepare_for_verify(); 2028 Universe::verify(VerifyOption_G1UsePrevMarking, 2029 " VerifyDuringGC:(before)"); 2030 } 2031 g1h->check_bitmaps("Cleanup Start"); 2032 2033 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 2034 g1p->record_concurrent_mark_cleanup_start(); 2035 2036 double start = os::elapsedTime(); 2037 2038 HeapRegionRemSet::reset_for_cleanup_tasks(); 2039 2040 uint n_workers; 2041 2042 // Do counting once more with the world stopped for good measure. 2043 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 2044 2045 if (G1CollectedHeap::use_parallel_gc_threads()) { 2046 assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2047 "sanity check"); 2048 2049 g1h->set_par_threads(); 2050 n_workers = g1h->n_par_threads(); 2051 assert(g1h->n_par_threads() == n_workers, 2052 "Should not have been reset"); 2053 g1h->workers()->run_task(&g1_par_count_task); 2054 // Done with the parallel phase so reset to 0. 2055 g1h->set_par_threads(0); 2056 2057 assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue), 2058 "sanity check"); 2059 } else { 2060 n_workers = 1; 2061 g1_par_count_task.work(0); 2062 } 2063 2064 if (VerifyDuringGC) { 2065 // Verify that the counting data accumulated during marking matches 2066 // that calculated by walking the marking bitmap. 
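// In outline (descriptive only): the expected region/card bitmaps below
// start out empty, G1ParVerifyFinalCountTask repopulates them by
// re-walking the next marking bitmap, and VerifyLiveObjectDataHRClosure
// then flags a failure for every bit that is set in an expected bitmap
// but clear in the corresponding actual one.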
2067 2068 // Bitmaps to hold expected values 2069 BitMap expected_region_bm(_region_bm.size(), true); 2070 BitMap expected_card_bm(_card_bm.size(), true); 2071 2072 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 2073 &_region_bm, 2074 &_card_bm, 2075 &expected_region_bm, 2076 &expected_card_bm); 2077 2078 if (G1CollectedHeap::use_parallel_gc_threads()) { 2079 g1h->set_par_threads((int)n_workers); 2080 g1h->workers()->run_task(&g1_par_verify_task); 2081 // Done with the parallel phase so reset to 0. 2082 g1h->set_par_threads(0); 2083 2084 assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue), 2085 "sanity check"); 2086 } else { 2087 g1_par_verify_task.work(0); 2088 } 2089 2090 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); 2091 } 2092 2093 size_t start_used_bytes = g1h->used(); 2094 g1h->set_marking_complete(); 2095 2096 double count_end = os::elapsedTime(); 2097 double this_final_counting_time = (count_end - start); 2098 _total_counting_time += this_final_counting_time; 2099 2100 if (G1PrintRegionLivenessInfo) { 2101 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); 2102 _g1h->heap_region_iterate(&cl); 2103 } 2104 2105 // Install newly created mark bitMap as "prev". 2106 swapMarkBitMaps(); 2107 2108 g1h->reset_gc_time_stamp(); 2109 2110 // Note end of marking in all heap regions. 2111 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list); 2112 if (G1CollectedHeap::use_parallel_gc_threads()) { 2113 g1h->set_par_threads((int)n_workers); 2114 g1h->workers()->run_task(&g1_par_note_end_task); 2115 g1h->set_par_threads(0); 2116 2117 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), 2118 "sanity check"); 2119 } else { 2120 g1_par_note_end_task.work(0); 2121 } 2122 g1h->check_gc_time_stamps(); 2123 2124 if (!cleanup_list_is_empty()) { 2125 // The cleanup list is not empty, so we'll have to process it 2126 // concurrently. Notify anyone else that might be wanting free 2127 // regions that there will be more free regions coming soon. 2128 g1h->set_free_regions_coming(); 2129 } 2130 2131 // call below, since it affects the metric by which we sort the heap 2132 // regions. 2133 if (G1ScrubRemSets) { 2134 double rs_scrub_start = os::elapsedTime(); 2135 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); 2136 if (G1CollectedHeap::use_parallel_gc_threads()) { 2137 g1h->set_par_threads((int)n_workers); 2138 g1h->workers()->run_task(&g1_par_scrub_rs_task); 2139 g1h->set_par_threads(0); 2140 2141 assert(g1h->check_heap_region_claim_values( 2142 HeapRegion::ScrubRemSetClaimValue), 2143 "sanity check"); 2144 } else { 2145 g1_par_scrub_rs_task.work(0); 2146 } 2147 2148 double rs_scrub_end = os::elapsedTime(); 2149 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); 2150 _total_rs_scrub_time += this_rs_scrub_time; 2151 } 2152 2153 // this will also free any regions totally full of garbage objects, 2154 // and sort the regions. 2155 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers); 2156 2157 // Statistics. 2158 double end = os::elapsedTime(); 2159 _cleanup_times.add((end - start) * 1000.0); 2160 2161 if (G1Log::fine()) { 2162 g1h->print_size_transition(gclog_or_tty, 2163 start_used_bytes, 2164 g1h->used(), 2165 g1h->capacity()); 2166 } 2167 2168 // Clean up will have freed any regions completely full of garbage. 2169 // Update the soft reference policy with the new heap occupancy. 
2170 Universe::update_heap_info_at_gc();
2171
2172 if (VerifyDuringGC) {
2173 HandleMark hm; // handle scope
2174 Universe::heap()->prepare_for_verify();
2175 Universe::verify(VerifyOption_G1UsePrevMarking,
2176 " VerifyDuringGC:(after)");
2177 }
2178
2179 g1h->check_bitmaps("Cleanup End");
2180
2181 g1h->verify_region_sets_optional();
2182
2183 // We need to make this be a "collection" so any collection pause that
2184 // races with it goes around and waits for completeCleanup to finish.
2185 g1h->increment_total_collections();
2186
2187 // Clean out dead classes and update Metaspace sizes.
2188 if (ClassUnloadingWithConcurrentMark) {
2189 ClassLoaderDataGraph::purge();
2190 }
2191 MetaspaceGC::compute_new_size();
2192
2193 // We reclaimed old regions so we should calculate the sizes to make
2194 // sure we update the old gen/space data.
2195 g1h->g1mm()->update_sizes();
2196
2197 g1h->trace_heap_after_concurrent_cycle();
2198 }
2199
2200 void ConcurrentMark::completeCleanup() {
2201 if (has_aborted()) return;
2202
2203 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2204
2205 _cleanup_list.verify_optional();
2206 FreeRegionList tmp_free_list("Tmp Free List");
2207
2208 if (G1ConcRegionFreeingVerbose) {
2209 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2210 "cleanup list has %u entries",
2211 _cleanup_list.length());
2212 }
2213
2214 // No one else should be accessing the _cleanup_list at this point,
2215 // so it is not necessary to take any locks.
2216 while (!_cleanup_list.is_empty()) {
2217 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2218 assert(hr != NULL, "Got NULL from a non-empty list");
2219 hr->par_clear();
2220 tmp_free_list.add_ordered(hr);
2221
2222 // Instead of adding one region at a time to the secondary_free_list,
2223 // we accumulate them in the local list and move them a few at a
2224 // time. This also cuts down on the number of notify_all() calls
2225 // we do during this process. We'll also append the local list when
2226 // _cleanup_list is empty (which means we just removed the last
2227 // region from the _cleanup_list).
2228 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2229 _cleanup_list.is_empty()) {
2230 if (G1ConcRegionFreeingVerbose) {
2231 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2232 "appending %u entries to the secondary_free_list, "
2233 "cleanup list still has %u entries",
2234 tmp_free_list.length(),
2235 _cleanup_list.length());
2236 }
2237
2238 {
2239 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2240 g1h->secondary_free_list_add(&tmp_free_list);
2241 SecondaryFreeList_lock->notify_all();
2242 }
2243
2244 if (G1StressConcRegionFreeing) {
2245 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2246 os::sleep(Thread::current(), (jlong) 1, false);
2247 }
2248 }
2249 }
2250 }
2251 assert(tmp_free_list.is_empty(), "post-condition");
2252 }
2253
2254 // Supporting Object and Oop closures for reference discovery
2255 // and processing during marking.
2256
2257 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2258 HeapWord* addr = (HeapWord*)obj;
2259 return addr != NULL &&
2260 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2261 }
2262
2263 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2264 // Uses the CMTask associated with a worker thread (for serial reference
2265 // processing the CMTask for worker 0 is used) to preserve (mark) and
2266 // trace referent objects.
2267 // 2268 // Using the CMTask and embedded local queues avoids having the worker 2269 // threads operating on the global mark stack. This reduces the risk 2270 // of overflowing the stack - which we would rather avoid at this late 2271 // state. Also using the tasks' local queues removes the potential 2272 // of the workers interfering with each other that could occur if 2273 // operating on the global stack. 2274 2275 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2276 ConcurrentMark* _cm; 2277 CMTask* _task; 2278 int _ref_counter_limit; 2279 int _ref_counter; 2280 bool _is_serial; 2281 public: 2282 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2283 _cm(cm), _task(task), _is_serial(is_serial), 2284 _ref_counter_limit(G1RefProcDrainInterval) { 2285 assert(_ref_counter_limit > 0, "sanity"); 2286 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2287 _ref_counter = _ref_counter_limit; 2288 } 2289 2290 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2291 virtual void do_oop( oop* p) { do_oop_work(p); } 2292 2293 template <class T> void do_oop_work(T* p) { 2294 if (!_cm->has_overflown()) { 2295 oop obj = oopDesc::load_decode_heap_oop(p); 2296 if (_cm->verbose_high()) { 2297 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2298 "*"PTR_FORMAT" = "PTR_FORMAT, 2299 _task->worker_id(), p2i(p), p2i((void*) obj)); 2300 } 2301 2302 _task->deal_with_reference(obj); 2303 _ref_counter--; 2304 2305 if (_ref_counter == 0) { 2306 // We have dealt with _ref_counter_limit references, pushing them 2307 // and objects reachable from them on to the local stack (and 2308 // possibly the global stack). Call CMTask::do_marking_step() to 2309 // process these entries. 2310 // 2311 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2312 // there's nothing more to do (i.e. we're done with the entries that 2313 // were pushed as a result of the CMTask::deal_with_reference() calls 2314 // above) or we overflow. 2315 // 2316 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2317 // flag while there may still be some work to do. (See the comment at 2318 // the beginning of CMTask::do_marking_step() for those conditions - 2319 // one of which is reaching the specified time target.) It is only 2320 // when CMTask::do_marking_step() returns without setting the 2321 // has_aborted() flag that the marking step has completed. 2322 do { 2323 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2324 _task->do_marking_step(mark_step_duration_ms, 2325 false /* do_termination */, 2326 _is_serial); 2327 } while (_task->has_aborted() && !_cm->has_overflown()); 2328 _ref_counter = _ref_counter_limit; 2329 } 2330 } else { 2331 if (_cm->verbose_high()) { 2332 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2333 } 2334 } 2335 } 2336 }; 2337 2338 // 'Drain' oop closure used by both serial and parallel reference processing. 2339 // Uses the CMTask associated with a given worker thread (for serial 2340 // reference processing the CMtask for worker 0 is used). Calls the 2341 // do_marking_step routine, with an unbelievably large timeout value, 2342 // to drain the marking data structures of the remaining entries 2343 // added by the 'keep alive' oop closure above. 
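//
// As an illustrative sketch (simplified from weakRefsWork() below), the
// 'keep alive' and 'drain' closures are handed to the reference
// processor together, e.g.:
//
//   G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
//   G1CMDrainMarkingStackClosure drain(this, task(0), true /* is_serial */);
//   rp->process_discovered_references(&is_alive, &keep_alive, &drain,
//                                     executor, ...);
//
// 'keep_alive' marks and traces each live referent, while 'drain'
// finishes off whatever those calls pushed on to the marking stacks.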
2344
2345 class G1CMDrainMarkingStackClosure: public VoidClosure {
2346 ConcurrentMark* _cm;
2347 CMTask* _task;
2348 bool _is_serial;
2349 public:
2350 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2351 _cm(cm), _task(task), _is_serial(is_serial) {
2352 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2353 }
2354
2355 void do_void() {
2356 do {
2357 if (_cm->verbose_high()) {
2358 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2359 _task->worker_id(), BOOL_TO_STR(_is_serial));
2360 }
2361
2362 // We call CMTask::do_marking_step() to completely drain the local
2363 // and global marking stacks of entries pushed by the 'keep alive'
2364 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2365 //
2366 // CMTask::do_marking_step() is called in a loop, which we'll exit
2367 // if there's nothing more to do (i.e. we've completely drained the
2368 // entries that were pushed as a result of applying the 'keep alive'
2369 // closure to the entries on the discovered ref lists) or we overflow
2370 // the global marking stack.
2371 //
2372 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2373 // flag while there may still be some work to do. (See the comment at
2374 // the beginning of CMTask::do_marking_step() for those conditions -
2375 // one of which is reaching the specified time target.) It is only
2376 // when CMTask::do_marking_step() returns without setting the
2377 // has_aborted() flag that the marking step has completed.
2378
2379 _task->do_marking_step(1000000000.0 /* something very large */,
2380 true /* do_termination */,
2381 _is_serial);
2382 } while (_task->has_aborted() && !_cm->has_overflown());
2383 }
2384 };
2385
2386 // Implementation of AbstractRefProcTaskExecutor for parallel
2387 // reference processing at the end of G1 concurrent marking.
2388
2389 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2390 private:
2391 G1CollectedHeap* _g1h;
2392 ConcurrentMark* _cm;
2393 WorkGang* _workers;
2394 int _active_workers;
2395
2396 public:
2397 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2398 ConcurrentMark* cm,
2399 WorkGang* workers,
2400 int n_workers) :
2401 _g1h(g1h), _cm(cm),
2402 _workers(workers), _active_workers(n_workers) { }
2403
2404 // Executes the given task using concurrent marking worker threads.
2405 virtual void execute(ProcessTask& task); 2406 virtual void execute(EnqueueTask& task); 2407 }; 2408 2409 class G1CMRefProcTaskProxy: public AbstractGangTask { 2410 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2411 ProcessTask& _proc_task; 2412 G1CollectedHeap* _g1h; 2413 ConcurrentMark* _cm; 2414 2415 public: 2416 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2417 G1CollectedHeap* g1h, 2418 ConcurrentMark* cm) : 2419 AbstractGangTask("Process reference objects in parallel"), 2420 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2421 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2422 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2423 } 2424 2425 virtual void work(uint worker_id) { 2426 ResourceMark rm; 2427 HandleMark hm; 2428 CMTask* task = _cm->task(worker_id); 2429 G1CMIsAliveClosure g1_is_alive(_g1h); 2430 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2431 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2432 2433 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2434 } 2435 }; 2436 2437 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2438 assert(_workers != NULL, "Need parallel worker threads."); 2439 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2440 2441 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2442 2443 // We need to reset the concurrency level before each 2444 // proxy task execution, so that the termination protocol 2445 // and overflow handling in CMTask::do_marking_step() knows 2446 // how many workers to wait for. 2447 _cm->set_concurrency(_active_workers); 2448 _g1h->set_par_threads(_active_workers); 2449 _workers->run_task(&proc_task_proxy); 2450 _g1h->set_par_threads(0); 2451 } 2452 2453 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2454 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2455 EnqueueTask& _enq_task; 2456 2457 public: 2458 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2459 AbstractGangTask("Enqueue reference objects in parallel"), 2460 _enq_task(enq_task) { } 2461 2462 virtual void work(uint worker_id) { 2463 _enq_task.work(worker_id); 2464 } 2465 }; 2466 2467 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2468 assert(_workers != NULL, "Need parallel worker threads."); 2469 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2470 2471 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2472 2473 // Not strictly necessary but... 2474 // 2475 // We need to reset the concurrency level before each 2476 // proxy task execution, so that the termination protocol 2477 // and overflow handling in CMTask::do_marking_step() knows 2478 // how many workers to wait for. 2479 _cm->set_concurrency(_active_workers); 2480 _g1h->set_par_threads(_active_workers); 2481 _workers->run_task(&enq_task_proxy); 2482 _g1h->set_par_threads(0); 2483 } 2484 2485 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2486 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2487 } 2488 2489 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2490 if (has_overflown()) { 2491 // Skip processing the discovered references if we have 2492 // overflown the global marking stack. Reference objects 2493 // only get discovered once so it is OK to not 2494 // de-populate the discovered reference lists. 
We could have,
2495 // but the only benefit would be that, when marking restarts,
2496 // fewer reference objects are discovered.
2497 return;
2498 }
2499
2500 ResourceMark rm;
2501 HandleMark hm;
2502
2503 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2504
2505 // Is alive closure.
2506 G1CMIsAliveClosure g1_is_alive(g1h);
2507
2508 // Inner scope to exclude the cleaning of the string and symbol
2509 // tables from the displayed time.
2510 {
2511 G1CMTraceTime t("GC ref-proc", G1Log::finer());
2512
2513 ReferenceProcessor* rp = g1h->ref_processor_cm();
2514
2515 // See the comment in G1CollectedHeap::ref_processing_init()
2516 // about how reference processing currently works in G1.
2517
2518 // Set the soft reference policy
2519 rp->setup_policy(clear_all_soft_refs);
2520 assert(_markStack.isEmpty(), "mark stack should be empty");
2521
2522 // Instances of the 'Keep Alive' and 'Complete GC' closures used
2523 // in serial reference processing. Note these closures are also
2524 // used for serially processing (by the current thread) the
2525 // JNI references during parallel reference processing.
2526 //
2527 // These closures do not need to synchronize with the worker
2528 // threads involved in parallel reference processing as these
2529 // instances are executed serially by the current thread (e.g.
2530 // reference processing is not multi-threaded and is thus
2531 // performed by the current thread instead of a gang worker).
2532 //
2533 // The gang tasks involved in parallel reference processing create
2534 // their own instances of these closures, which do their own
2535 // synchronization among themselves.
2536 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2537 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2538
2539 // We need at least one active thread. If reference processing
2540 // is not multi-threaded we use the current (VMThread) thread,
2541 // otherwise we use the work gang from the G1CollectedHeap and
2542 // we utilize all the worker threads we can.
2543 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
2544 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2545 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2546
2547 // Parallel processing task executor.
2548 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2549 g1h->workers(), active_workers);
2550 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2551
2552 // Set the concurrency level. The phase was already set prior to
2553 // executing the remark task.
2554 set_concurrency(active_workers);
2555
2556 // Set the degree of MT processing here. If the discovery was done MT,
2557 // the number of threads involved during discovery could differ from
2558 // the number of active workers. This is OK as long as the discovered
2559 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2560 rp->set_active_mt_degree(active_workers);
2561
2562 // Process the weak references.
2563 const ReferenceProcessorStats& stats = 2564 rp->process_discovered_references(&g1_is_alive, 2565 &g1_keep_alive, 2566 &g1_drain_mark_stack, 2567 executor, 2568 g1h->gc_timer_cm(), 2569 concurrent_gc_id()); 2570 g1h->gc_tracer_cm()->report_gc_reference_stats(stats); 2571 2572 // The do_oop work routines of the keep_alive and drain_marking_stack 2573 // oop closures will set the has_overflown flag if we overflow the 2574 // global marking stack. 2575 2576 assert(_markStack.overflow() || _markStack.isEmpty(), 2577 "mark stack should be empty (unless it overflowed)"); 2578 2579 if (_markStack.overflow()) { 2580 // This should have been done already when we tried to push an 2581 // entry on to the global mark stack. But let's do it again. 2582 set_has_overflown(); 2583 } 2584 2585 assert(rp->num_q() == active_workers, "why not"); 2586 2587 rp->enqueue_discovered_references(executor); 2588 2589 rp->verify_no_references_recorded(); 2590 assert(!rp->discovery_enabled(), "Post condition"); 2591 } 2592 2593 if (has_overflown()) { 2594 // We can not trust g1_is_alive if the marking stack overflowed 2595 return; 2596 } 2597 2598 assert(_markStack.isEmpty(), "Marking should have completed"); 2599 2600 // Unload Klasses, String, Symbols, Code Cache, etc. 2601 { 2602 G1CMTraceTime trace("Unloading", G1Log::finer()); 2603 2604 if (ClassUnloadingWithConcurrentMark) { 2605 bool purged_classes; 2606 2607 { 2608 G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest()); 2609 purged_classes = SystemDictionary::do_unloading(&g1_is_alive); 2610 } 2611 2612 { 2613 G1CMTraceTime trace("Parallel Unloading", G1Log::finest()); 2614 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2615 } 2616 } 2617 2618 if (G1StringDedup::is_enabled()) { 2619 G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest()); 2620 G1StringDedup::unlink(&g1_is_alive); 2621 } 2622 } 2623 } 2624 2625 void ConcurrentMark::swapMarkBitMaps() { 2626 CMBitMapRO* temp = _prevMarkBitMap; 2627 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2628 _nextMarkBitMap = (CMBitMap*) temp; 2629 } 2630 2631 class CMObjectClosure; 2632 2633 // Closure for iterating over objects, currently only used for 2634 // processing SATB buffers. 
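// For reference, G1RemarkThreadsClosure below applies an instance of
// this closure to each thread's SATB buffer, e.g. via
//   jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
// so every object logged by the SATB (pre-write) barrier is fed back
// into the owning CMTask through deal_with_reference().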
2635 class CMObjectClosure : public ObjectClosure {
2636 private:
2637 CMTask* _task;
2638
2639 public:
2640 void do_object(oop obj) {
2641 _task->deal_with_reference(obj);
2642 }
2643
2644 CMObjectClosure(CMTask* task) : _task(task) { }
2645 };
2646
2647 class G1RemarkThreadsClosure : public ThreadClosure {
2648 CMObjectClosure _cm_obj;
2649 G1CMOopClosure _cm_cl;
2650 MarkingCodeBlobClosure _code_cl;
2651 int _thread_parity;
2652 bool _is_par;
2653
2654 public:
2655 G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
2656 _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2657 _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
2658
2659 void do_thread(Thread* thread) {
2660 if (thread->is_Java_thread()) {
2661 if (thread->claim_oops_do(_is_par, _thread_parity)) {
2662 JavaThread* jt = (JavaThread*)thread;
2663
2664 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2665 // however, the oops reachable from nmethods have very complex lifecycles:
2666 // * Alive if on the stack of an executing method
2667 // * Weakly reachable otherwise
2668 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2669 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2670 jt->nmethods_do(&_code_cl);
2671
2672 jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
2673 }
2674 } else if (thread->is_VM_thread()) {
2675 if (thread->claim_oops_do(_is_par, _thread_parity)) {
2676 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
2677 }
2678 }
2679 }
2680 };
2681
2682 class CMRemarkTask: public AbstractGangTask {
2683 private:
2684 ConcurrentMark* _cm;
2685 bool _is_serial;
2686 public:
2687 void work(uint worker_id) {
2688 // Since all available tasks are actually started, we should
2689 // only proceed if we're supposed to be active.
2690 if (worker_id < _cm->active_tasks()) {
2691 CMTask* task = _cm->task(worker_id);
2692 task->record_start_time();
2693 {
2694 ResourceMark rm;
2695 HandleMark hm;
2696
2697 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
2698 Threads::threads_do(&threads_f);
2699 }
2700
2701 do {
2702 task->do_marking_step(1000000000.0 /* something very large */,
2703 true /* do_termination */,
2704 _is_serial);
2705 } while (task->has_aborted() && !_cm->has_overflown());
2706 // If we overflow, then we do not want to restart. We instead
2707 // want to abort remark and do concurrent marking again.
2708 task->record_end_time();
2709 }
2710 }
2711
2712 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
2713 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
2714 _cm->terminator()->reset_for_reuse(active_workers);
2715 }
2716 };
2717
2718 void ConcurrentMark::checkpointRootsFinalWork() {
2719 ResourceMark rm;
2720 HandleMark hm;
2721 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2722
2723 G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2724
2725 g1h->ensure_parsability(false);
2726
2727 if (G1CollectedHeap::use_parallel_gc_threads()) {
2728 G1CollectedHeap::StrongRootsScope srs(g1h);
2729 // this is remark, so we'll use up all active threads
2730 uint active_workers = g1h->workers()->active_workers();
2731 if (active_workers == 0) {
2732 assert(active_workers > 0, "Should have been set earlier");
2733 active_workers = (uint) ParallelGCThreads;
2734 g1h->workers()->set_active_workers(active_workers);
2735 }
2736 set_concurrency_and_phase(active_workers, false /* concurrent */);
2737 // Leave _parallel_marking_threads at its
2738 // value originally calculated in the ConcurrentMark
2739 // constructor and pass values of the active workers
2740 // through the gang in the task.
2741
2742 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
2743 // We will start all available threads, even if we decide that the
2744 // active_workers will be fewer. The extra ones will just bail out
2745 // immediately.
2746 g1h->set_par_threads(active_workers);
2747 g1h->workers()->run_task(&remarkTask);
2748 g1h->set_par_threads(0);
2749 } else {
2750 G1CollectedHeap::StrongRootsScope srs(g1h);
2751 uint active_workers = 1;
2752 set_concurrency_and_phase(active_workers, false /* concurrent */);
2753
2754 // Note - if there's no work gang then the VMThread will be
2755 // the thread to execute the remark - serially. We have
2756 // to pass true for the is_serial parameter so that
2757 // CMTask::do_marking_step() doesn't enter the sync
2758 // barriers in the event of an overflow. Doing so will
2759 // cause an assert that the current thread is not a
2760 // concurrent GC thread.
2761 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/); 2762 remarkTask.work(0); 2763 } 2764 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2765 guarantee(has_overflown() || 2766 satb_mq_set.completed_buffers_num() == 0, 2767 err_msg("Invariant: has_overflown = %s, num buffers = %d", 2768 BOOL_TO_STR(has_overflown()), 2769 satb_mq_set.completed_buffers_num())); 2770 2771 print_stats(); 2772 } 2773 2774 #ifndef PRODUCT 2775 2776 class PrintReachableOopClosure: public OopClosure { 2777 private: 2778 G1CollectedHeap* _g1h; 2779 outputStream* _out; 2780 VerifyOption _vo; 2781 bool _all; 2782 2783 public: 2784 PrintReachableOopClosure(outputStream* out, 2785 VerifyOption vo, 2786 bool all) : 2787 _g1h(G1CollectedHeap::heap()), 2788 _out(out), _vo(vo), _all(all) { } 2789 2790 void do_oop(narrowOop* p) { do_oop_work(p); } 2791 void do_oop( oop* p) { do_oop_work(p); } 2792 2793 template <class T> void do_oop_work(T* p) { 2794 oop obj = oopDesc::load_decode_heap_oop(p); 2795 const char* str = NULL; 2796 const char* str2 = ""; 2797 2798 if (obj == NULL) { 2799 str = ""; 2800 } else if (!_g1h->is_in_g1_reserved(obj)) { 2801 str = " O"; 2802 } else { 2803 HeapRegion* hr = _g1h->heap_region_containing(obj); 2804 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); 2805 bool marked = _g1h->is_marked(obj, _vo); 2806 2807 if (over_tams) { 2808 str = " >"; 2809 if (marked) { 2810 str2 = " AND MARKED"; 2811 } 2812 } else if (marked) { 2813 str = " M"; 2814 } else { 2815 str = " NOT"; 2816 } 2817 } 2818 2819 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", 2820 p2i(p), p2i((void*) obj), str, str2); 2821 } 2822 }; 2823 2824 class PrintReachableObjectClosure : public ObjectClosure { 2825 private: 2826 G1CollectedHeap* _g1h; 2827 outputStream* _out; 2828 VerifyOption _vo; 2829 bool _all; 2830 HeapRegion* _hr; 2831 2832 public: 2833 PrintReachableObjectClosure(outputStream* out, 2834 VerifyOption vo, 2835 bool all, 2836 HeapRegion* hr) : 2837 _g1h(G1CollectedHeap::heap()), 2838 _out(out), _vo(vo), _all(all), _hr(hr) { } 2839 2840 void do_object(oop o) { 2841 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo); 2842 bool marked = _g1h->is_marked(o, _vo); 2843 bool print_it = _all || over_tams || marked; 2844 2845 if (print_it) { 2846 _out->print_cr(" "PTR_FORMAT"%s", 2847 p2i((void *)o), (over_tams) ? " >" : (marked) ? 
" M" : ""); 2848 PrintReachableOopClosure oopCl(_out, _vo, _all); 2849 o->oop_iterate_no_header(&oopCl); 2850 } 2851 } 2852 }; 2853 2854 class PrintReachableRegionClosure : public HeapRegionClosure { 2855 private: 2856 G1CollectedHeap* _g1h; 2857 outputStream* _out; 2858 VerifyOption _vo; 2859 bool _all; 2860 2861 public: 2862 bool doHeapRegion(HeapRegion* hr) { 2863 HeapWord* b = hr->bottom(); 2864 HeapWord* e = hr->end(); 2865 HeapWord* t = hr->top(); 2866 HeapWord* p = _g1h->top_at_mark_start(hr, _vo); 2867 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2868 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p)); 2869 _out->cr(); 2870 2871 HeapWord* from = b; 2872 HeapWord* to = t; 2873 2874 if (to > from) { 2875 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to)); 2876 _out->cr(); 2877 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2878 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2879 _out->cr(); 2880 } 2881 2882 return false; 2883 } 2884 2885 PrintReachableRegionClosure(outputStream* out, 2886 VerifyOption vo, 2887 bool all) : 2888 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } 2889 }; 2890 2891 void ConcurrentMark::print_reachable(const char* str, 2892 VerifyOption vo, 2893 bool all) { 2894 gclog_or_tty->cr(); 2895 gclog_or_tty->print_cr("== Doing heap dump... "); 2896 2897 if (G1PrintReachableBaseFile == NULL) { 2898 gclog_or_tty->print_cr(" #### error: no base file defined"); 2899 return; 2900 } 2901 2902 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2903 (JVM_MAXPATHLEN - 1)) { 2904 gclog_or_tty->print_cr(" #### error: file name too long"); 2905 return; 2906 } 2907 2908 char file_name[JVM_MAXPATHLEN]; 2909 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2910 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2911 2912 fileStream fout(file_name); 2913 if (!fout.is_open()) { 2914 gclog_or_tty->print_cr(" #### error: could not open file"); 2915 return; 2916 } 2917 2918 outputStream* out = &fout; 2919 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); 2920 out->cr(); 2921 2922 out->print_cr("--- ITERATING OVER REGIONS"); 2923 out->cr(); 2924 PrintReachableRegionClosure rcl(out, vo, all); 2925 _g1h->heap_region_iterate(&rcl); 2926 out->cr(); 2927 2928 gclog_or_tty->print_cr(" done"); 2929 gclog_or_tty->flush(); 2930 } 2931 2932 #endif // PRODUCT 2933 2934 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2935 // Note we are overriding the read-only view of the prev map here, via 2936 // the cast. 2937 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2938 } 2939 2940 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2941 _nextMarkBitMap->clearRange(mr); 2942 } 2943 2944 HeapRegion* 2945 ConcurrentMark::claim_region(uint worker_id) { 2946 // "checkpoint" the finger 2947 HeapWord* finger = _finger; 2948 2949 // _heap_end will not change underneath our feet; it only changes at 2950 // yield points. 2951 while (finger < _heap_end) { 2952 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2953 2954 // Note on how this code handles humongous regions. In the 2955 // normal case the finger will reach the start of a "starts 2956 // humongous" (SH) region. Its end will either be the end of the 2957 // last "continues humongous" (CH) region in the sequence, or the 2958 // standard end of the SH region (if the SH is the only region in 2959 // the sequence). That way claim_region() will skip over the CH 2960 // regions. 
However, there is a subtle race between a CM thread
2961 // executing this method and a mutator thread doing a humongous
2962 // object allocation. The two are not mutually exclusive as the CM
2963 // thread does not need to hold the Heap_lock when it gets
2964 // here. So there is a chance that claim_region() will come across
2965 // a free region that's in the process of becoming a SH or a CH
2966 // region. In the former case, it will either
2967 // a) Miss the update to the region's end, in which case it will
2968 // visit every subsequent CH region, will find their bitmaps
2969 // empty, and do nothing, or
2970 // b) Will observe the update of the region's end (in which case
2971 // it will skip the subsequent CH regions).
2972 // If it comes across a region that suddenly becomes CH, the
2973 // scenario will be similar to b). So, the race between
2974 // claim_region() and a humongous object allocation might force us
2975 // to do a bit of unnecessary work (due to some unnecessary bitmap
2976 // iterations) but it should not introduce any correctness issues.
2977 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2978
2979 // heap_region_containing_raw() above may return NULL, as we always scan and
2980 // claim until the end of the heap. In this case, just jump to the next region.
2981 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2982
2983 // Is the gap between reading the finger and doing the CAS too long?
2984 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2985 if (res == finger && curr_region != NULL) {
2986 // we succeeded
2987 HeapWord* bottom = curr_region->bottom();
2988 HeapWord* limit = curr_region->next_top_at_mark_start();
2989
2990 if (verbose_low()) {
2991 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2992 "["PTR_FORMAT", "PTR_FORMAT"), "
2993 "limit = "PTR_FORMAT,
2994 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2995 }
2996
2997 // Notice that _finger == end cannot be guaranteed here, since
2998 // someone else might have moved the finger even further.
2999 assert(_finger >= end, "the finger should have moved forward");
3000
3001 if (verbose_low()) {
3002 gclog_or_tty->print_cr("[%u] we were successful with region = "
3003 PTR_FORMAT, worker_id, p2i(curr_region));
3004 }
3005
3006 if (limit > bottom) {
3007 if (verbose_low()) {
3008 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
3009 "returning it ", worker_id, p2i(curr_region));
3010 }
3011 return curr_region;
3012 } else {
3013 assert(limit == bottom,
3014 "the region limit should be at bottom");
3015 if (verbose_low()) {
3016 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
3017 "returning NULL", worker_id, p2i(curr_region));
3018 }
3019 // we return NULL and the caller should try calling
3020 // claim_region() again.
3021 return NULL; 3022 } 3023 } else { 3024 assert(_finger > finger, "the finger should have moved forward"); 3025 if (verbose_low()) { 3026 if (curr_region == NULL) { 3027 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 3028 "global finger = "PTR_FORMAT", " 3029 "our finger = "PTR_FORMAT, 3030 worker_id, p2i(_finger), p2i(finger)); 3031 } else { 3032 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 3033 "global finger = "PTR_FORMAT", " 3034 "our finger = "PTR_FORMAT, 3035 worker_id, p2i(_finger), p2i(finger)); 3036 } 3037 } 3038 3039 // read it again 3040 finger = _finger; 3041 } 3042 } 3043 3044 return NULL; 3045 } 3046 3047 #ifndef PRODUCT 3048 enum VerifyNoCSetOopsPhase { 3049 VerifyNoCSetOopsStack, 3050 VerifyNoCSetOopsQueues, 3051 VerifyNoCSetOopsSATBCompleted, 3052 VerifyNoCSetOopsSATBThread 3053 }; 3054 3055 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 3056 private: 3057 G1CollectedHeap* _g1h; 3058 VerifyNoCSetOopsPhase _phase; 3059 int _info; 3060 3061 const char* phase_str() { 3062 switch (_phase) { 3063 case VerifyNoCSetOopsStack: return "Stack"; 3064 case VerifyNoCSetOopsQueues: return "Queue"; 3065 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 3066 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 3067 default: ShouldNotReachHere(); 3068 } 3069 return NULL; 3070 } 3071 3072 void do_object_work(oop obj) { 3073 guarantee(!_g1h->obj_in_cs(obj), 3074 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 3075 p2i((void*) obj), phase_str(), _info)); 3076 } 3077 3078 public: 3079 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 3080 3081 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 3082 _phase = phase; 3083 _info = info; 3084 } 3085 3086 virtual void do_oop(oop* p) { 3087 oop obj = oopDesc::load_decode_heap_oop(p); 3088 do_object_work(obj); 3089 } 3090 3091 virtual void do_oop(narrowOop* p) { 3092 // We should not come across narrow oops while scanning marking 3093 // stacks and SATB buffers. 
3094 ShouldNotReachHere(); 3095 } 3096 3097 virtual void do_object(oop obj) { 3098 do_object_work(obj); 3099 } 3100 }; 3101 3102 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 3103 bool verify_enqueued_buffers, 3104 bool verify_thread_buffers, 3105 bool verify_fingers) { 3106 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 3107 if (!G1CollectedHeap::heap()->mark_in_progress()) { 3108 return; 3109 } 3110 3111 VerifyNoCSetOopsClosure cl; 3112 3113 if (verify_stacks) { 3114 // Verify entries on the global mark stack 3115 cl.set_phase(VerifyNoCSetOopsStack); 3116 _markStack.oops_do(&cl); 3117 3118 // Verify entries on the task queues 3119 for (uint i = 0; i < _max_worker_id; i += 1) { 3120 cl.set_phase(VerifyNoCSetOopsQueues, i); 3121 CMTaskQueue* queue = _task_queues->queue(i); 3122 queue->oops_do(&cl); 3123 } 3124 } 3125 3126 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 3127 3128 // Verify entries on the enqueued SATB buffers 3129 if (verify_enqueued_buffers) { 3130 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 3131 satb_qs.iterate_completed_buffers_read_only(&cl); 3132 } 3133 3134 // Verify entries on the per-thread SATB buffers 3135 if (verify_thread_buffers) { 3136 cl.set_phase(VerifyNoCSetOopsSATBThread); 3137 satb_qs.iterate_thread_buffers_read_only(&cl); 3138 } 3139 3140 if (verify_fingers) { 3141 // Verify the global finger 3142 HeapWord* global_finger = finger(); 3143 if (global_finger != NULL && global_finger < _heap_end) { 3144 // The global finger always points to a heap region boundary. We 3145 // use heap_region_containing_raw() to get the containing region 3146 // given that the global finger could be pointing to a free region 3147 // which subsequently becomes continues humongous. If that 3148 // happens, heap_region_containing() will return the bottom of the 3149 // corresponding starts humongous region and the check below will 3150 // not hold any more. 3151 // Since we always iterate over all regions, we might get a NULL HeapRegion 3152 // here. 3153 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 3154 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 3155 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 3156 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 3157 } 3158 3159 // Verify the task fingers 3160 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 3161 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 3162 CMTask* task = _tasks[i]; 3163 HeapWord* task_finger = task->finger(); 3164 if (task_finger != NULL && task_finger < _heap_end) { 3165 // See above note on the global finger verification. 3166 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 3167 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 3168 !task_hr->in_collection_set(), 3169 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 3170 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 3171 } 3172 } 3173 } 3174 } 3175 #endif // PRODUCT 3176 3177 // Aggregate the counting data that was constructed concurrently 3178 // with marking. 
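// Conceptually (descriptive only), for each region r the aggregation
// computes
//   marked_bytes(r) = sum over workers w of marked_bytes_array[w][r]
// and ORs every worker's [start_idx, limit_idx) card bitmap stripe
// into the global card bitmap; the closure below is a direct rendering
// of that sum/union.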
3179 class AggregateCountDataHRClosure: public HeapRegionClosure {
3180 G1CollectedHeap* _g1h;
3181 ConcurrentMark* _cm;
3182 CardTableModRefBS* _ct_bs;
3183 BitMap* _cm_card_bm;
3184 uint _max_worker_id;
3185
3186 public:
3187 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
3188 BitMap* cm_card_bm,
3189 uint max_worker_id) :
3190 _g1h(g1h), _cm(g1h->concurrent_mark()),
3191 _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
3192 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
3193
3194 bool doHeapRegion(HeapRegion* hr) {
3195 if (hr->is_continues_humongous()) {
3196 // We will ignore these here and process them when their
3197 // associated "starts humongous" region is processed.
3198 // Note that we cannot rely on their associated
3199 // "starts humongous" region to have their bit set to 1
3200 // since, due to the region chunking in the parallel region
3201 // iteration, a "continues humongous" region might be visited
3202 // before its associated "starts humongous".
3203 return false;
3204 }
3205
3206 HeapWord* start = hr->bottom();
3207 HeapWord* limit = hr->next_top_at_mark_start();
3208 HeapWord* end = hr->end();
3209
3210 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
3211 err_msg("Preconditions not met - "
3212 "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
3213 "top: "PTR_FORMAT", end: "PTR_FORMAT,
3214 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
3215
3216 assert(hr->next_marked_bytes() == 0, "Precondition");
3217
3218 if (start == limit) {
3219 // NTAMS of this region has not been set so nothing to do.
3220 return false;
3221 }
3222
3223 // 'start' should be in the heap.
3224 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3225 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3226 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3227
3228 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3229 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3230 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
3231
3232 // If ntams is not card aligned then we bump the card bitmap index
3233 // for limit so that we get all the cards spanned by
3234 // the object ending at ntams.
3235 // Note: if this is the last region in the heap then ntams
3236 // could actually be just beyond the end of the heap; limit_idx
3237 // will then correspond to a (non-existent) card
3238 // that is also outside the heap.
3239 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
3240 limit_idx += 1;
3241 }
3242
3243 assert(limit_idx <= end_idx, "or else use atomics");
3244
3245 // Aggregate the "stripe" in the count data associated with hr.
3246 uint hrm_index = hr->hrm_index();
3247 size_t marked_bytes = 0;
3248
3249 for (uint i = 0; i < _max_worker_id; i += 1) {
3250 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
3251 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
3252
3253 // Fetch the marked_bytes in this region for task i and
3254 // add it to the running total for this region.
3255 marked_bytes += marked_bytes_array[hrm_index];
3256
3257 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
3258 // into the global card bitmap.
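// The scan below walks only the set bits of the worker's card bitmap in
// [start_idx, limit_idx): get_next_one_offset() skips over runs of zero
// bits, so the cost is proportional to the number of marked cards
// rather than to the width of the range.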
3259 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 3260 3261 while (scan_idx < limit_idx) { 3262 assert(task_card_bm->at(scan_idx) == true, "should be"); 3263 _cm_card_bm->set_bit(scan_idx); 3264 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 3265 3266 // BitMap::get_next_one_offset() can handle the case when 3267 // its left_offset parameter is greater than its right_offset 3268 // parameter. It does, however, have an early exit if 3269 // left_offset == right_offset. So let's limit the value 3270 // passed in for left offset here. 3271 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 3272 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 3273 } 3274 } 3275 3276 // Update the marked bytes for this region. 3277 hr->add_to_marked_bytes(marked_bytes); 3278 3279 // Next heap region 3280 return false; 3281 } 3282 }; 3283 3284 class G1AggregateCountDataTask: public AbstractGangTask { 3285 protected: 3286 G1CollectedHeap* _g1h; 3287 ConcurrentMark* _cm; 3288 BitMap* _cm_card_bm; 3289 uint _max_worker_id; 3290 int _active_workers; 3291 3292 public: 3293 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3294 ConcurrentMark* cm, 3295 BitMap* cm_card_bm, 3296 uint max_worker_id, 3297 int n_workers) : 3298 AbstractGangTask("Count Aggregation"), 3299 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3300 _max_worker_id(max_worker_id), 3301 _active_workers(n_workers) { } 3302 3303 void work(uint worker_id) { 3304 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3305 3306 if (G1CollectedHeap::use_parallel_gc_threads()) { 3307 _g1h->heap_region_par_iterate_chunked(&cl, worker_id, 3308 _active_workers, 3309 HeapRegion::AggregateCountClaimValue); 3310 } else { 3311 _g1h->heap_region_iterate(&cl); 3312 } 3313 } 3314 }; 3315 3316 3317 void ConcurrentMark::aggregate_count_data() { 3318 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? 3319 _g1h->workers()->active_workers() : 3320 1); 3321 3322 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3323 _max_worker_id, n_workers); 3324 3325 if (G1CollectedHeap::use_parallel_gc_threads()) { 3326 assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 3327 "sanity check"); 3328 _g1h->set_par_threads(n_workers); 3329 _g1h->workers()->run_task(&g1_par_agg_task); 3330 _g1h->set_par_threads(0); 3331 3332 assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue), 3333 "sanity check"); 3334 _g1h->reset_heap_region_claim_values(); 3335 } else { 3336 g1_par_agg_task.work(0); 3337 } 3338 _g1h->allocation_context_stats().update_at_remark(); 3339 } 3340 3341 // Clear the per-worker arrays used to store the per-region counting data 3342 void ConcurrentMark::clear_all_count_data() { 3343 // Clear the global card bitmap - it will be filled during 3344 // liveness count aggregation (during remark) and the 3345 // final counting task. 3346 _card_bm.clear(); 3347 3348 // Clear the global region bitmap - it will be filled as part 3349 // of the final counting task. 
3350 _region_bm.clear(); 3351 3352 uint max_regions = _g1h->max_regions(); 3353 assert(_max_worker_id > 0, "uninitialized"); 3354 3355 for (uint i = 0; i < _max_worker_id; i += 1) { 3356 BitMap* task_card_bm = count_card_bitmap_for(i); 3357 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3358 3359 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3360 assert(marked_bytes_array != NULL, "uninitialized"); 3361 3362 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3363 task_card_bm->clear(); 3364 } 3365 } 3366 3367 void ConcurrentMark::print_stats() { 3368 if (verbose_stats()) { 3369 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3370 for (size_t i = 0; i < _active_tasks; ++i) { 3371 _tasks[i]->print_stats(); 3372 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3373 } 3374 } 3375 } 3376 3377 // abandon current marking iteration due to a Full GC 3378 void ConcurrentMark::abort() { 3379 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3380 // concurrent bitmap clearing. 3381 _nextMarkBitMap->clearAll(); 3382 3383 // Note we cannot clear the previous marking bitmap here 3384 // since VerifyDuringGC verifies the objects marked during 3385 // a full GC against the previous bitmap. 3386 3387 // Clear the liveness counting data 3388 clear_all_count_data(); 3389 // Empty mark stack 3390 reset_marking_state(); 3391 for (uint i = 0; i < _max_worker_id; ++i) { 3392 _tasks[i]->clear_region_fields(); 3393 } 3394 _first_overflow_barrier_sync.abort(); 3395 _second_overflow_barrier_sync.abort(); 3396 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); 3397 if (!gc_id.is_undefined()) { 3398 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance 3399 // to detect that it was aborted. Only keep track of the id of the first GC that we aborted. 3400 _aborted_gc_id = gc_id; 3401 } 3402 _has_aborted = true; 3403 3404 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3405 satb_mq_set.abandon_partial_marking(); 3406 // This can be called either during or outside marking; we'll read 3407 // the expected_active value from the SATB queue set. 3408 satb_mq_set.set_active_all_threads( 3409 false, /* new active value */ 3410 satb_mq_set.is_active() /* expected_active */); 3411 3412 _g1h->trace_heap_after_concurrent_cycle(); 3413 _g1h->register_concurrent_cycle_end(); 3414 } 3415 3416 const GCId& ConcurrentMark::concurrent_gc_id() { 3417 if (has_aborted()) { 3418 return _aborted_gc_id; 3419 } 3420 return _g1h->gc_tracer_cm()->gc_id(); 3421 } 3422 3423 static void print_ms_time_info(const char* prefix, const char* name, 3424 NumberSeq& ns) { 3425 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3426 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3427 if (ns.num() > 0) { 3428 gclog_or_tty->print_cr("%s [std.
dev = %8.2f ms, max = %8.2f ms]", 3429 prefix, ns.sd(), ns.maximum()); 3430 } 3431 } 3432 3433 void ConcurrentMark::print_summary_info() { 3434 gclog_or_tty->print_cr(" Concurrent marking:"); 3435 print_ms_time_info(" ", "init marks", _init_times); 3436 print_ms_time_info(" ", "remarks", _remark_times); 3437 { 3438 print_ms_time_info(" ", "final marks", _remark_mark_times); 3439 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3440 3441 } 3442 print_ms_time_info(" ", "cleanups", _cleanup_times); 3443 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3444 _total_counting_time, 3445 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3446 (double)_cleanup_times.num() 3447 : 0.0)); 3448 if (G1ScrubRemSets) { 3449 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3450 _total_rs_scrub_time, 3451 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3452 (double)_cleanup_times.num() 3453 : 0.0)); 3454 } 3455 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3456 (_init_times.sum() + _remark_times.sum() + 3457 _cleanup_times.sum())/1000.0); 3458 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3459 "(%8.2f s marking).", 3460 cmThread()->vtime_accum(), 3461 cmThread()->vtime_mark_accum()); 3462 } 3463 3464 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3465 if (use_parallel_marking_threads()) { 3466 _parallel_workers->print_worker_threads_on(st); 3467 } 3468 } 3469 3470 void ConcurrentMark::print_on_error(outputStream* st) const { 3471 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3472 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3473 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3474 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3475 } 3476 3477 // We take a break if someone is trying to stop the world. 
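// A true return value means we actually yielded (and a safepoint may
// have run), so callers may need to revalidate any state a safepoint
// could have changed. Illustrative call shape only (not a real call
// site; has_work()/do_some_work() are hypothetical):
//
//   while (has_work()) {
//     do_some_work();
//     cm->do_yield_check(worker_id);
//   }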
3478 bool ConcurrentMark::do_yield_check(uint worker_id) { 3479 if (SuspendibleThreadSet::should_yield()) { 3480 if (worker_id == 0) { 3481 _g1h->g1_policy()->record_concurrent_pause(); 3482 } 3483 SuspendibleThreadSet::yield(); 3484 return true; 3485 } else { 3486 return false; 3487 } 3488 } 3489 3490 #ifndef PRODUCT 3491 // for debugging purposes 3492 void ConcurrentMark::print_finger() { 3493 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3494 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3495 for (uint i = 0; i < _max_worker_id; ++i) { 3496 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3497 } 3498 gclog_or_tty->cr(); 3499 } 3500 #endif 3501 3502 void CMTask::scan_object(oop obj) { 3503 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3504 3505 if (_cm->verbose_high()) { 3506 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, 3507 _worker_id, p2i((void*) obj)); 3508 } 3509 3510 size_t obj_size = obj->size(); 3511 _words_scanned += obj_size; 3512 3513 obj->oop_iterate(_cm_oop_closure); 3514 statsOnly( ++_objs_scanned ); 3515 check_limits(); 3516 } 3517 3518 // Closure for iteration over bitmaps 3519 class CMBitMapClosure : public BitMapClosure { 3520 private: 3521 // the bitmap that is being iterated over 3522 CMBitMap* _nextMarkBitMap; 3523 ConcurrentMark* _cm; 3524 CMTask* _task; 3525 3526 public: 3527 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3528 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3529 3530 bool do_bit(size_t offset) { 3531 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3532 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3533 assert( addr < _cm->finger(), "invariant"); 3534 3535 statsOnly( _task->increase_objs_found_on_bitmap() ); 3536 assert(addr >= _task->finger(), "invariant"); 3537 3538 // We move that task's local finger along. 
3539 _task->move_finger_to(addr); 3540 3541 _task->scan_object(oop(addr)); 3542 // we only partially drain the local queue and global stack 3543 _task->drain_local_queue(true); 3544 _task->drain_global_stack(true); 3545 3546 // if the has_aborted flag has been raised, we need to bail out of 3547 // the iteration 3548 return !_task->has_aborted(); 3549 } 3550 }; 3551 3552 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3553 ConcurrentMark* cm, 3554 CMTask* task) 3555 : _g1h(g1h), _cm(cm), _task(task) { 3556 assert(_ref_processor == NULL, "should be initialized to NULL"); 3557 3558 if (G1UseConcMarkReferenceProcessing) { 3559 _ref_processor = g1h->ref_processor_cm(); 3560 assert(_ref_processor != NULL, "should not be NULL"); 3561 } 3562 } 3563 3564 void CMTask::setup_for_region(HeapRegion* hr) { 3565 assert(hr != NULL, 3566 "claim_region() should have filtered out NULL regions"); 3567 assert(!hr->is_continues_humongous(), 3568 "claim_region() should have filtered out continues humongous regions"); 3569 3570 if (_cm->verbose_low()) { 3571 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3572 _worker_id, p2i(hr)); 3573 } 3574 3575 _curr_region = hr; 3576 _finger = hr->bottom(); 3577 update_region_limit(); 3578 } 3579 3580 void CMTask::update_region_limit() { 3581 HeapRegion* hr = _curr_region; 3582 HeapWord* bottom = hr->bottom(); 3583 HeapWord* limit = hr->next_top_at_mark_start(); 3584 3585 if (limit == bottom) { 3586 if (_cm->verbose_low()) { 3587 gclog_or_tty->print_cr("[%u] found an empty region " 3588 "["PTR_FORMAT", "PTR_FORMAT")", 3589 _worker_id, p2i(bottom), p2i(limit)); 3590 } 3591 // The region was collected underneath our feet. 3592 // We set the finger to bottom to ensure that the bitmap 3593 // iteration that will follow this will not do anything. 3594 // (this is not a condition that holds when we set the region up, 3595 // as the region is not supposed to be empty in the first place) 3596 _finger = bottom; 3597 } else if (limit >= _region_limit) { 3598 assert(limit >= _finger, "peace of mind"); 3599 } else { 3600 assert(limit < _region_limit, "only way to get here"); 3601 // This can happen under some pretty unusual circumstances. An 3602 // evacuation pause empties the region underneath our feet (NTAMS 3603 // at bottom). We then do some allocation in the region (NTAMS 3604 // stays at bottom), followed by the region being used as a GC 3605 // alloc region (NTAMS will move to top() and the objects 3606 // originally below it will be grayed). All objects now marked in 3607 // the region are explicitly grayed, if below the global finger, 3608 // and in fact we do not need to scan anything else. So, we simply 3609 // set _finger to be limit to ensure that the bitmap iteration 3610 // doesn't do anything. 3611 _finger = limit; 3612 } 3613 3614 _region_limit = limit; 3615 } 3616 3617 void CMTask::giveup_current_region() { 3618 assert(_curr_region != NULL, "invariant"); 3619 if (_cm->verbose_low()) { 3620 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, 3621 _worker_id, p2i(_curr_region)); 3622 } 3623 clear_region_fields(); 3624 } 3625 3626 void CMTask::clear_region_fields() { 3627 // Values for these three fields that indicate that we're not 3628 // holding on to a region.
3629 _curr_region = NULL; 3630 _finger = NULL; 3631 _region_limit = NULL; 3632 } 3633 3634 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3635 if (cm_oop_closure == NULL) { 3636 assert(_cm_oop_closure != NULL, "invariant"); 3637 } else { 3638 assert(_cm_oop_closure == NULL, "invariant"); 3639 } 3640 _cm_oop_closure = cm_oop_closure; 3641 } 3642 3643 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3644 guarantee(nextMarkBitMap != NULL, "invariant"); 3645 3646 if (_cm->verbose_low()) { 3647 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3648 } 3649 3650 _nextMarkBitMap = nextMarkBitMap; 3651 clear_region_fields(); 3652 3653 _calls = 0; 3654 _elapsed_time_ms = 0.0; 3655 _termination_time_ms = 0.0; 3656 _termination_start_time_ms = 0.0; 3657 3658 #if _MARKING_STATS_ 3659 _local_pushes = 0; 3660 _local_pops = 0; 3661 _local_max_size = 0; 3662 _objs_scanned = 0; 3663 _global_pushes = 0; 3664 _global_pops = 0; 3665 _global_max_size = 0; 3666 _global_transfers_to = 0; 3667 _global_transfers_from = 0; 3668 _regions_claimed = 0; 3669 _objs_found_on_bitmap = 0; 3670 _satb_buffers_processed = 0; 3671 _steal_attempts = 0; 3672 _steals = 0; 3673 _aborted = 0; 3674 _aborted_overflow = 0; 3675 _aborted_cm_aborted = 0; 3676 _aborted_yield = 0; 3677 _aborted_timed_out = 0; 3678 _aborted_satb = 0; 3679 _aborted_termination = 0; 3680 #endif // _MARKING_STATS_ 3681 } 3682 3683 bool CMTask::should_exit_termination() { 3684 regular_clock_call(); 3685 // This is called when we are in the termination protocol. We should 3686 // quit if, for some reason, this task wants to abort or the global 3687 // stack is not empty (this means that we can get work from it). 3688 return !_cm->mark_stack_empty() || has_aborted(); 3689 } 3690 3691 void CMTask::reached_limit() { 3692 assert(_words_scanned >= _words_scanned_limit || 3693 _refs_reached >= _refs_reached_limit , 3694 "shouldn't have been called otherwise"); 3695 regular_clock_call(); 3696 } 3697 3698 void CMTask::regular_clock_call() { 3699 if (has_aborted()) return; 3700 3701 // First, we need to recalculate the words scanned and refs reached 3702 // limits for the next clock call. 3703 recalculate_limits(); 3704 3705 // During the regular clock call we do the following 3706 3707 // (1) If an overflow has been flagged, then we abort. 3708 if (_cm->has_overflown()) { 3709 set_has_aborted(); 3710 return; 3711 } 3712 3713 // If we are not concurrent (i.e. we're doing remark) we don't need 3714 // to check anything else. The other steps are only needed during 3715 // the concurrent marking phase. 3716 if (!concurrent()) return; 3717 3718 // (2) If marking has been aborted for Full GC, then we also abort. 3719 if (_cm->has_aborted()) { 3720 set_has_aborted(); 3721 statsOnly( ++_aborted_cm_aborted ); 3722 return; 3723 } 3724 3725 double curr_time_ms = os::elapsedVTime() * 1000.0; 3726 3727 // (3) If marking stats are enabled, then we update the step history. 
3728 #if _MARKING_STATS_ 3729 if (_words_scanned >= _words_scanned_limit) { 3730 ++_clock_due_to_scanning; 3731 } 3732 if (_refs_reached >= _refs_reached_limit) { 3733 ++_clock_due_to_marking; 3734 } 3735 3736 double last_interval_ms = curr_time_ms - _interval_start_time_ms; 3737 _interval_start_time_ms = curr_time_ms; 3738 _all_clock_intervals_ms.add(last_interval_ms); 3739 3740 if (_cm->verbose_medium()) { 3741 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, " 3742 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s", 3743 _worker_id, last_interval_ms, 3744 _words_scanned, 3745 (_words_scanned >= _words_scanned_limit) ? " (*)" : "", 3746 _refs_reached, 3747 (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); 3748 } 3749 #endif // _MARKING_STATS_ 3750 3751 // (4) We check whether we should yield. If we have to, then we abort. 3752 if (SuspendibleThreadSet::should_yield()) { 3753 // We should yield. To do this we abort the task. The caller is 3754 // responsible for yielding. 3755 set_has_aborted(); 3756 statsOnly( ++_aborted_yield ); 3757 return; 3758 } 3759 3760 // (5) We check whether we've reached our time quota. If we have, 3761 // then we abort. 3762 double elapsed_time_ms = curr_time_ms - _start_time_ms; 3763 if (elapsed_time_ms > _time_target_ms) { 3764 set_has_aborted(); 3765 _has_timed_out = true; 3766 statsOnly( ++_aborted_timed_out ); 3767 return; 3768 } 3769 3770 // (6) Finally, we check whether there are enough completed SATB 3771 // buffers available for processing. If there are, we abort. 3772 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3773 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 3774 if (_cm->verbose_low()) { 3775 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers", 3776 _worker_id); 3777 } 3778 // we do need to process SATB buffers, so we'll abort and restart 3779 // the marking task to do so 3780 set_has_aborted(); 3781 statsOnly( ++_aborted_satb ); 3782 return; 3783 } 3784 } 3785 3786 void CMTask::recalculate_limits() { 3787 _real_words_scanned_limit = _words_scanned + words_scanned_period; 3788 _words_scanned_limit = _real_words_scanned_limit; 3789 3790 _real_refs_reached_limit = _refs_reached + refs_reached_period; 3791 _refs_reached_limit = _real_refs_reached_limit; 3792 } 3793 3794 void CMTask::decrease_limits() { 3795 // This is called when we believe that we're going to do an infrequent 3796 // operation which will increase the per-byte scanned cost (i.e. move 3797 // entries to/from the global stack). It basically tries to decrease the 3798 // scanning limit so that the clock is called earlier.
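// Concretely: with a work period P, each limit below is pulled back
// from its real limit to real_limit - 3P/4, so (since real_limit is
// the work counter plus P) the next regular_clock_call() fires after
// roughly P/4 more units of work instead of a full period.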
3799 3800 if (_cm->verbose_medium()) { 3801 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id); 3802 } 3803 3804 _words_scanned_limit = _real_words_scanned_limit - 3805 3 * words_scanned_period / 4; 3806 _refs_reached_limit = _real_refs_reached_limit - 3807 3 * refs_reached_period / 4; 3808 } 3809 3810 void CMTask::move_entries_to_global_stack() { 3811 // local array where we'll store the entries that will be popped 3812 // from the local queue 3813 oop buffer[global_stack_transfer_size]; 3814 3815 int n = 0; 3816 oop obj; 3817 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 3818 buffer[n] = obj; 3819 ++n; 3820 } 3821 3822 if (n > 0) { 3823 // we popped at least one entry from the local queue 3824 3825 statsOnly( ++_global_transfers_to; _local_pops += n ); 3826 3827 if (!_cm->mark_stack_push(buffer, n)) { 3828 if (_cm->verbose_low()) { 3829 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow", 3830 _worker_id); 3831 } 3832 set_has_aborted(); 3833 } else { 3834 // the transfer was successful 3835 3836 if (_cm->verbose_medium()) { 3837 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack", 3838 _worker_id, n); 3839 } 3840 statsOnly( int tmp_size = _cm->mark_stack_size(); 3841 if (tmp_size > _global_max_size) { 3842 _global_max_size = tmp_size; 3843 } 3844 _global_pushes += n ); 3845 } 3846 } 3847 3848 // this operation was quite expensive, so decrease the limits 3849 decrease_limits(); 3850 } 3851 3852 void CMTask::get_entries_from_global_stack() { 3853 // local array where we'll store the entries that will be popped 3854 // from the global stack. 3855 oop buffer[global_stack_transfer_size]; 3856 int n; 3857 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 3858 assert(n <= global_stack_transfer_size, 3859 "we should not pop more than the given limit"); 3860 if (n > 0) { 3861 // yes, we did actually pop at least one entry 3862 3863 statsOnly( ++_global_transfers_from; _global_pops += n ); 3864 if (_cm->verbose_medium()) { 3865 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack", 3866 _worker_id, n); 3867 } 3868 for (int i = 0; i < n; ++i) { 3869 bool success = _task_queue->push(buffer[i]); 3870 // We only call this when the local queue is empty or under a 3871 // given target limit. So, we do not expect this push to fail. 3872 assert(success, "invariant"); 3873 } 3874 3875 statsOnly( int tmp_size = _task_queue->size(); 3876 if (tmp_size > _local_max_size) { 3877 _local_max_size = tmp_size; 3878 } 3879 _local_pushes += n ); 3880 } 3881 3882 // this operation was quite expensive, so decrease the limits 3883 decrease_limits(); 3884 } 3885 3886 void CMTask::drain_local_queue(bool partially) { 3887 if (has_aborted()) return; 3888 3889 // Decide what the target size is, depending on whether we're going to 3890 // drain it partially (so that other tasks can steal if they run out 3891 // of things to do) or totally (at the very end).
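// For illustration (capacity value assumed, not normative): with a
// local queue of max_elems == 16K entries, a partial drain stops once
// the queue is down to MIN2(16K / 3, GCDrainStackTargetSize) entries,
// leaving entries behind for stealing, while a total drain
// (target_size == 0) empties the queue completely.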
3892 size_t target_size; 3893 if (partially) { 3894 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 3895 } else { 3896 target_size = 0; 3897 } 3898 3899 if (_task_queue->size() > target_size) { 3900 if (_cm->verbose_high()) { 3901 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT, 3902 _worker_id, target_size); 3903 } 3904 3905 oop obj; 3906 bool ret = _task_queue->pop_local(obj); 3907 while (ret) { 3908 statsOnly( ++_local_pops ); 3909 3910 if (_cm->verbose_high()) { 3911 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id, 3912 p2i((void*) obj)); 3913 } 3914 3915 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 3916 assert(!_g1h->is_on_master_free_list( 3917 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 3918 3919 scan_object(obj); 3920 3921 if (_task_queue->size() <= target_size || has_aborted()) { 3922 ret = false; 3923 } else { 3924 ret = _task_queue->pop_local(obj); 3925 } 3926 } 3927 3928 if (_cm->verbose_high()) { 3929 gclog_or_tty->print_cr("[%u] drained local queue, size = %u", 3930 _worker_id, _task_queue->size()); 3931 } 3932 } 3933 } 3934 3935 void CMTask::drain_global_stack(bool partially) { 3936 if (has_aborted()) return; 3937 3938 // We have a policy to drain the local queue before we attempt to 3939 // drain the global stack. 3940 assert(partially || _task_queue->size() == 0, "invariant"); 3941 3942 // Decide what the target size is, depending on whether we're going to 3943 // drain it partially (so that other tasks can steal if they run out 3944 // of things to do) or totally (at the very end). Notice that, 3945 // because we move entries from the global stack in chunks or 3946 // because another task might be doing the same, we might in fact 3947 // drop below the target. But, this is not a problem. 3948 size_t target_size; 3949 if (partially) { 3950 target_size = _cm->partial_mark_stack_size_target(); 3951 } else { 3952 target_size = 0; 3953 } 3954 3955 if (_cm->mark_stack_size() > target_size) { 3956 if (_cm->verbose_low()) { 3957 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT, 3958 _worker_id, target_size); 3959 } 3960 3961 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 3962 get_entries_from_global_stack(); 3963 drain_local_queue(partially); 3964 } 3965 3966 if (_cm->verbose_low()) { 3967 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT, 3968 _worker_id, _cm->mark_stack_size()); 3969 } 3970 } 3971 } 3972 3973 // The SATB queue code has several assumptions about whether to call the par or 3974 // non-par versions of the methods. This is why some of the code is 3975 // replicated. We should really get rid of the single-threaded version 3976 // of the code to simplify things. 3977 void CMTask::drain_satb_buffers() { 3978 if (has_aborted()) return; 3979 3980 // We set this so that the regular clock knows that we're in the 3981 // middle of draining buffers and doesn't set the abort flag when it 3982 // notices that SATB buffers are available for draining. It'd be 3983 // very counterproductive if it did that.
:-) 3984 _draining_satb_buffers = true; 3985 3986 CMObjectClosure oc(this); 3987 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3988 if (G1CollectedHeap::use_parallel_gc_threads()) { 3989 satb_mq_set.set_par_closure(_worker_id, &oc); 3990 } else { 3991 satb_mq_set.set_closure(&oc); 3992 } 3993 3994 // This keeps claiming and applying the closure to completed buffers 3995 // until we run out of buffers or we need to abort. 3996 if (G1CollectedHeap::use_parallel_gc_threads()) { 3997 while (!has_aborted() && 3998 satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) { 3999 if (_cm->verbose_medium()) { 4000 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 4001 } 4002 statsOnly( ++_satb_buffers_processed ); 4003 regular_clock_call(); 4004 } 4005 } else { 4006 while (!has_aborted() && 4007 satb_mq_set.apply_closure_to_completed_buffer()) { 4008 if (_cm->verbose_medium()) { 4009 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 4010 } 4011 statsOnly( ++_satb_buffers_processed ); 4012 regular_clock_call(); 4013 } 4014 } 4015 4016 _draining_satb_buffers = false; 4017 4018 assert(has_aborted() || 4019 concurrent() || 4020 satb_mq_set.completed_buffers_num() == 0, "invariant"); 4021 4022 if (G1CollectedHeap::use_parallel_gc_threads()) { 4023 satb_mq_set.set_par_closure(_worker_id, NULL); 4024 } else { 4025 satb_mq_set.set_closure(NULL); 4026 } 4027 4028 // again, this was a potentially expensive operation, decrease the 4029 // limits to get the regular clock call early 4030 decrease_limits(); 4031 } 4032 4033 void CMTask::print_stats() { 4034 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 4035 _worker_id, _calls); 4036 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 4037 _elapsed_time_ms, _termination_time_ms); 4038 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 4039 _step_times_ms.num(), _step_times_ms.avg(), 4040 _step_times_ms.sd()); 4041 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 4042 _step_times_ms.maximum(), _step_times_ms.sum()); 4043 4044 #if _MARKING_STATS_ 4045 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 4046 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 4047 _all_clock_intervals_ms.sd()); 4048 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 4049 _all_clock_intervals_ms.maximum(), 4050 _all_clock_intervals_ms.sum()); 4051 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", 4052 _clock_due_to_scanning, _clock_due_to_marking); 4053 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", 4054 _objs_scanned, _objs_found_on_bitmap); 4055 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", 4056 _local_pushes, _local_pops, _local_max_size); 4057 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", 4058 _global_pushes, _global_pops, _global_max_size); 4059 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", 4060 _global_transfers_to,_global_transfers_from); 4061 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed); 4062 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); 4063 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", 4064 _steal_attempts, _steals); 4065 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); 4066 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", 
4067 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 4068 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", 4069 _aborted_timed_out, _aborted_satb, _aborted_termination); 4070 #endif // _MARKING_STATS_ 4071 } 4072 4073 /***************************************************************************** 4074 4075 The do_marking_step(time_target_ms, ...) method is the building 4076 block of the parallel marking framework. It can be called in parallel 4077 with other invocations of do_marking_step() on different tasks 4078 (but only one per task, obviously) and concurrently with the 4079 mutator threads, or during remark; hence it eliminates the need 4080 for two versions of the code. When called during remark, it will 4081 pick up from where the task left off during the concurrent marking 4082 phase. Interestingly, tasks are also claimable during evacuation 4083 pauses, since do_marking_step() ensures that it aborts before 4084 it needs to yield. 4085 4086 The data structures that it uses to do marking work are the 4087 following: 4088 4089 (1) Marking Bitmap. If there are gray objects that appear only 4090 on the bitmap (this happens either when dealing with an overflow 4091 or when the initial marking phase has simply marked the roots 4092 and didn't push them on the stack), then tasks claim heap 4093 regions whose bitmap they then scan to find gray objects. A 4094 global finger indicates where the end of the last claimed region 4095 is. A local finger indicates how far into the region a task has 4096 scanned. The two fingers are used to determine how to gray an 4097 object (i.e. whether simply marking it is OK, as it will be 4098 visited by a task in the future, or whether it also needs to be 4099 pushed on a stack). 4100 4101 (2) Local Queue. The task's local queue, which it can access 4102 reasonably efficiently. Other tasks can steal from 4103 it when they run out of work. Throughout the marking phase, a 4104 task attempts to keep its local queue short but not totally 4105 empty, so that entries are available for stealing by other 4106 tasks. Only when there is no more work will a task totally 4107 drain its local queue. 4108 4109 (3) Global Mark Stack. This handles local queue overflow. During 4110 marking only sets of entries are moved between it and the local 4111 queues, as access to it requires a mutex and more fine-grained 4112 interaction with it might cause contention. If it 4113 overflows, then the marking phase should restart and iterate 4114 over the bitmap to identify gray objects. Throughout the marking 4115 phase, tasks attempt to keep the global mark stack at a small 4116 length but not totally empty, so that entries are available for 4117 popping by other tasks. Only when there is no more work will tasks 4118 totally drain the global mark stack. 4119 4120 (4) SATB Buffer Queue. This is where completed SATB buffers are 4121 made available. Buffers are regularly removed from this queue 4122 and scanned for roots, so that the queue doesn't get too 4123 long. During remark, all completed buffers are processed, as 4124 well as the filled-in parts of any uncompleted buffers. 4125 4126 The do_marking_step() method tries to abort when the time target 4127 has been reached. There are a few other cases when the 4128 do_marking_step() method also aborts: 4129 4130 (1) When the marking phase has been aborted (after a Full GC). 4131 4132 (2) When a global overflow (on the global stack) has been 4133 triggered.
Before the task aborts, it will actually sync up with 4134 the other tasks to ensure that all the marking data structures 4135 (local queues, stacks, fingers etc.) are re-initialized so that 4136 when do_marking_step() completes, the marking phase can 4137 immediately restart. 4138 4139 (3) When enough completed SATB buffers are available. The 4140 do_marking_step() method only tries to drain SATB buffers right 4141 at the beginning. So, if enough buffers are available, the 4142 marking step aborts and the SATB buffers are processed at 4143 the beginning of the next invocation. 4144 4145 (4) To yield. When we have to yield, we abort and yield 4146 right at the end of do_marking_step(). This saves us a lot 4147 of hassle as, by yielding, we might allow a Full GC. If this 4148 happens then objects will be compacted underneath our feet, the 4149 heap might shrink, etc. We save checking for this by just 4150 aborting and doing the yield right at the end. 4151 4152 From the above it follows that the do_marking_step() method should 4153 be called in a loop (or, otherwise, regularly) until it completes. 4154 4155 If a marking step completes without its has_aborted() flag being 4156 true, it means it has completed the current marking phase (and 4157 also all other marking tasks have done so and have all synced up). 4158 4159 A method called regular_clock_call() is invoked "regularly" (in 4160 sub-ms intervals) throughout marking. It is this clock method that 4161 checks all the abort conditions which were mentioned above and 4162 decides when the task should abort. A work-based scheme is used to 4163 trigger this clock method: when the number of object words the 4164 marking phase has scanned or the number of references the marking 4165 phase has visited reaches a given limit. Additional invocations of 4166 the clock method have been planted in a few other strategic places 4167 too. The initial reason for the clock method was to avoid calling 4168 vtime too regularly, as it is quite expensive. So, once it was in 4169 place, it was natural to piggy-back all the other conditions on it 4170 too and not constantly check them throughout the code. 4171 4172 If do_termination is true then do_marking_step will enter its 4173 termination protocol. 4174 4175 The value of is_serial must be true when do_marking_step is being 4176 called serially (i.e. by the VMThread) and do_marking_step should 4177 skip any synchronization in the termination and overflow code. 4178 Examples include the serial remark code and the serial reference 4179 processing closures. 4180 4181 The value of is_serial must be false when do_marking_step is 4182 being called by any of the worker threads in a work gang. 4183 Examples include the concurrent marking code (CMMarkingTask), 4184 the MT remark code, and the MT reference processing closures.
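 A minimal sketch of the driving loop a caller is expected to run
 (illustrative only; the real call sites, e.g. the concurrent marking
 task, also handle timing and yielding):

   do {
     task->do_marking_step(target_ms,
                           true,    // do_termination
                           false);  // is_serial
     ... yield to safepoints / handle restarts as needed ...
   } while (task->has_aborted() && !cm->has_aborted());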
4185 4186 *****************************************************************************/ 4187 4188 void CMTask::do_marking_step(double time_target_ms, 4189 bool do_termination, 4190 bool is_serial) { 4191 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 4192 assert(concurrent() == _cm->concurrent(), "they should be the same"); 4193 4194 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); 4195 assert(_task_queues != NULL, "invariant"); 4196 assert(_task_queue != NULL, "invariant"); 4197 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); 4198 4199 assert(!_claimed, 4200 "only one thread should claim this task at any one time"); 4201 4202 // OK, this doesn't safeguard against all possible scenarios, as it is 4203 // possible for two threads to set the _claimed flag at the same 4204 // time. But it is only for debugging purposes anyway and it will 4205 // catch most problems. 4206 _claimed = true; 4207 4208 _start_time_ms = os::elapsedVTime() * 1000.0; 4209 statsOnly( _interval_start_time_ms = _start_time_ms ); 4210 4211 // If do_stealing is true then do_marking_step will attempt to 4212 // steal work from the other CMTasks. It only makes sense to 4213 // enable stealing when the termination protocol is enabled 4214 // and do_marking_step() is not being called serially. 4215 bool do_stealing = do_termination && !is_serial; 4216 4217 double diff_prediction_ms = 4218 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 4219 _time_target_ms = time_target_ms - diff_prediction_ms; 4220 4221 // set up the variables that are used in the work-based scheme to 4222 // call the regular clock method 4223 _words_scanned = 0; 4224 _refs_reached = 0; 4225 recalculate_limits(); 4226 4227 // clear all flags 4228 clear_has_aborted(); 4229 _has_timed_out = false; 4230 _draining_satb_buffers = false; 4231 4232 ++_calls; 4233 4234 if (_cm->verbose_low()) { 4235 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 4236 "target = %1.2lfms >>>>>>>>>>", 4237 _worker_id, _calls, _time_target_ms); 4238 } 4239 4240 // Set up the bitmap and oop closures. Anything that uses them is 4241 // eventually called from this method, so it is OK to allocate these 4242 // statically. 4243 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 4244 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 4245 set_cm_oop_closure(&cm_oop_closure); 4246 4247 if (_cm->has_overflown()) { 4248 // This can happen if the mark stack overflows during a GC pause 4249 // and this task, after a yield point, restarts. We have to abort 4250 // as we need to get into the overflow protocol which happens 4251 // right at the end of this task. 4252 set_has_aborted(); 4253 } 4254 4255 // First drain any available SATB buffers. After this, we will not 4256 // look at SATB buffers before the next invocation of this method. 4257 // If enough completed SATB buffers are queued up, the regular clock 4258 // will abort this task so that it restarts. 4259 drain_satb_buffers(); 4260 // ...then partially drain the local queue and the global stack 4261 drain_local_queue(true); 4262 drain_global_stack(true); 4263 4264 do { 4265 if (!has_aborted() && _curr_region != NULL) { 4266 // This means that we're already holding on to a region. 4267 assert(_finger != NULL, "if region is not NULL, then the finger " 4268 "should not be NULL either"); 4269 4270 // We might have restarted this task after an evacuation pause 4271 // which might have evacuated the region we're holding on to 4272 // underneath our feet.
Let's read its limit again to make sure 4273 // that we do not iterate over a region of the heap that 4274 // contains garbage (update_region_limit() will also move 4275 // _finger to the start of the region if it is found empty). 4276 update_region_limit(); 4277 // We will start from _finger not from the start of the region, 4278 // as we might be restarting this task after aborting half-way 4279 // through scanning this region. In this case, _finger points to 4280 // the address where we last found a marked object. If this is a 4281 // fresh region, _finger points to start(). 4282 MemRegion mr = MemRegion(_finger, _region_limit); 4283 4284 if (_cm->verbose_low()) { 4285 gclog_or_tty->print_cr("[%u] we're scanning part " 4286 "["PTR_FORMAT", "PTR_FORMAT") " 4287 "of region "HR_FORMAT, 4288 _worker_id, p2i(_finger), p2i(_region_limit), 4289 HR_FORMAT_PARAMS(_curr_region)); 4290 } 4291 4292 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 4293 "humongous regions should go around loop once only"); 4294 4295 // Some special cases: 4296 // If the memory region is empty, we can just give up the region. 4297 // If the current region is humongous then we only need to check 4298 // the bitmap for the bit associated with the start of the object, 4299 // scan the object if it's live, and give up the region. 4300 // Otherwise, let's iterate over the bitmap of the part of the region 4301 // that is left. 4302 // If the iteration is successful, give up the region. 4303 if (mr.is_empty()) { 4304 giveup_current_region(); 4305 regular_clock_call(); 4306 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 4307 if (_nextMarkBitMap->isMarked(mr.start())) { 4308 // The object is marked - apply the closure 4309 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4310 bitmap_closure.do_bit(offset); 4311 } 4312 // Even if this task aborted while scanning the humongous object 4313 // we can (and should) give up the current region. 4314 giveup_current_region(); 4315 regular_clock_call(); 4316 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4317 giveup_current_region(); 4318 regular_clock_call(); 4319 } else { 4320 assert(has_aborted(), "currently the only way to do so"); 4321 // The only way to abort the bitmap iteration is to return 4322 // false from the do_bit() method. However, inside the 4323 // do_bit() method we move the _finger to point to the 4324 // object currently being looked at. So, if we bail out, we 4325 // have definitely set _finger to something non-null. 4326 assert(_finger != NULL, "invariant"); 4327 4328 // Region iteration was actually aborted. So now _finger 4329 // points to the address of the object we last scanned. If we 4330 // leave it there, when we restart this task, we will rescan 4331 // the object. It is easy to avoid this. We move the finger by 4332 // enough to point to the next possible object header (the 4333 // bitmap knows by how much we need to move it as it knows its 4334 // granularity). 4335 assert(_finger < _region_limit, "invariant"); 4336 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4337 // Check if bitmap iteration was aborted while scanning the last object 4338 if (new_finger >= _region_limit) { 4339 giveup_current_region(); 4340 } else { 4341 move_finger_to(new_finger); 4342 } 4343 } 4344 } 4345 // At this point we have either completed iterating over the 4346 // region we were holding on to, or we have aborted. 
4347 4348 // We then partially drain the local queue and the global stack. 4349 // (Do we really need this?) 4350 drain_local_queue(true); 4351 drain_global_stack(true); 4352 4353 // Read the note on the claim_region() method about why it might 4354 // return NULL with potentially more regions available for 4355 // claiming and why we have to check out_of_regions() to determine 4356 // whether we're done or not. 4357 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4358 // We are going to try to claim a new region. We should have 4359 // given up on the previous one. 4360 // Separated the asserts so that we know which one fires. 4361 assert(_curr_region == NULL, "invariant"); 4362 assert(_finger == NULL, "invariant"); 4363 assert(_region_limit == NULL, "invariant"); 4364 if (_cm->verbose_low()) { 4365 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id); 4366 } 4367 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 4368 if (claimed_region != NULL) { 4369 // Yes, we managed to claim one 4370 statsOnly( ++_regions_claimed ); 4371 4372 if (_cm->verbose_low()) { 4373 gclog_or_tty->print_cr("[%u] we successfully claimed " 4374 "region "PTR_FORMAT, 4375 _worker_id, p2i(claimed_region)); 4376 } 4377 4378 setup_for_region(claimed_region); 4379 assert(_curr_region == claimed_region, "invariant"); 4380 } 4381 // It is important to call the regular clock here. It might take 4382 // a while to claim a region if, for example, we hit a large 4383 // block of empty regions. So we need to call the regular clock 4384 // method once round the loop to make sure it's called 4385 // frequently enough. 4386 regular_clock_call(); 4387 } 4388 4389 if (!has_aborted() && _curr_region == NULL) { 4390 assert(_cm->out_of_regions(), 4391 "at this point we should be out of regions"); 4392 } 4393 } while ( _curr_region != NULL && !has_aborted()); 4394 4395 if (!has_aborted()) { 4396 // We cannot check whether the global stack is empty, since other 4397 // tasks might be pushing objects to it concurrently. 4398 assert(_cm->out_of_regions(), 4399 "at this point we should be out of regions"); 4400 4401 if (_cm->verbose_low()) { 4402 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id); 4403 } 4404 4405 // Try to reduce the number of available SATB buffers so that 4406 // remark has less work to do. 4407 drain_satb_buffers(); 4408 } 4409 4410 // Since we've done everything else, we can now totally drain the 4411 // local queue and global stack. 4412 drain_local_queue(false); 4413 drain_global_stack(false); 4414 4415 // Attempt at work stealing from other tasks' queues. 4416 if (do_stealing && !has_aborted()) { 4417 // We have not aborted. This means that we have finished all that 4418 // we could. Let's try to do some stealing... 4419 4420 // We cannot check whether the global stack is empty, since other 4421 // tasks might be pushing objects to it concurrently.
4422 assert(_cm->out_of_regions() && _task_queue->size() == 0, 4423 "only way to reach here"); 4424 4425 if (_cm->verbose_low()) { 4426 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id); 4427 } 4428 4429 while (!has_aborted()) { 4430 oop obj; 4431 statsOnly( ++_steal_attempts ); 4432 4433 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 4434 if (_cm->verbose_medium()) { 4435 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully", 4436 _worker_id, p2i((void*) obj)); 4437 } 4438 4439 statsOnly( ++_steals ); 4440 4441 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 4442 "any stolen object should be marked"); 4443 scan_object(obj); 4444 4445 // And since we're towards the end, let's totally drain the 4446 // local queue and global stack. 4447 drain_local_queue(false); 4448 drain_global_stack(false); 4449 } else { 4450 break; 4451 } 4452 } 4453 } 4454 4455 // If we are about to wrap up and go into termination, check if we 4456 // should raise the overflow flag. 4457 if (do_termination && !has_aborted()) { 4458 if (_cm->force_overflow()->should_force()) { 4459 _cm->set_has_overflown(); 4460 regular_clock_call(); 4461 } 4462 } 4463 4464 // We still haven't aborted. Now, let's try to get into the 4465 // termination protocol. 4466 if (do_termination && !has_aborted()) { 4467 // We cannot check whether the global stack is empty, since other 4468 // tasks might be concurrently pushing objects on it. 4469 // Separated the asserts so that we know which one fires. 4470 assert(_cm->out_of_regions(), "only way to reach here"); 4471 assert(_task_queue->size() == 0, "only way to reach here"); 4472 4473 if (_cm->verbose_low()) { 4474 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); 4475 } 4476 4477 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 4478 4479 // The CMTask class also extends the TerminatorTerminator class, 4480 // hence its should_exit_termination() method will also decide 4481 // whether to exit the termination protocol or not. 4482 bool finished = (is_serial || 4483 _cm->terminator()->offer_termination(this)); 4484 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 4485 _termination_time_ms += 4486 termination_end_time_ms - _termination_start_time_ms; 4487 4488 if (finished) { 4489 // We're all done. 4490 4491 if (_worker_id == 0) { 4492 // let's allow task 0 to do this 4493 if (concurrent()) { 4494 assert(_cm->concurrent_marking_in_progress(), "invariant"); 4495 // we need to set this to false before the next 4496 // safepoint. This way we ensure that the marking phase 4497 // doesn't observe any more heap expansions. 4498 _cm->clear_concurrent_marking_in_progress(); 4499 } 4500 } 4501 4502 // We can now guarantee that the global stack is empty, since 4503 // all other tasks have finished. We separated the guarantees so 4504 // that, if a condition is false, we can immediately find out 4505 // which one. 4506 guarantee(_cm->out_of_regions(), "only way to reach here"); 4507 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 4508 guarantee(_task_queue->size() == 0, "only way to reach here"); 4509 guarantee(!_cm->has_overflown(), "only way to reach here"); 4510 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 4511 4512 if (_cm->verbose_low()) { 4513 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); 4514 } 4515 } else { 4516 // Apparently there's more work to do. Let's abort this task. It 4517 // will restart it and we can hopefully find more things to do. 
4518 4519 if (_cm->verbose_low()) { 4520 gclog_or_tty->print_cr("[%u] apparently there is more work to do", 4521 _worker_id); 4522 } 4523 4524 set_has_aborted(); 4525 statsOnly( ++_aborted_termination ); 4526 } 4527 } 4528 4529 // Mainly for debugging purposes to make sure that a pointer to the 4530 // closure which was statically allocated in this frame doesn't 4531 // escape it by accident. 4532 set_cm_oop_closure(NULL); 4533 double end_time_ms = os::elapsedVTime() * 1000.0; 4534 double elapsed_time_ms = end_time_ms - _start_time_ms; 4535 // Update the step history. 4536 _step_times_ms.add(elapsed_time_ms); 4537 4538 if (has_aborted()) { 4539 // The task was aborted for some reason. 4540 4541 statsOnly( ++_aborted ); 4542 4543 if (_has_timed_out) { 4544 double diff_ms = elapsed_time_ms - _time_target_ms; 4545 // Keep statistics of how well we did with respect to hitting 4546 // our target only if we actually timed out (if we aborted for 4547 // other reasons, then the results might get skewed). 4548 _marking_step_diffs_ms.add(diff_ms); 4549 } 4550 4551 if (_cm->has_overflown()) { 4552 // This is the interesting one. We aborted because a global 4553 // overflow was raised. This means we have to restart the 4554 // marking phase and start iterating over regions. However, in 4555 // order to do this we have to make sure that all tasks stop 4556 // what they are doing and re-initialize in a safe manner. We 4557 // will achieve this with the use of two barrier sync points. 4558 4559 if (_cm->verbose_low()) { 4560 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4561 } 4562 4563 if (!is_serial) { 4564 // We only need to enter the sync barrier if being called 4565 // from a parallel context 4566 _cm->enter_first_sync_barrier(_worker_id); 4567 4568 // When we exit this sync barrier we know that all tasks have 4569 // stopped doing marking work. So, it's now safe to 4570 // re-initialize our data structures. At the end of this method, 4571 // task 0 will clear the global data structures. 4572 } 4573 4574 statsOnly( ++_aborted_overflow ); 4575 4576 // We clear the local state of this task... 4577 clear_region_fields(); 4578 4579 if (!is_serial) { 4580 // ...and enter the second barrier. 4581 _cm->enter_second_sync_barrier(_worker_id); 4582 } 4583 // At this point, if we're during the concurrent phase of 4584 // marking, everything has been re-initialized and we're 4585 // ready to restart. 
4586 } 4587 4588 if (_cm->verbose_low()) { 4589 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4590 "elapsed = %1.2lfms <<<<<<<<<<", 4591 _worker_id, _time_target_ms, elapsed_time_ms); 4592 if (_cm->has_aborted()) { 4593 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4594 _worker_id); 4595 } 4596 } 4597 } else { 4598 if (_cm->verbose_low()) { 4599 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4600 "elapsed = %1.2lfms <<<<<<<<<<", 4601 _worker_id, _time_target_ms, elapsed_time_ms); 4602 } 4603 } 4604 4605 _claimed = false; 4606 } 4607 4608 CMTask::CMTask(uint worker_id, 4609 ConcurrentMark* cm, 4610 size_t* marked_bytes, 4611 BitMap* card_bm, 4612 CMTaskQueue* task_queue, 4613 CMTaskQueueSet* task_queues) 4614 : _g1h(G1CollectedHeap::heap()), 4615 _worker_id(worker_id), _cm(cm), 4616 _claimed(false), 4617 _nextMarkBitMap(NULL), _hash_seed(17), 4618 _task_queue(task_queue), 4619 _task_queues(task_queues), 4620 _cm_oop_closure(NULL), 4621 _marked_bytes_array(marked_bytes), 4622 _card_bm(card_bm) { 4623 guarantee(task_queue != NULL, "invariant"); 4624 guarantee(task_queues != NULL, "invariant"); 4625 4626 statsOnly( _clock_due_to_scanning = 0; 4627 _clock_due_to_marking = 0 ); 4628 4629 _marking_step_diffs_ms.add(0.5); 4630 } 4631 4632 // These are formatting macros that are used below to ensure 4633 // consistent formatting. The *_H_* versions are used to format the 4634 // header for a particular value and they should be kept consistent 4635 // with the corresponding macro. Also note that most of the macros add 4636 // the necessary white space (as a prefix) which makes them a bit 4637 // easier to compose. 4638 4639 // All the output lines are prefixed with this string to be able to 4640 // identify them easily in a large log file. 4641 #define G1PPRL_LINE_PREFIX "###" 4642 4643 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT 4644 #ifdef _LP64 4645 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 4646 #else // _LP64 4647 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 4648 #endif // _LP64 4649 4650 // For per-region info 4651 #define G1PPRL_TYPE_FORMAT " %-4s" 4652 #define G1PPRL_TYPE_H_FORMAT " %4s" 4653 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) 4654 #define G1PPRL_BYTE_H_FORMAT " %9s" 4655 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 4656 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 4657 4658 // For summary info 4659 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT 4660 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT 4661 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" 4662 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" 4663 4664 G1PrintRegionLivenessInfoClosure:: 4665 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) 4666 : _out(out), 4667 _total_used_bytes(0), _total_capacity_bytes(0), 4668 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4669 _hum_used_bytes(0), _hum_capacity_bytes(0), 4670 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 4671 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 4672 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4673 MemRegion g1_reserved = g1h->g1_reserved(); 4674 double now = os::elapsedTime(); 4675 4676 // Print the header of the output. 
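// Every line printed below starts with G1PPRL_LINE_PREFIX ("###") so
// the per-region dump can be grepped out of a large log: a PHASE line,
// a HEAP line with the reserved range and region size, then the column
// titles (type, address-range, used, prev-live, next-live, gc-eff,
// remset, code-roots) and their units.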
4677 _out->cr(); 4678 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 4679 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" 4680 G1PPRL_SUM_ADDR_FORMAT("reserved") 4681 G1PPRL_SUM_BYTE_FORMAT("region-size"), 4682 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 4683 HeapRegion::GrainBytes); 4684 _out->print_cr(G1PPRL_LINE_PREFIX); 4685 _out->print_cr(G1PPRL_LINE_PREFIX 4686 G1PPRL_TYPE_H_FORMAT 4687 G1PPRL_ADDR_BASE_H_FORMAT 4688 G1PPRL_BYTE_H_FORMAT 4689 G1PPRL_BYTE_H_FORMAT 4690 G1PPRL_BYTE_H_FORMAT 4691 G1PPRL_DOUBLE_H_FORMAT 4692 G1PPRL_BYTE_H_FORMAT 4693 G1PPRL_BYTE_H_FORMAT, 4694 "type", "address-range", 4695 "used", "prev-live", "next-live", "gc-eff", 4696 "remset", "code-roots"); 4697 _out->print_cr(G1PPRL_LINE_PREFIX 4698 G1PPRL_TYPE_H_FORMAT 4699 G1PPRL_ADDR_BASE_H_FORMAT 4700 G1PPRL_BYTE_H_FORMAT 4701 G1PPRL_BYTE_H_FORMAT 4702 G1PPRL_BYTE_H_FORMAT 4703 G1PPRL_DOUBLE_H_FORMAT 4704 G1PPRL_BYTE_H_FORMAT 4705 G1PPRL_BYTE_H_FORMAT, 4706 "", "", 4707 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 4708 "(bytes)", "(bytes)"); 4709 } 4710 4711 // It takes as a parameter a reference to one of the _hum_* fields; it 4712 // deduces the corresponding value for a region in a humongous region 4713 // series (either the region size, or what's left if the _hum_* field 4714 // is < the region size), and updates the _hum_* field accordingly. 4715 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { 4716 size_t bytes = 0; 4717 // The > 0 check is to deal with the prev and next live bytes which 4718 // could be 0. 4719 if (*hum_bytes > 0) { 4720 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); 4721 *hum_bytes -= bytes; 4722 } 4723 return bytes; 4724 } 4725 4726 // It deduces the values for a region in a humongous region series 4727 // from the _hum_* fields and updates those accordingly. It assumes 4728 // that the _hum_* fields have already been set up from the "starts 4729 // humongous" region and that we visit the regions in address order. 4730 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4731 size_t* capacity_bytes, 4732 size_t* prev_live_bytes, 4733 size_t* next_live_bytes) { 4734 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4735 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4736 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4737 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4738 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4739 } 4740 4741 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4742 const char* type = r->get_type_str(); 4743 HeapWord* bottom = r->bottom(); 4744 HeapWord* end = r->end(); 4745 size_t capacity_bytes = r->capacity(); 4746 size_t used_bytes = r->used(); 4747 size_t prev_live_bytes = r->live_bytes(); 4748 size_t next_live_bytes = r->next_live_bytes(); 4749 double gc_eff = r->gc_efficiency(); 4750 size_t remset_bytes = r->rem_set()->mem_size(); 4751 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4752 4753 if (r->is_starts_humongous()) { 4754 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4755 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4756 "they should have been zeroed after the last time we used them"); 4757 // Set up the _hum_* fields.
4758 _hum_capacity_bytes = capacity_bytes; 4759 _hum_used_bytes = used_bytes; 4760 _hum_prev_live_bytes = prev_live_bytes; 4761 _hum_next_live_bytes = next_live_bytes; 4762 get_hum_bytes(&used_bytes, &capacity_bytes, 4763 &prev_live_bytes, &next_live_bytes); 4764 end = bottom + HeapRegion::GrainWords; 4765 } else if (r->is_continues_humongous()) { 4766 get_hum_bytes(&used_bytes, &capacity_bytes, 4767 &prev_live_bytes, &next_live_bytes); 4768 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4769 } 4770 4771 _total_used_bytes += used_bytes; 4772 _total_capacity_bytes += capacity_bytes; 4773 _total_prev_live_bytes += prev_live_bytes; 4774 _total_next_live_bytes += next_live_bytes; 4775 _total_remset_bytes += remset_bytes; 4776 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4777 4778 // Print a line for this particular region. 4779 _out->print_cr(G1PPRL_LINE_PREFIX 4780 G1PPRL_TYPE_FORMAT 4781 G1PPRL_ADDR_BASE_FORMAT 4782 G1PPRL_BYTE_FORMAT 4783 G1PPRL_BYTE_FORMAT 4784 G1PPRL_BYTE_FORMAT 4785 G1PPRL_DOUBLE_FORMAT 4786 G1PPRL_BYTE_FORMAT 4787 G1PPRL_BYTE_FORMAT, 4788 type, p2i(bottom), p2i(end), 4789 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4790 remset_bytes, strong_code_roots_bytes); 4791 4792 return false; 4793 } 4794 4795 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4796 // add static memory usages to remembered set sizes 4797 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4798 // Print the footer of the output. 4799 _out->print_cr(G1PPRL_LINE_PREFIX); 4800 _out->print_cr(G1PPRL_LINE_PREFIX 4801 " SUMMARY" 4802 G1PPRL_SUM_MB_FORMAT("capacity") 4803 G1PPRL_SUM_MB_PERC_FORMAT("used") 4804 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4805 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4806 G1PPRL_SUM_MB_FORMAT("remset") 4807 G1PPRL_SUM_MB_FORMAT("code-roots"), 4808 bytes_to_mb(_total_capacity_bytes), 4809 bytes_to_mb(_total_used_bytes), 4810 perc(_total_used_bytes, _total_capacity_bytes), 4811 bytes_to_mb(_total_prev_live_bytes), 4812 perc(_total_prev_live_bytes, _total_capacity_bytes), 4813 bytes_to_mb(_total_next_live_bytes), 4814 perc(_total_next_live_bytes, _total_capacity_bytes), 4815 bytes_to_mb(_total_remset_bytes), 4816 bytes_to_mb(_total_strong_code_roots_bytes)); 4817 _out->cr(); 4818 }
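// Typical driving pattern for the closure above (an illustrative
// sketch; the phase name and stream are assumptions, not a quote of a
// real call site): wrap a log stream, iterate over all regions, and
// let the destructor emit the SUMMARY footer.
//
//   G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);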