/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/strongRootsScope.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
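  // (Each bitmap bit covers (1 << _shifter) heap words, i.e.
  // HeapWordSize << _shifter bytes, so marks can only be found at
  // addresses aligned to that granularity.)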
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer _hrclaimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  // Initializer list follows declaration order (base class first) so the
  // members are initialized in the order they are actually constructed.
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    if (_suspendible) {
      SuspendibleThreadSet::join();
    }
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
    if (_suspendible) {
      SuspendibleThreadSet::leave();
    }
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // Clip the range to the bitmap's coverage; MemRegion::intersection()
  // returns the result rather than modifying the receiver.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

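// CMMarkStack is the global mark stack used by concurrent marking. It is
// backed by virtual memory: the backing store is reserved in allocate()
// and can be grown (up to MarkStackSizeMax) in expand() after an overflow.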
CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
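    // (The CAS failed because another thread advanced _index first; loop
    // around and retry with the freshly read value.)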
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area */),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area */),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
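    // scale_parallel_threads() (defined above) yields roughly one marking
    // thread for every four parallel GC threads, with a minimum of one.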
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Marker",
                                           _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty(); // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread still appears to be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended by a Full GC or for an evacuation
 * pause to occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures.
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots()
  // and the decisions on that MT processing is made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
                  G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
1638 1639 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1640 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1641 1642 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1643 expected = _exp_card_bm->at(i); 1644 actual = _card_bm->at(i); 1645 1646 if (expected && !actual) { 1647 if (_verbose) { 1648 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1649 "expected: %s, actual: %s", 1650 hr->hrm_index(), i, 1651 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1652 } 1653 failures += 1; 1654 } 1655 } 1656 1657 if (failures > 0 && _verbose) { 1658 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1659 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1660 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1661 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1662 } 1663 1664 _failures += failures; 1665 1666 // We could stop iteration over the heap when we 1667 // find the first violating region by returning true. 1668 return false; 1669 } 1670 }; 1671 1672 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1673 protected: 1674 G1CollectedHeap* _g1h; 1675 ConcurrentMark* _cm; 1676 BitMap* _actual_region_bm; 1677 BitMap* _actual_card_bm; 1678 1679 uint _n_workers; 1680 1681 BitMap* _expected_region_bm; 1682 BitMap* _expected_card_bm; 1683 1684 int _failures; 1685 bool _verbose; 1686 1687 HeapRegionClaimer _hrclaimer; 1688 1689 public: 1690 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1691 BitMap* region_bm, BitMap* card_bm, 1692 BitMap* expected_region_bm, BitMap* expected_card_bm) 1693 : AbstractGangTask("G1 verify final counting"), 1694 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1695 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1696 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1697 _failures(0), _verbose(false), 1698 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1699 assert(VerifyDuringGC, "don't call this otherwise"); 1700 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1701 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1702 1703 _verbose = _cm->verbose_medium(); 1704 } 1705 1706 void work(uint worker_id) { 1707 assert(worker_id < _n_workers, "invariant"); 1708 1709 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1710 _actual_region_bm, _actual_card_bm, 1711 _expected_region_bm, 1712 _expected_card_bm, 1713 _verbose); 1714 1715 _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer); 1716 1717 Atomic::add(verify_cl.failures(), &_failures); 1718 } 1719 1720 int failures() const { return _failures; } 1721 }; 1722 1723 // Closure that finalizes the liveness counting data. 1724 // Used during the cleanup pause. 1725 // Sets the bits corresponding to the interval [NTAMS, top] 1726 // (which contains the implicitly live objects) in the 1727 // card liveness bitmap. Also sets the bit for each region, 1728 // containing live data, in the region liveness bitmap. 1729 1730 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1731 public: 1732 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1733 BitMap* region_bm, 1734 BitMap* card_bm) : 1735 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1736 1737 bool doHeapRegion(HeapRegion* hr) { 1738 1739 if (hr->is_continues_humongous()) { 1740 // We will ignore these here and process them when their 1741 // associated "starts humongous" region is processed (see 1742 // set_bit_for_heap_region()). 
Note that we cannot rely on their 1743 // associated "starts humongous" region to have their bit set to 1744 // 1 since, due to the region chunking in the parallel region 1745 // iteration, a "continues humongous" region might be visited 1746 // before its associated "starts humongous". 1747 return false; 1748 } 1749 1750 HeapWord* ntams = hr->next_top_at_mark_start(); 1751 HeapWord* top = hr->top(); 1752 1753 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1754 1755 // Mark the allocated-since-marking portion... 1756 if (ntams < top) { 1757 // This definitely means the region has live objects. 1758 set_bit_for_region(hr); 1759 1760 // Now set the bits in the card bitmap for [ntams, top) 1761 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1762 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1763 1764 // Note: if we're looking at the last region in heap - top 1765 // could be actually just beyond the end of the heap; end_idx 1766 // will then correspond to a (non-existent) card that is also 1767 // just beyond the heap. 1768 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1769 // end of object is not card aligned - increment to cover 1770 // all the cards spanned by the object 1771 end_idx += 1; 1772 } 1773 1774 assert(end_idx <= _card_bm->size(), 1775 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1776 end_idx, _card_bm->size())); 1777 assert(start_idx < _card_bm->size(), 1778 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1779 start_idx, _card_bm->size())); 1780 1781 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1782 } 1783 1784 // Set the bit for the region if it contains live data 1785 if (hr->next_marked_bytes() > 0) { 1786 set_bit_for_region(hr); 1787 } 1788 1789 return false; 1790 } 1791 }; 1792 1793 class G1ParFinalCountTask: public AbstractGangTask { 1794 protected: 1795 G1CollectedHeap* _g1h; 1796 ConcurrentMark* _cm; 1797 BitMap* _actual_region_bm; 1798 BitMap* _actual_card_bm; 1799 1800 uint _n_workers; 1801 HeapRegionClaimer _hrclaimer; 1802 1803 public: 1804 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1805 : AbstractGangTask("G1 final counting"), 1806 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1807 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1808 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1809 } 1810 1811 void work(uint worker_id) { 1812 assert(worker_id < _n_workers, "invariant"); 1813 1814 FinalCountDataUpdateClosure final_update_cl(_g1h, 1815 _actual_region_bm, 1816 _actual_card_bm); 1817 1818 _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer); 1819 } 1820 }; 1821 1822 class G1ParNoteEndTask; 1823 1824 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1825 G1CollectedHeap* _g1; 1826 size_t _max_live_bytes; 1827 uint _regions_claimed; 1828 size_t _freed_bytes; 1829 FreeRegionList* _local_cleanup_list; 1830 HeapRegionSetCount _old_regions_removed; 1831 HeapRegionSetCount _humongous_regions_removed; 1832 HRRSCleanupTask* _hrrs_cleanup_task; 1833 double _claimed_region_time; 1834 double _max_region_time; 1835 1836 public: 1837 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1838 FreeRegionList* local_cleanup_list, 1839 HRRSCleanupTask* hrrs_cleanup_task) : 1840 _g1(g1), 1841 _max_live_bytes(0), _regions_claimed(0), 1842 _freed_bytes(0), 1843 _claimed_region_time(0.0), _max_region_time(0.0), 1844 _local_cleanup_list(local_cleanup_list), 1845 
_old_regions_removed(), 1846 _humongous_regions_removed(), 1847 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1848 1849 size_t freed_bytes() { return _freed_bytes; } 1850 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } 1851 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } 1852 1853 bool doHeapRegion(HeapRegion *hr) { 1854 if (hr->is_continues_humongous()) { 1855 return false; 1856 } 1857 // We use a claim value of zero here because all regions 1858 // were claimed with value 1 in the FinalCount task. 1859 _g1->reset_gc_time_stamps(hr); 1860 double start = os::elapsedTime(); 1861 _regions_claimed++; 1862 hr->note_end_of_marking(); 1863 _max_live_bytes += hr->max_live_bytes(); 1864 1865 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1866 _freed_bytes += hr->used(); 1867 hr->set_containing_set(NULL); 1868 if (hr->is_humongous()) { 1869 assert(hr->is_starts_humongous(), "we should only see starts humongous"); 1870 _humongous_regions_removed.increment(1u, hr->capacity()); 1871 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1872 } else { 1873 _old_regions_removed.increment(1u, hr->capacity()); 1874 _g1->free_region(hr, _local_cleanup_list, true); 1875 } 1876 } else { 1877 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1878 } 1879 1880 double region_time = (os::elapsedTime() - start); 1881 _claimed_region_time += region_time; 1882 if (region_time > _max_region_time) { 1883 _max_region_time = region_time; 1884 } 1885 return false; 1886 } 1887 1888 size_t max_live_bytes() { return _max_live_bytes; } 1889 uint regions_claimed() { return _regions_claimed; } 1890 double claimed_region_time_sec() { return _claimed_region_time; } 1891 double max_region_time_sec() { return _max_region_time; } 1892 }; 1893 1894 class G1ParNoteEndTask: public AbstractGangTask { 1895 friend class G1NoteEndOfConcMarkClosure; 1896 1897 protected: 1898 G1CollectedHeap* _g1h; 1899 size_t _max_live_bytes; 1900 size_t _freed_bytes; 1901 FreeRegionList* _cleanup_list; 1902 HeapRegionClaimer _hrclaimer; 1903 1904 public: 1905 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1906 AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { 1907 } 1908 1909 void work(uint worker_id) { 1910 FreeRegionList local_cleanup_list("Local Cleanup List"); 1911 HRRSCleanupTask hrrs_cleanup_task; 1912 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1913 &hrrs_cleanup_task); 1914 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); 1915 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1916 1917 // Now update the lists 1918 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1919 { 1920 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1921 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1922 _max_live_bytes += g1_note_end.max_live_bytes(); 1923 _freed_bytes += g1_note_end.freed_bytes(); 1924 1925 // If we iterate over the global cleanup list at the end of 1926 // cleanup to do this printing we will not guarantee to only 1927 // generate output for the newly-reclaimed regions (the list 1928 // might not be empty at the beginning of cleanup; we might 1929 // still be working on its previous contents). So we do the 1930 // printing here, before we append the new regions to the global 1931 // cleanup list. 
1932 1933 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1934 if (hr_printer->is_active()) { 1935 FreeRegionListIterator iter(&local_cleanup_list); 1936 while (iter.more_available()) { 1937 HeapRegion* hr = iter.get_next(); 1938 hr_printer->cleanup(hr); 1939 } 1940 } 1941 1942 _cleanup_list->add_ordered(&local_cleanup_list); 1943 assert(local_cleanup_list.is_empty(), "post-condition"); 1944 1945 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1946 } 1947 } 1948 size_t max_live_bytes() { return _max_live_bytes; } 1949 size_t freed_bytes() { return _freed_bytes; } 1950 }; 1951 1952 class G1ParScrubRemSetTask: public AbstractGangTask { 1953 protected: 1954 G1RemSet* _g1rs; 1955 BitMap* _region_bm; 1956 BitMap* _card_bm; 1957 HeapRegionClaimer _hrclaimer; 1958 1959 public: 1960 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1961 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1962 } 1963 1964 void work(uint worker_id) { 1965 _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer); 1966 } 1967 1968 }; 1969 1970 void ConcurrentMark::cleanup() { 1971 // world is stopped at this checkpoint 1972 assert(SafepointSynchronize::is_at_safepoint(), 1973 "world should be stopped"); 1974 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1975 1976 // If a full collection has happened, we shouldn't do this. 1977 if (has_aborted()) { 1978 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1979 return; 1980 } 1981 1982 g1h->verify_region_sets_optional(); 1983 1984 if (VerifyDuringGC) { 1985 HandleMark hm; // handle scope 1986 g1h->prepare_for_verify(); 1987 Universe::verify(VerifyOption_G1UsePrevMarking, 1988 " VerifyDuringGC:(before)"); 1989 } 1990 g1h->check_bitmaps("Cleanup Start"); 1991 1992 G1CollectorPolicy* g1p = g1h->g1_policy(); 1993 g1p->record_concurrent_mark_cleanup_start(); 1994 1995 double start = os::elapsedTime(); 1996 1997 HeapRegionRemSet::reset_for_cleanup_tasks(); 1998 1999 uint n_workers; 2000 2001 // Do counting once more with the world stopped for good measure. 2002 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 2003 2004 g1h->set_par_threads(); 2005 n_workers = g1h->n_par_threads(); 2006 assert(g1h->n_par_threads() == n_workers, 2007 "Should not have been reset"); 2008 g1h->workers()->run_task(&g1_par_count_task); 2009 // Done with the parallel phase so reset to 0. 2010 g1h->set_par_threads(0); 2011 2012 if (VerifyDuringGC) { 2013 // Verify that the counting data accumulated during marking matches 2014 // that calculated by walking the marking bitmap. 2015 2016 // Bitmaps to hold expected values 2017 BitMap expected_region_bm(_region_bm.size(), true); 2018 BitMap expected_card_bm(_card_bm.size(), true); 2019 2020 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 2021 &_region_bm, 2022 &_card_bm, 2023 &expected_region_bm, 2024 &expected_card_bm); 2025 2026 g1h->set_par_threads((int)n_workers); 2027 g1h->workers()->run_task(&g1_par_verify_task); 2028 // Done with the parallel phase so reset to 0. 
    g1h->set_par_threads(0);

    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  }

  size_t start_used_bytes = g1h->used();
  g1h->set_marking_complete();

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitmap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->set_par_threads((int)n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->set_par_threads(0);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the rem sets before the record_concurrent_mark_cleanup_end()
  // call below, since scrubbing affects the metric by which we sort the
  // heap regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
    g1h->set_par_threads((int)n_workers);
    g1h->workers()->run_task(&g1_par_scrub_rs_task);
    g1h->set_par_threads(0);

    double rs_scrub_end = os::elapsedTime();
    double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
    _total_rs_scrub_time += this_rs_scrub_time;
  }

  // This will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  if (G1Log::fine()) {
    g1h->g1_policy()->print_heap_transition(start_used_bytes);
  }

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(after)");
  }

  g1h->check_bitmaps("Cleanup End");

  g1h->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();

  g1h->trace_heap_after_concurrent_cycle();
}

void ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                           "cleanup list has %u entries",
                           _cleanup_list.length());
  }

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                               "appending %u entries to the secondary_free_list, "
                               "cleanup list still has %u entries",
                               tmp_free_list.length(),
                               _cleanup_list.length());
      }

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the CMTask associated with a worker thread (for serial
// reference processing the CMTask for worker 0 is used) to preserve (mark)
// and trace referent objects.
//
// Using the CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
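//
// For reference, an instance of this closure is paired with the drain
// closure below and handed to the reference processor. A minimal serial
// sketch (mirroring ConcurrentMark::weakRefsWork() further down, not
// compiled):
#if 0
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
  G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
  rp->process_discovered_references(&g1_is_alive, &g1_keep_alive,
                                    &g1_drain_mark_stack,
                                    NULL /* executor: serial */,
                                    g1h->gc_timer_cm(), concurrent_gc_id());
#endif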
2199 2200 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2201 ConcurrentMark* _cm; 2202 CMTask* _task; 2203 int _ref_counter_limit; 2204 int _ref_counter; 2205 bool _is_serial; 2206 public: 2207 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2208 _cm(cm), _task(task), _is_serial(is_serial), 2209 _ref_counter_limit(G1RefProcDrainInterval) { 2210 assert(_ref_counter_limit > 0, "sanity"); 2211 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2212 _ref_counter = _ref_counter_limit; 2213 } 2214 2215 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2216 virtual void do_oop( oop* p) { do_oop_work(p); } 2217 2218 template <class T> void do_oop_work(T* p) { 2219 if (!_cm->has_overflown()) { 2220 oop obj = oopDesc::load_decode_heap_oop(p); 2221 if (_cm->verbose_high()) { 2222 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2223 "*"PTR_FORMAT" = "PTR_FORMAT, 2224 _task->worker_id(), p2i(p), p2i((void*) obj)); 2225 } 2226 2227 _task->deal_with_reference(obj); 2228 _ref_counter--; 2229 2230 if (_ref_counter == 0) { 2231 // We have dealt with _ref_counter_limit references, pushing them 2232 // and objects reachable from them on to the local stack (and 2233 // possibly the global stack). Call CMTask::do_marking_step() to 2234 // process these entries. 2235 // 2236 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2237 // there's nothing more to do (i.e. we're done with the entries that 2238 // were pushed as a result of the CMTask::deal_with_reference() calls 2239 // above) or we overflow. 2240 // 2241 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2242 // flag while there may still be some work to do. (See the comment at 2243 // the beginning of CMTask::do_marking_step() for those conditions - 2244 // one of which is reaching the specified time target.) It is only 2245 // when CMTask::do_marking_step() returns without setting the 2246 // has_aborted() flag that the marking step has completed. 2247 do { 2248 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2249 _task->do_marking_step(mark_step_duration_ms, 2250 false /* do_termination */, 2251 _is_serial); 2252 } while (_task->has_aborted() && !_cm->has_overflown()); 2253 _ref_counter = _ref_counter_limit; 2254 } 2255 } else { 2256 if (_cm->verbose_high()) { 2257 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2258 } 2259 } 2260 } 2261 }; 2262 2263 // 'Drain' oop closure used by both serial and parallel reference processing. 2264 // Uses the CMTask associated with a given worker thread (for serial 2265 // reference processing the CMtask for worker 0 is used). Calls the 2266 // do_marking_step routine, with an unbelievably large timeout value, 2267 // to drain the marking data structures of the remaining entries 2268 // added by the 'keep alive' oop closure above. 
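// (The "unbelievably large" timeout is simply a time target that can
// never be reached, so each do_marking_step() call below runs until it
// either completes, aborts for some other reason, or the global mark
// stack overflows.)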
2269 2270 class G1CMDrainMarkingStackClosure: public VoidClosure { 2271 ConcurrentMark* _cm; 2272 CMTask* _task; 2273 bool _is_serial; 2274 public: 2275 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2276 _cm(cm), _task(task), _is_serial(is_serial) { 2277 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2278 } 2279 2280 void do_void() { 2281 do { 2282 if (_cm->verbose_high()) { 2283 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s", 2284 _task->worker_id(), BOOL_TO_STR(_is_serial)); 2285 } 2286 2287 // We call CMTask::do_marking_step() to completely drain the local 2288 // and global marking stacks of entries pushed by the 'keep alive' 2289 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). 2290 // 2291 // CMTask::do_marking_step() is called in a loop, which we'll exit 2292 // if there's nothing more to do (i.e. we've completely drained the 2293 // entries that were pushed as a a result of applying the 'keep alive' 2294 // closure to the entries on the discovered ref lists) or we overflow 2295 // the global marking stack. 2296 // 2297 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2298 // flag while there may still be some work to do. (See the comment at 2299 // the beginning of CMTask::do_marking_step() for those conditions - 2300 // one of which is reaching the specified time target.) It is only 2301 // when CMTask::do_marking_step() returns without setting the 2302 // has_aborted() flag that the marking step has completed. 2303 2304 _task->do_marking_step(1000000000.0 /* something very large */, 2305 true /* do_termination */, 2306 _is_serial); 2307 } while (_task->has_aborted() && !_cm->has_overflown()); 2308 } 2309 }; 2310 2311 // Implementation of AbstractRefProcTaskExecutor for parallel 2312 // reference processing at the end of G1 concurrent marking 2313 2314 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 2315 private: 2316 G1CollectedHeap* _g1h; 2317 ConcurrentMark* _cm; 2318 WorkGang* _workers; 2319 int _active_workers; 2320 2321 public: 2322 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 2323 ConcurrentMark* cm, 2324 WorkGang* workers, 2325 int n_workers) : 2326 _g1h(g1h), _cm(cm), 2327 _workers(workers), _active_workers(n_workers) { } 2328 2329 // Executes the given task using concurrent marking worker threads. 
2330 virtual void execute(ProcessTask& task); 2331 virtual void execute(EnqueueTask& task); 2332 }; 2333 2334 class G1CMRefProcTaskProxy: public AbstractGangTask { 2335 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2336 ProcessTask& _proc_task; 2337 G1CollectedHeap* _g1h; 2338 ConcurrentMark* _cm; 2339 2340 public: 2341 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2342 G1CollectedHeap* g1h, 2343 ConcurrentMark* cm) : 2344 AbstractGangTask("Process reference objects in parallel"), 2345 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2346 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2347 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2348 } 2349 2350 virtual void work(uint worker_id) { 2351 ResourceMark rm; 2352 HandleMark hm; 2353 CMTask* task = _cm->task(worker_id); 2354 G1CMIsAliveClosure g1_is_alive(_g1h); 2355 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2356 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2357 2358 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2359 } 2360 }; 2361 2362 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2363 assert(_workers != NULL, "Need parallel worker threads."); 2364 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2365 2366 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2367 2368 // We need to reset the concurrency level before each 2369 // proxy task execution, so that the termination protocol 2370 // and overflow handling in CMTask::do_marking_step() knows 2371 // how many workers to wait for. 2372 _cm->set_concurrency(_active_workers); 2373 _g1h->set_par_threads(_active_workers); 2374 _workers->run_task(&proc_task_proxy); 2375 _g1h->set_par_threads(0); 2376 } 2377 2378 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2379 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2380 EnqueueTask& _enq_task; 2381 2382 public: 2383 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2384 AbstractGangTask("Enqueue reference objects in parallel"), 2385 _enq_task(enq_task) { } 2386 2387 virtual void work(uint worker_id) { 2388 _enq_task.work(worker_id); 2389 } 2390 }; 2391 2392 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2393 assert(_workers != NULL, "Need parallel worker threads."); 2394 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2395 2396 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2397 2398 // Not strictly necessary but... 2399 // 2400 // We need to reset the concurrency level before each 2401 // proxy task execution, so that the termination protocol 2402 // and overflow handling in CMTask::do_marking_step() knows 2403 // how many workers to wait for. 2404 _cm->set_concurrency(_active_workers); 2405 _g1h->set_par_threads(_active_workers); 2406 _workers->run_task(&enq_task_proxy); 2407 _g1h->set_par_threads(0); 2408 } 2409 2410 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2411 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2412 } 2413 2414 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2415 if (has_overflown()) { 2416 // Skip processing the discovered references if we have 2417 // overflown the global marking stack. Reference objects 2418 // only get discovered once so it is OK to not 2419 // de-populate the discovered reference lists. 
// We could have, but the only benefit would be that, when marking
    // restarts, fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    G1CMTraceTime t("GC ref-proc", G1Log::finer());

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          g1h->gc_timer_cm(),
                                          concurrent_gc_id());
    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
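    // If that happens we give up on reference processing: the early return
    // below is taken, remark is abandoned, and concurrent marking will be
    // redone (compare the overflow handling in CMRemarkTask further down).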
    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed
    return;
  }

  assert(_markStack.isEmpty(), "Marking should have completed");

  // Unload Klasses, String, Symbols, Code Cache, etc.
  {
    G1CMTraceTime trace("Unloading", G1Log::finer());

    if (ClassUnloadingWithConcurrentMark) {
      bool purged_classes;

      {
        G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
      }

      {
        G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
      }
    }

    if (G1StringDedup::is_enabled()) {
      G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
      G1StringDedup::unlink(&g1_is_alive);
    }
  }
}

void ConcurrentMark::swapMarkBitMaps() {
  CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap = (CMBitMap*) temp;
}

// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
private:
  CMTask* _task;

public:
  void do_object(oop obj) {
    _task->deal_with_reference(obj);
  }

  CMObjectClosure(CMTask* task) : _task(task) { }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  CMObjectClosure _cm_obj;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

 public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods to find roots
        // for concurrent marking; however, oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
        // live by the SATB invariant but other oops recorded in nmethods may behave differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
      }
    }
  }
};

class CMRemarkTask: public AbstractGangTask {
private:
  ConcurrentMark* _cm;
public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true         /* do_termination       */,
                              false        /* is_serial            */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  CMRemarkTask(ConcurrentMark* cm, int active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  G1CMTraceTime trace("Finalize Marking", G1Log::finer());

  g1h->ensure_parsability(false);

  StrongRootsScope srs;
  // this is remark, so we'll use up all active threads
  uint active_workers = g1h->workers()->active_workers();
  if (active_workers == 0) {
    assert(active_workers > 0, "Should have been set earlier");
    active_workers = (uint) ParallelGCThreads;
    g1h->workers()->set_active_workers(active_workers);
  }
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  CMRemarkTask remarkTask(this, active_workers);
  // We will start all available threads, even if we decide that the
  // active_workers will be fewer. The extra ones will just bail out
  // immediately.
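  // The usual pattern in this file for running a gang task: bracket
  // run_task() with set_par_threads(n) / set_par_threads(0).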
2668 g1h->set_par_threads(active_workers); 2669 g1h->workers()->run_task(&remarkTask); 2670 g1h->set_par_threads(0); 2671 2672 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2673 guarantee(has_overflown() || 2674 satb_mq_set.completed_buffers_num() == 0, 2675 err_msg("Invariant: has_overflown = %s, num buffers = %d", 2676 BOOL_TO_STR(has_overflown()), 2677 satb_mq_set.completed_buffers_num())); 2678 2679 print_stats(); 2680 } 2681 2682 #ifndef PRODUCT 2683 2684 class PrintReachableOopClosure: public OopClosure { 2685 private: 2686 G1CollectedHeap* _g1h; 2687 outputStream* _out; 2688 VerifyOption _vo; 2689 bool _all; 2690 2691 public: 2692 PrintReachableOopClosure(outputStream* out, 2693 VerifyOption vo, 2694 bool all) : 2695 _g1h(G1CollectedHeap::heap()), 2696 _out(out), _vo(vo), _all(all) { } 2697 2698 void do_oop(narrowOop* p) { do_oop_work(p); } 2699 void do_oop( oop* p) { do_oop_work(p); } 2700 2701 template <class T> void do_oop_work(T* p) { 2702 oop obj = oopDesc::load_decode_heap_oop(p); 2703 const char* str = NULL; 2704 const char* str2 = ""; 2705 2706 if (obj == NULL) { 2707 str = ""; 2708 } else if (!_g1h->is_in_g1_reserved(obj)) { 2709 str = " O"; 2710 } else { 2711 HeapRegion* hr = _g1h->heap_region_containing(obj); 2712 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); 2713 bool marked = _g1h->is_marked(obj, _vo); 2714 2715 if (over_tams) { 2716 str = " >"; 2717 if (marked) { 2718 str2 = " AND MARKED"; 2719 } 2720 } else if (marked) { 2721 str = " M"; 2722 } else { 2723 str = " NOT"; 2724 } 2725 } 2726 2727 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", 2728 p2i(p), p2i((void*) obj), str, str2); 2729 } 2730 }; 2731 2732 class PrintReachableObjectClosure : public ObjectClosure { 2733 private: 2734 G1CollectedHeap* _g1h; 2735 outputStream* _out; 2736 VerifyOption _vo; 2737 bool _all; 2738 HeapRegion* _hr; 2739 2740 public: 2741 PrintReachableObjectClosure(outputStream* out, 2742 VerifyOption vo, 2743 bool all, 2744 HeapRegion* hr) : 2745 _g1h(G1CollectedHeap::heap()), 2746 _out(out), _vo(vo), _all(all), _hr(hr) { } 2747 2748 void do_object(oop o) { 2749 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo); 2750 bool marked = _g1h->is_marked(o, _vo); 2751 bool print_it = _all || over_tams || marked; 2752 2753 if (print_it) { 2754 _out->print_cr(" "PTR_FORMAT"%s", 2755 p2i((void *)o), (over_tams) ? " >" : (marked) ? 
" M" : ""); 2756 PrintReachableOopClosure oopCl(_out, _vo, _all); 2757 o->oop_iterate_no_header(&oopCl); 2758 } 2759 } 2760 }; 2761 2762 class PrintReachableRegionClosure : public HeapRegionClosure { 2763 private: 2764 G1CollectedHeap* _g1h; 2765 outputStream* _out; 2766 VerifyOption _vo; 2767 bool _all; 2768 2769 public: 2770 bool doHeapRegion(HeapRegion* hr) { 2771 HeapWord* b = hr->bottom(); 2772 HeapWord* e = hr->end(); 2773 HeapWord* t = hr->top(); 2774 HeapWord* p = _g1h->top_at_mark_start(hr, _vo); 2775 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2776 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p)); 2777 _out->cr(); 2778 2779 HeapWord* from = b; 2780 HeapWord* to = t; 2781 2782 if (to > from) { 2783 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to)); 2784 _out->cr(); 2785 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2786 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2787 _out->cr(); 2788 } 2789 2790 return false; 2791 } 2792 2793 PrintReachableRegionClosure(outputStream* out, 2794 VerifyOption vo, 2795 bool all) : 2796 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } 2797 }; 2798 2799 void ConcurrentMark::print_reachable(const char* str, 2800 VerifyOption vo, 2801 bool all) { 2802 gclog_or_tty->cr(); 2803 gclog_or_tty->print_cr("== Doing heap dump... "); 2804 2805 if (G1PrintReachableBaseFile == NULL) { 2806 gclog_or_tty->print_cr(" #### error: no base file defined"); 2807 return; 2808 } 2809 2810 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2811 (JVM_MAXPATHLEN - 1)) { 2812 gclog_or_tty->print_cr(" #### error: file name too long"); 2813 return; 2814 } 2815 2816 char file_name[JVM_MAXPATHLEN]; 2817 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2818 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2819 2820 fileStream fout(file_name); 2821 if (!fout.is_open()) { 2822 gclog_or_tty->print_cr(" #### error: could not open file"); 2823 return; 2824 } 2825 2826 outputStream* out = &fout; 2827 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); 2828 out->cr(); 2829 2830 out->print_cr("--- ITERATING OVER REGIONS"); 2831 out->cr(); 2832 PrintReachableRegionClosure rcl(out, vo, all); 2833 _g1h->heap_region_iterate(&rcl); 2834 out->cr(); 2835 2836 gclog_or_tty->print_cr(" done"); 2837 gclog_or_tty->flush(); 2838 } 2839 2840 #endif // PRODUCT 2841 2842 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2843 // Note we are overriding the read-only view of the prev map here, via 2844 // the cast. 2845 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2846 } 2847 2848 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2849 _nextMarkBitMap->clearRange(mr); 2850 } 2851 2852 HeapRegion* 2853 ConcurrentMark::claim_region(uint worker_id) { 2854 // "checkpoint" the finger 2855 HeapWord* finger = _finger; 2856 2857 // _heap_end will not change underneath our feet; it only changes at 2858 // yield points. 2859 while (finger < _heap_end) { 2860 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2861 2862 // Note on how this code handles humongous regions. In the 2863 // normal case the finger will reach the start of a "starts 2864 // humongous" (SH) region. Its end will either be the end of the 2865 // last "continues humongous" (CH) region in the sequence, or the 2866 // standard end of the SH region (if the SH is the only region in 2867 // the sequence). That way claim_region() will skip over the CH 2868 // regions. 
// However, there is a subtle race between a CM thread
    // executing this method and a mutator thread doing a humongous
    // object allocation. The two are not mutually exclusive as the CM
    // thread does not need to hold the Heap_lock when it gets
    // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming a SH or a CH
    // region. In the former case, it will either
    //   a) Miss the update to the region's end, in which case it will
    //      visit every subsequent CH region, will find their bitmaps
    //      empty, and do nothing, or
    //   b) Will observe the update of the region's end (in which case
    //      it will skip the subsequent CH regions).
    // If it comes across a region that suddenly becomes CH, the
    // scenario will be similar to b). So, the race between
    // claim_region() and a humongous object allocation might force us
    // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);

    // heap_region_containing_raw() above may return NULL as we always
    // scan (and claim) until the end of the heap. In this case, just
    // jump to the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit = curr_region->next_top_at_mark_start();

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
                               "["PTR_FORMAT", "PTR_FORMAT"), "
                               "limit = "PTR_FORMAT,
                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
      }

      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
      assert(_finger >= end, "the finger should have moved forward");

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] we were successful with region = "
                               PTR_FORMAT, worker_id, p2i(curr_region));
      }

      if (limit > bottom) {
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
                                 "returning it ", worker_id, p2i(curr_region));
        }
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
                                 "returning NULL", worker_id, p2i(curr_region));
        }
        // we return NULL and the caller should try calling
        // claim_region() again.
2929 return NULL; 2930 } 2931 } else { 2932 assert(_finger > finger, "the finger should have moved forward"); 2933 if (verbose_low()) { 2934 if (curr_region == NULL) { 2935 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 2936 "global finger = "PTR_FORMAT", " 2937 "our finger = "PTR_FORMAT, 2938 worker_id, p2i(_finger), p2i(finger)); 2939 } else { 2940 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2941 "global finger = "PTR_FORMAT", " 2942 "our finger = "PTR_FORMAT, 2943 worker_id, p2i(_finger), p2i(finger)); 2944 } 2945 } 2946 2947 // read it again 2948 finger = _finger; 2949 } 2950 } 2951 2952 return NULL; 2953 } 2954 2955 #ifndef PRODUCT 2956 enum VerifyNoCSetOopsPhase { 2957 VerifyNoCSetOopsStack, 2958 VerifyNoCSetOopsQueues, 2959 VerifyNoCSetOopsSATBCompleted, 2960 VerifyNoCSetOopsSATBThread 2961 }; 2962 2963 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2964 private: 2965 G1CollectedHeap* _g1h; 2966 VerifyNoCSetOopsPhase _phase; 2967 int _info; 2968 2969 const char* phase_str() { 2970 switch (_phase) { 2971 case VerifyNoCSetOopsStack: return "Stack"; 2972 case VerifyNoCSetOopsQueues: return "Queue"; 2973 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 2974 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 2975 default: ShouldNotReachHere(); 2976 } 2977 return NULL; 2978 } 2979 2980 void do_object_work(oop obj) { 2981 guarantee(!_g1h->obj_in_cs(obj), 2982 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2983 p2i((void*) obj), phase_str(), _info)); 2984 } 2985 2986 public: 2987 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2988 2989 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2990 _phase = phase; 2991 _info = info; 2992 } 2993 2994 virtual void do_oop(oop* p) { 2995 oop obj = oopDesc::load_decode_heap_oop(p); 2996 do_object_work(obj); 2997 } 2998 2999 virtual void do_oop(narrowOop* p) { 3000 // We should not come across narrow oops while scanning marking 3001 // stacks and SATB buffers. 
3002 ShouldNotReachHere(); 3003 } 3004 3005 virtual void do_object(oop obj) { 3006 do_object_work(obj); 3007 } 3008 }; 3009 3010 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 3011 bool verify_enqueued_buffers, 3012 bool verify_thread_buffers, 3013 bool verify_fingers) { 3014 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 3015 if (!G1CollectedHeap::heap()->mark_in_progress()) { 3016 return; 3017 } 3018 3019 VerifyNoCSetOopsClosure cl; 3020 3021 if (verify_stacks) { 3022 // Verify entries on the global mark stack 3023 cl.set_phase(VerifyNoCSetOopsStack); 3024 _markStack.oops_do(&cl); 3025 3026 // Verify entries on the task queues 3027 for (uint i = 0; i < _max_worker_id; i += 1) { 3028 cl.set_phase(VerifyNoCSetOopsQueues, i); 3029 CMTaskQueue* queue = _task_queues->queue(i); 3030 queue->oops_do(&cl); 3031 } 3032 } 3033 3034 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 3035 3036 // Verify entries on the enqueued SATB buffers 3037 if (verify_enqueued_buffers) { 3038 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 3039 satb_qs.iterate_completed_buffers_read_only(&cl); 3040 } 3041 3042 // Verify entries on the per-thread SATB buffers 3043 if (verify_thread_buffers) { 3044 cl.set_phase(VerifyNoCSetOopsSATBThread); 3045 satb_qs.iterate_thread_buffers_read_only(&cl); 3046 } 3047 3048 if (verify_fingers) { 3049 // Verify the global finger 3050 HeapWord* global_finger = finger(); 3051 if (global_finger != NULL && global_finger < _heap_end) { 3052 // The global finger always points to a heap region boundary. We 3053 // use heap_region_containing_raw() to get the containing region 3054 // given that the global finger could be pointing to a free region 3055 // which subsequently becomes continues humongous. If that 3056 // happens, heap_region_containing() will return the bottom of the 3057 // corresponding starts humongous region and the check below will 3058 // not hold any more. 3059 // Since we always iterate over all regions, we might get a NULL HeapRegion 3060 // here. 3061 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 3062 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 3063 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 3064 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 3065 } 3066 3067 // Verify the task fingers 3068 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 3069 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 3070 CMTask* task = _tasks[i]; 3071 HeapWord* task_finger = task->finger(); 3072 if (task_finger != NULL && task_finger < _heap_end) { 3073 // See above note on the global finger verification. 3074 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 3075 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 3076 !task_hr->in_collection_set(), 3077 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 3078 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 3079 } 3080 } 3081 } 3082 } 3083 #endif // PRODUCT 3084 3085 // Aggregate the counting data that was constructed concurrently 3086 // with marking. 
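//
// Per region, the aggregation below boils down to: sum the per-worker
// marked-bytes entries, and OR the per-worker card bitmaps into the
// global card bitmap. A sketch of the intent (not compiled; the real
// loop below walks the set bits of each worker bitmap one at a time):
#if 0
  size_t marked_bytes = 0;
  for (uint i = 0; i < _max_worker_id; i += 1) {
    marked_bytes += _cm->count_marked_bytes_array_for(i)[hr->hrm_index()];
    // conceptually: _cm_card_bm[start_idx..limit_idx) |=
    //               count_card_bitmap_for(i)[start_idx..limit_idx)
  }
  hr->add_to_marked_bytes(marked_bytes);
#endif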
class AggregateCountDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;
  BitMap* _cm_card_bm;
  uint _max_worker_id;

 public:
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed.
      // Note that we cannot rely on their associated
      // "starts humongous" region to have their bit set to 1
      // since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* start = hr->bottom();
    HeapWord* limit = hr->next_top_at_mark_start();
    HeapWord* end = hr->end();

    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
                   "top: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));

    assert(hr->next_marked_bytes() == 0, "Precondition");

    if (start == limit) {
      // NTAMS of this region has not been set so nothing to do.
      return false;
    }

    // 'start' should be in the heap.
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could be actually just beyond the end of the heap;
    // limit_idx will then correspond to a (non-existent) card
    // that is also outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
    uint hrm_index = hr->hrm_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrm_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
3167 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 3168 3169 while (scan_idx < limit_idx) { 3170 assert(task_card_bm->at(scan_idx) == true, "should be"); 3171 _cm_card_bm->set_bit(scan_idx); 3172 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 3173 3174 // BitMap::get_next_one_offset() can handle the case when 3175 // its left_offset parameter is greater than its right_offset 3176 // parameter. It does, however, have an early exit if 3177 // left_offset == right_offset. So let's limit the value 3178 // passed in for left offset here. 3179 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 3180 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 3181 } 3182 } 3183 3184 // Update the marked bytes for this region. 3185 hr->add_to_marked_bytes(marked_bytes); 3186 3187 // Next heap region 3188 return false; 3189 } 3190 }; 3191 3192 class G1AggregateCountDataTask: public AbstractGangTask { 3193 protected: 3194 G1CollectedHeap* _g1h; 3195 ConcurrentMark* _cm; 3196 BitMap* _cm_card_bm; 3197 uint _max_worker_id; 3198 int _active_workers; 3199 HeapRegionClaimer _hrclaimer; 3200 3201 public: 3202 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3203 ConcurrentMark* cm, 3204 BitMap* cm_card_bm, 3205 uint max_worker_id, 3206 int n_workers) : 3207 AbstractGangTask("Count Aggregation"), 3208 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3209 _max_worker_id(max_worker_id), 3210 _active_workers(n_workers), 3211 _hrclaimer(_active_workers) { 3212 } 3213 3214 void work(uint worker_id) { 3215 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3216 3217 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 3218 } 3219 }; 3220 3221 3222 void ConcurrentMark::aggregate_count_data() { 3223 int n_workers = _g1h->workers()->active_workers(); 3224 3225 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3226 _max_worker_id, n_workers); 3227 3228 _g1h->set_par_threads(n_workers); 3229 _g1h->workers()->run_task(&g1_par_agg_task); 3230 _g1h->set_par_threads(0); 3231 } 3232 3233 // Clear the per-worker arrays used to store the per-region counting data 3234 void ConcurrentMark::clear_all_count_data() { 3235 // Clear the global card bitmap - it will be filled during 3236 // liveness count aggregation (during remark) and the 3237 // final counting task. 3238 _card_bm.clear(); 3239 3240 // Clear the global region bitmap - it will be filled as part 3241 // of the final counting task. 
3242 _region_bm.clear(); 3243 3244 uint max_regions = _g1h->max_regions(); 3245 assert(_max_worker_id > 0, "uninitialized"); 3246 3247 for (uint i = 0; i < _max_worker_id; i += 1) { 3248 BitMap* task_card_bm = count_card_bitmap_for(i); 3249 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3250 3251 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3252 assert(marked_bytes_array != NULL, "uninitialized"); 3253 3254 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3255 task_card_bm->clear(); 3256 } 3257 } 3258 3259 void ConcurrentMark::print_stats() { 3260 if (verbose_stats()) { 3261 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3262 for (size_t i = 0; i < _active_tasks; ++i) { 3263 _tasks[i]->print_stats(); 3264 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3265 } 3266 } 3267 } 3268 3269 // abandon current marking iteration due to a Full GC 3270 void ConcurrentMark::abort() { 3271 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3272 // concurrent bitmap clearing. 3273 _nextMarkBitMap->clearAll(); 3274 3275 // Note we cannot clear the previous marking bitmap here 3276 // since VerifyDuringGC verifies the objects marked during 3277 // a full GC against the previous bitmap. 3278 3279 // Clear the liveness counting data 3280 clear_all_count_data(); 3281 // Empty mark stack 3282 reset_marking_state(); 3283 for (uint i = 0; i < _max_worker_id; ++i) { 3284 _tasks[i]->clear_region_fields(); 3285 } 3286 _first_overflow_barrier_sync.abort(); 3287 _second_overflow_barrier_sync.abort(); 3288 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); 3289 if (!gc_id.is_undefined()) { 3290 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance 3291 // to detect that it was aborted. Only keep track of the first GC id that we aborted. 3292 _aborted_gc_id = gc_id; 3293 } 3294 _has_aborted = true; 3295 3296 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3297 satb_mq_set.abandon_partial_marking(); 3298 // This can be called either during or outside marking, we'll read 3299 // the expected_active value from the SATB queue set. 3300 satb_mq_set.set_active_all_threads( 3301 false, /* new active value */ 3302 satb_mq_set.is_active() /* expected_active */); 3303 3304 _g1h->trace_heap_after_concurrent_cycle(); 3305 _g1h->register_concurrent_cycle_end(); 3306 } 3307 3308 const GCId& ConcurrentMark::concurrent_gc_id() { 3309 if (has_aborted()) { 3310 return _aborted_gc_id; 3311 } 3312 return _g1h->gc_tracer_cm()->gc_id(); 3313 } 3314 3315 static void print_ms_time_info(const char* prefix, const char* name, 3316 NumberSeq& ns) { 3317 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3318 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3319 if (ns.num() > 0) { 3320 gclog_or_tty->print_cr("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 3321 prefix, ns.sd(), ns.maximum()); 3322 } 3323 } 3324 3325 void ConcurrentMark::print_summary_info() { 3326 gclog_or_tty->print_cr(" Concurrent marking:"); 3327 print_ms_time_info(" ", "init marks", _init_times); 3328 print_ms_time_info(" ", "remarks", _remark_times); 3329 { 3330 print_ms_time_info(" ", "final marks", _remark_mark_times); 3331 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3332 3333 } 3334 print_ms_time_info(" ", "cleanups", _cleanup_times); 3335 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3336 _total_counting_time, 3337 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3338 (double)_cleanup_times.num() 3339 : 0.0)); 3340 if (G1ScrubRemSets) { 3341 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3342 _total_rs_scrub_time, 3343 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3344 (double)_cleanup_times.num() 3345 : 0.0)); 3346 } 3347 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3348 (_init_times.sum() + _remark_times.sum() + 3349 _cleanup_times.sum())/1000.0); 3350 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3351 "(%8.2f s marking).", 3352 cmThread()->vtime_accum(), 3353 cmThread()->vtime_mark_accum()); 3354 } 3355 3356 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3357 _parallel_workers->print_worker_threads_on(st); 3358 } 3359 3360 void ConcurrentMark::print_on_error(outputStream* st) const { 3361 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3362 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3363 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3364 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3365 } 3366 3367 // We take a break if someone is trying to stop the world. 
3368 bool ConcurrentMark::do_yield_check(uint worker_id) { 3369 if (SuspendibleThreadSet::should_yield()) { 3370 if (worker_id == 0) { 3371 _g1h->g1_policy()->record_concurrent_pause(); 3372 } 3373 SuspendibleThreadSet::yield(); 3374 return true; 3375 } else { 3376 return false; 3377 } 3378 } 3379 3380 #ifndef PRODUCT 3381 // for debugging purposes 3382 void ConcurrentMark::print_finger() { 3383 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3384 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3385 for (uint i = 0; i < _max_worker_id; ++i) { 3386 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3387 } 3388 gclog_or_tty->cr(); 3389 } 3390 #endif 3391 3392 void CMTask::scan_object(oop obj) { 3393 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3394 3395 if (_cm->verbose_high()) { 3396 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, 3397 _worker_id, p2i((void*) obj)); 3398 } 3399 3400 size_t obj_size = obj->size(); 3401 _words_scanned += obj_size; 3402 3403 obj->oop_iterate(_cm_oop_closure); 3404 statsOnly( ++_objs_scanned ); 3405 check_limits(); 3406 } 3407 3408 // Closure for iteration over bitmaps 3409 class CMBitMapClosure : public BitMapClosure { 3410 private: 3411 // the bitmap that is being iterated over 3412 CMBitMap* _nextMarkBitMap; 3413 ConcurrentMark* _cm; 3414 CMTask* _task; 3415 3416 public: 3417 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3418 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3419 3420 bool do_bit(size_t offset) { 3421 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3422 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3423 assert( addr < _cm->finger(), "invariant"); 3424 3425 statsOnly( _task->increase_objs_found_on_bitmap() ); 3426 assert(addr >= _task->finger(), "invariant"); 3427 3428 // We move that task's local finger along. 
3429 _task->move_finger_to(addr);
3430
3431 _task->scan_object(oop(addr));
3432 // we only partially drain the local queue and global stack
3433 _task->drain_local_queue(true);
3434 _task->drain_global_stack(true);
3435
3436 // if the has_aborted flag has been raised, we need to bail out of
3437 // the iteration
3438 return !_task->has_aborted();
3439 }
3440 };
3441
3442 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
3443 ConcurrentMark* cm,
3444 CMTask* task)
3445 : _g1h(g1h), _cm(cm), _task(task) {
3446 assert(_ref_processor == NULL, "should be initialized to NULL");
3447
3448 if (G1UseConcMarkReferenceProcessing) {
3449 _ref_processor = g1h->ref_processor_cm();
3450 assert(_ref_processor != NULL, "should not be NULL");
3451 }
3452 }
3453
3454 void CMTask::setup_for_region(HeapRegion* hr) {
3455 assert(hr != NULL,
3456 "claim_region() should have filtered out NULL regions");
3457 assert(!hr->is_continues_humongous(),
3458 "claim_region() should have filtered out continues humongous regions");
3459
3460 if (_cm->verbose_low()) {
3461 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
3462 _worker_id, p2i(hr));
3463 }
3464
3465 _curr_region = hr;
3466 _finger = hr->bottom();
3467 update_region_limit();
3468 }
3469
3470 void CMTask::update_region_limit() {
3471 HeapRegion* hr = _curr_region;
3472 HeapWord* bottom = hr->bottom();
3473 HeapWord* limit = hr->next_top_at_mark_start();
3474
3475 if (limit == bottom) {
3476 if (_cm->verbose_low()) {
3477 gclog_or_tty->print_cr("[%u] found an empty region "
3478 "["PTR_FORMAT", "PTR_FORMAT")",
3479 _worker_id, p2i(bottom), p2i(limit));
3480 }
3481 // The region was collected underneath our feet.
3482 // We set the finger to bottom to ensure that the bitmap
3483 // iteration that will follow this will not do anything.
3484 // (this is not a condition that holds when we set the region up,
3485 // as the region is not supposed to be empty in the first place)
3486 _finger = bottom;
3487 } else if (limit >= _region_limit) {
3488 assert(limit >= _finger, "peace of mind");
3489 } else {
3490 assert(limit < _region_limit, "only way to get here");
3491 // This can happen under some pretty unusual circumstances. An
3492 // evacuation pause empties the region underneath our feet (NTAMS
3493 // at bottom). We then do some allocation in the region (NTAMS
3494 // stays at bottom), followed by the region being used as a GC
3495 // alloc region (NTAMS will move to top() and the objects
3496 // originally below it will be grayed). All objects now marked in
3497 // the region are explicitly grayed, if below the global finger,
3498 // and in fact we do not need to scan anything else. So, we simply
3499 // set _finger to be limit to ensure that the bitmap iteration
3500 // doesn't do anything.
3501 _finger = limit;
3502 }
3503
3504 _region_limit = limit;
3505 }
3506
3507 void CMTask::giveup_current_region() {
3508 assert(_curr_region != NULL, "invariant");
3509 if (_cm->verbose_low()) {
3510 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
3511 _worker_id, p2i(_curr_region));
3512 }
3513 clear_region_fields();
3514 }
3515
3516 void CMTask::clear_region_fields() {
3517 // Values for these three fields that indicate that we're not
3518 // holding on to a region.
3519 _curr_region = NULL; 3520 _finger = NULL; 3521 _region_limit = NULL; 3522 } 3523 3524 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3525 if (cm_oop_closure == NULL) { 3526 assert(_cm_oop_closure != NULL, "invariant"); 3527 } else { 3528 assert(_cm_oop_closure == NULL, "invariant"); 3529 } 3530 _cm_oop_closure = cm_oop_closure; 3531 } 3532 3533 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3534 guarantee(nextMarkBitMap != NULL, "invariant"); 3535 3536 if (_cm->verbose_low()) { 3537 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3538 } 3539 3540 _nextMarkBitMap = nextMarkBitMap; 3541 clear_region_fields(); 3542 3543 _calls = 0; 3544 _elapsed_time_ms = 0.0; 3545 _termination_time_ms = 0.0; 3546 _termination_start_time_ms = 0.0; 3547 3548 #if _MARKING_STATS_ 3549 _aborted = 0; 3550 _aborted_overflow = 0; 3551 _aborted_cm_aborted = 0; 3552 _aborted_yield = 0; 3553 _aborted_timed_out = 0; 3554 _aborted_satb = 0; 3555 _aborted_termination = 0; 3556 _steal_attempts = 0; 3557 _steals = 0; 3558 _local_pushes = 0; 3559 _local_pops = 0; 3560 _local_max_size = 0; 3561 _objs_scanned = 0; 3562 _global_pushes = 0; 3563 _global_pops = 0; 3564 _global_max_size = 0; 3565 _global_transfers_to = 0; 3566 _global_transfers_from = 0; 3567 _regions_claimed = 0; 3568 _objs_found_on_bitmap = 0; 3569 _satb_buffers_processed = 0; 3570 #endif // _MARKING_STATS_ 3571 } 3572 3573 bool CMTask::should_exit_termination() { 3574 regular_clock_call(); 3575 // This is called when we are in the termination protocol. We should 3576 // quit if, for some reason, this task wants to abort or the global 3577 // stack is not empty (this means that we can get work from it). 3578 return !_cm->mark_stack_empty() || has_aborted(); 3579 } 3580 3581 void CMTask::reached_limit() { 3582 assert(_words_scanned >= _words_scanned_limit || 3583 _refs_reached >= _refs_reached_limit , 3584 "shouldn't have been called otherwise"); 3585 regular_clock_call(); 3586 } 3587 3588 void CMTask::regular_clock_call() { 3589 if (has_aborted()) return; 3590 3591 // First, we need to recalculate the words scanned and refs reached 3592 // limits for the next clock call. 3593 recalculate_limits(); 3594 3595 // During the regular clock call we do the following 3596 3597 // (1) If an overflow has been flagged, then we abort. 3598 if (_cm->has_overflown()) { 3599 set_has_aborted(); 3600 return; 3601 } 3602 3603 // If we are not concurrent (i.e. we're doing remark) we don't need 3604 // to check anything else. The other steps are only needed during 3605 // the concurrent marking phase. 3606 if (!concurrent()) return; 3607 3608 // (2) If marking has been aborted for Full GC, then we also abort. 3609 if (_cm->has_aborted()) { 3610 set_has_aborted(); 3611 statsOnly( ++_aborted_cm_aborted ); 3612 return; 3613 } 3614 3615 double curr_time_ms = os::elapsedVTime() * 1000.0; 3616 3617 // (3) If marking stats are enabled, then we update the step history. 
3618 #if _MARKING_STATS_
3619 if (_words_scanned >= _words_scanned_limit) {
3620 ++_clock_due_to_scanning;
3621 }
3622 if (_refs_reached >= _refs_reached_limit) {
3623 ++_clock_due_to_marking;
3624 }
3625
3626 double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3627 _interval_start_time_ms = curr_time_ms;
3628 _all_clock_intervals_ms.add(last_interval_ms);
3629
3630 if (_cm->verbose_medium()) {
3631 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3632 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3633 _worker_id, last_interval_ms,
3634 _words_scanned,
3635 (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3636 _refs_reached,
3637 (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3638 }
3639 #endif // _MARKING_STATS_
3640
3641 // (4) We check whether we should yield. If we have to, then we abort.
3642 if (SuspendibleThreadSet::should_yield()) {
3643 // We should yield. To do this we abort the task. The caller is
3644 // responsible for yielding.
3645 set_has_aborted();
3646 statsOnly( ++_aborted_yield );
3647 return;
3648 }
3649
3650 // (5) We check whether we've reached our time quota. If we have,
3651 // then we abort.
3652 double elapsed_time_ms = curr_time_ms - _start_time_ms;
3653 if (elapsed_time_ms > _time_target_ms) {
3654 set_has_aborted();
3655 _has_timed_out = true;
3656 statsOnly( ++_aborted_timed_out );
3657 return;
3658 }
3659
3660 // (6) Finally, we check whether there are enough completed SATB
3661 // buffers available for processing. If there are, we abort.
3662 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3663 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3664 if (_cm->verbose_low()) {
3665 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3666 _worker_id);
3667 }
3668 // we do need to process SATB buffers, so we'll abort and restart
3669 // the marking task to do so
3670 set_has_aborted();
3671 statsOnly( ++_aborted_satb );
3672 return;
3673 }
3674 }
3675
3676 void CMTask::recalculate_limits() {
3677 _real_words_scanned_limit = _words_scanned + words_scanned_period;
3678 _words_scanned_limit = _real_words_scanned_limit;
3679
3680 _real_refs_reached_limit = _refs_reached + refs_reached_period;
3681 _refs_reached_limit = _real_refs_reached_limit;
3682 }
3683
3684 void CMTask::decrease_limits() {
3685 // This is called when we believe that we're going to do an infrequent
3686 // operation which will increase the per-byte scanned cost (i.e. move
3687 // entries to/from the global stack). It basically tries to decrease the
3688 // scanning limit so that the clock is called earlier.
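// (Illustrative arithmetic, values hypothetical:) if words_scanned_period
// were 1000 and _real_words_scanned_limit were 5000, the assignment below
// would drop the effective limit to 5000 - 750 = 4250, so the next clock
// call would arrive after roughly a quarter of the usual scanning period.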
3689
3690 if (_cm->verbose_medium()) {
3691 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3692 }
3693
3694 _words_scanned_limit = _real_words_scanned_limit -
3695 3 * words_scanned_period / 4;
3696 _refs_reached_limit = _real_refs_reached_limit -
3697 3 * refs_reached_period / 4;
3698 }
3699
3700 void CMTask::move_entries_to_global_stack() {
3701 // local array where we'll store the entries that will be popped
3702 // from the local queue
3703 oop buffer[global_stack_transfer_size];
3704
3705 int n = 0;
3706 oop obj;
3707 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3708 buffer[n] = obj;
3709 ++n;
3710 }
3711
3712 if (n > 0) {
3713 // we popped at least one entry from the local queue
3714
3715 statsOnly( ++_global_transfers_to; _local_pops += n );
3716
3717 if (!_cm->mark_stack_push(buffer, n)) {
3718 if (_cm->verbose_low()) {
3719 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3720 _worker_id);
3721 }
3722 set_has_aborted();
3723 } else {
3724 // the transfer was successful
3725
3726 if (_cm->verbose_medium()) {
3727 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3728 _worker_id, n);
3729 }
3730 statsOnly( size_t tmp_size = _cm->mark_stack_size();
3731 if (tmp_size > _global_max_size) {
3732 _global_max_size = tmp_size;
3733 }
3734 _global_pushes += n );
3735 }
3736 }
3737
3738 // this operation was quite expensive, so decrease the limits
3739 decrease_limits();
3740 }
3741
3742 void CMTask::get_entries_from_global_stack() {
3743 // local array where we'll store the entries that will be popped
3744 // from the global stack.
3745 oop buffer[global_stack_transfer_size];
3746 int n;
3747 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3748 assert(n <= global_stack_transfer_size,
3749 "we should not pop more than the given limit");
3750 if (n > 0) {
3751 // yes, we did actually pop at least one entry
3752
3753 statsOnly( ++_global_transfers_from; _global_pops += n );
3754 if (_cm->verbose_medium()) {
3755 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3756 _worker_id, n);
3757 }
3758 for (int i = 0; i < n; ++i) {
3759 bool success = _task_queue->push(buffer[i]);
3760 // We only call this when the local queue is empty or under a
3761 // given target limit. So, we do not expect this push to fail.
3762 assert(success, "invariant");
3763 }
3764
3765 statsOnly( size_t tmp_size = (size_t)_task_queue->size();
3766 if (tmp_size > _local_max_size) {
3767 _local_max_size = tmp_size;
3768 }
3769 _local_pushes += n );
3770 }
3771
3772 // this operation was quite expensive, so decrease the limits
3773 decrease_limits();
3774 }
3775
3776 void CMTask::drain_local_queue(bool partially) {
3777 if (has_aborted()) return;
3778
3779 // Decide what the target size is, depending on whether we're going to
3780 // drain it partially (so that other tasks can steal if they run out
3781 // of things to do) or totally (at the very end).
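// (Illustrative note, values hypothetical:) with max_elems() == 4096 and
// GCDrainStackTargetSize == 64, a partial drain pops entries until at most
// MIN2(4096 / 3, 64) == 64 remain, leaving work available for stealing;
// a total drain (target 0) empties the queue completely.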
3782 size_t target_size;
3783 if (partially) {
3784 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3785 } else {
3786 target_size = 0;
3787 }
3788
3789 if (_task_queue->size() > target_size) {
3790 if (_cm->verbose_high()) {
3791 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3792 _worker_id, target_size);
3793 }
3794
3795 oop obj;
3796 bool ret = _task_queue->pop_local(obj);
3797 while (ret) {
3798 statsOnly( ++_local_pops );
3799
3800 if (_cm->verbose_high()) {
3801 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3802 p2i((void*) obj));
3803 }
3804
3805 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3806 assert(!_g1h->is_on_master_free_list(
3807 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3808
3809 scan_object(obj);
3810
3811 if (_task_queue->size() <= target_size || has_aborted()) {
3812 ret = false;
3813 } else {
3814 ret = _task_queue->pop_local(obj);
3815 }
3816 }
3817
3818 if (_cm->verbose_high()) {
3819 gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3820 _worker_id, _task_queue->size());
3821 }
3822 }
3823 }
3824
3825 void CMTask::drain_global_stack(bool partially) {
3826 if (has_aborted()) return;
3827
3828 // We have a policy to drain the local queue before we attempt to
3829 // drain the global stack.
3830 assert(partially || _task_queue->size() == 0, "invariant");
3831
3832 // Decide what the target size is, depending on whether we're going to
3833 // drain it partially (so that other tasks can steal if they run out
3834 // of things to do) or totally (at the very end). Notice that,
3835 // because we move entries from the global stack in chunks or
3836 // because another task might be doing the same, we might in fact
3837 // drop below the target. But, this is not a problem.
3838 size_t target_size;
3839 if (partially) {
3840 target_size = _cm->partial_mark_stack_size_target();
3841 } else {
3842 target_size = 0;
3843 }
3844
3845 if (_cm->mark_stack_size() > target_size) {
3846 if (_cm->verbose_low()) {
3847 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3848 _worker_id, target_size);
3849 }
3850
3851 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3852 get_entries_from_global_stack();
3853 drain_local_queue(partially);
3854 }
3855
3856 if (_cm->verbose_low()) {
3857 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3858 _worker_id, _cm->mark_stack_size());
3859 }
3860 }
3861 }
3862
3863 // The SATB queue set has several assumptions about whether to call the par
3864 // or non-par versions of its methods. This is why some of the code is
3865 // replicated. We should really get rid of the single-threaded version
3866 // of the code to simplify things.
3867 void CMTask::drain_satb_buffers() {
3868 if (has_aborted()) return;
3869
3870 // We set this so that the regular clock knows that we're in the
3871 // middle of draining buffers and doesn't set the abort flag when it
3872 // notices that SATB buffers are available for draining. It'd be
3873 // very counterproductive if it did that. :-)
3874 _draining_satb_buffers = true;
3875
3876 CMObjectClosure oc(this);
3877 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3878 satb_mq_set.set_closure(_worker_id, &oc);
3879
3880 // This keeps claiming and applying the closure to completed buffers
3881 // until we run out of buffers or we need to abort.
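// (Added note:) each successful call to apply_closure_to_completed_buffer()
// is believed to claim and process a single completed buffer, so the
// regular_clock_call() in the loop body keeps this task responsive to
// yield and abort requests between buffers.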
3882 while (!has_aborted() && 3883 satb_mq_set.apply_closure_to_completed_buffer(_worker_id)) { 3884 if (_cm->verbose_medium()) { 3885 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3886 } 3887 statsOnly( ++_satb_buffers_processed ); 3888 regular_clock_call(); 3889 } 3890 3891 _draining_satb_buffers = false; 3892 3893 assert(has_aborted() || 3894 concurrent() || 3895 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3896 3897 satb_mq_set.set_closure(_worker_id, NULL); 3898 3899 // again, this was a potentially expensive operation, decrease the 3900 // limits to get the regular clock call early 3901 decrease_limits(); 3902 } 3903 3904 void CMTask::print_stats() { 3905 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3906 _worker_id, _calls); 3907 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3908 _elapsed_time_ms, _termination_time_ms); 3909 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3910 _step_times_ms.num(), _step_times_ms.avg(), 3911 _step_times_ms.sd()); 3912 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3913 _step_times_ms.maximum(), _step_times_ms.sum()); 3914 3915 #if _MARKING_STATS_ 3916 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3917 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3918 _all_clock_intervals_ms.sd()); 3919 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3920 _all_clock_intervals_ms.maximum(), 3921 _all_clock_intervals_ms.sum()); 3922 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT, 3923 _clock_due_to_scanning, _clock_due_to_marking); 3924 gclog_or_tty->print_cr(" Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT, 3925 _objs_scanned, _objs_found_on_bitmap); 3926 gclog_or_tty->print_cr(" Local Queue: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3927 _local_pushes, _local_pops, _local_max_size); 3928 gclog_or_tty->print_cr(" Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3929 _global_pushes, _global_pops, _global_max_size); 3930 gclog_or_tty->print_cr(" transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT, 3931 _global_transfers_to,_global_transfers_from); 3932 gclog_or_tty->print_cr(" Regions: claimed = " SIZE_FORMAT, _regions_claimed); 3933 gclog_or_tty->print_cr(" SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed); 3934 gclog_or_tty->print_cr(" Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT, 3935 _steal_attempts, _steals); 3936 gclog_or_tty->print_cr(" Aborted: " SIZE_FORMAT ", due to", _aborted); 3937 gclog_or_tty->print_cr(" overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT, 3938 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3939 gclog_or_tty->print_cr(" time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT, 3940 _aborted_timed_out, _aborted_satb, _aborted_termination); 3941 #endif // _MARKING_STATS_ 3942 } 3943 3944 /***************************************************************************** 3945 3946 The do_marking_step(time_target_ms, ...) method is the building 3947 block of the parallel marking framework. 
It can be called in parallel
3948 with other invocations of do_marking_step() on different tasks
3949 (but only one per task, obviously) and concurrently with the
3950 mutator threads, or during remark, hence it eliminates the need
3951 for two versions of the code. When called during remark, it will
3952 pick up from where the task left off during the concurrent marking
3953 phase. Interestingly, tasks are also claimable during evacuation
3954 pauses, since do_marking_step() ensures that it aborts before
3955 it needs to yield.
3956
3957 The data structures that it uses to do marking work are the
3958 following:
3959
3960 (1) Marking Bitmap. If there are gray objects that appear only
3961 on the bitmap (this happens either when dealing with an overflow
3962 or when the initial marking phase has simply marked the roots
3963 and didn't push them on the stack), then tasks claim heap
3964 regions whose bitmap they then scan to find gray objects. A
3965 global finger indicates where the end of the last claimed region
3966 is. A local finger indicates how far into the region a task has
3967 scanned. The two fingers are used to determine how to gray an
3968 object (i.e. whether simply marking it is OK, as it will be
3969 visited by a task in the future, or whether it also needs to be
3970 pushed on a stack).
3971
3972 (2) Local Queue. The local queue of the task, which is accessed
3973 reasonably efficiently by the task. Other tasks can steal from
3974 it when they run out of work. Throughout the marking phase, a
3975 task attempts to keep its local queue short but not totally
3976 empty, so that entries are available for stealing by other
3977 tasks. Only when there is no more work will a task totally
3978 drain its local queue.
3979
3980 (3) Global Mark Stack. This handles local queue overflow. During
3981 marking only sets of entries are moved between it and the local
3982 queues, as access to it requires a mutex and more fine-grained
3983 interaction with it might cause contention. If it
3984 overflows, then the marking phase should restart and iterate
3985 over the bitmap to identify gray objects. Throughout the marking
3986 phase, tasks attempt to keep the global mark stack at a small
3987 length but not totally empty, so that entries are available for
3988 popping by other tasks. Only when there is no more work will
3989 tasks totally drain the global mark stack.
3990
3991 (4) SATB Buffer Queue. This is where completed SATB buffers are
3992 made available. Buffers are regularly removed from this queue
3993 and scanned for roots, so that the queue doesn't get too
3994 long. During remark, all completed buffers are processed, as
3995 well as the filled-in parts of any uncompleted buffers.
3996
3997 The do_marking_step() method tries to abort when the time target
3998 has been reached. There are a few other cases when the
3999 do_marking_step() method also aborts:
4000
4001 (1) When the marking phase has been aborted (after a Full GC).
4002
4003 (2) When a global overflow (on the global stack) has been
4004 triggered. Before the task aborts, it will actually sync up with
4005 the other tasks to ensure that all the marking data structures
4006 (local queues, stacks, fingers etc.) are re-initialized so that
4007 when do_marking_step() completes, the marking phase can
4008 immediately restart.
4009
4010 (3) When enough completed SATB buffers are available. The
4011 do_marking_step() method only tries to drain SATB buffers right
4012 at the beginning.
So, if enough buffers are available, the
4013 marking step aborts and the SATB buffers are processed at
4014 the beginning of the next invocation.
4015
4016 (4) To yield. When we have to yield, we abort and yield
4017 right at the end of do_marking_step(). This saves us from a lot
4018 of hassle as, by yielding, we might allow a Full GC. If this
4019 happens then objects will be compacted underneath our feet, the
4020 heap might shrink, etc. We save checking for this by just
4021 aborting and doing the yield right at the end.
4022
4023 From the above it follows that the do_marking_step() method should
4024 be called in a loop (or, otherwise, regularly) until it completes.
4025
4026 If a marking step completes without its has_aborted() flag being
4027 true, it means it has completed the current marking phase (and
4028 also all other marking tasks have done so and have all synced up).
4029
4030 A method called regular_clock_call() is invoked "regularly" (in
4031 sub-ms intervals) throughout marking. It is this clock method that
4032 checks all the abort conditions which were mentioned above and
4033 decides when the task should abort. A work-based scheme is used to
4034 trigger this clock method: when the number of object words the
4035 marking phase has scanned or the number of references the marking
4036 phase has visited reaches a given limit. Additional invocations of
4037 the clock method have been planted in a few other strategic places
4038 too. The initial reason for the clock method was to avoid calling
4039 vtime too regularly, as it is quite expensive. So, once it was in
4040 place, it was natural to piggy-back all the other conditions on it
4041 too and not constantly check them throughout the code.
4042
4043 If do_termination is true then do_marking_step will enter its
4044 termination protocol.
4045
4046 The value of is_serial must be true when do_marking_step is being
4047 called serially (i.e. by the VMThread) and do_marking_step should
4048 skip any synchronization in the termination and overflow code.
4049 Examples include the serial remark code and the serial reference
4050 processing closures.
4051
4052 The value of is_serial must be false when do_marking_step is
4053 being called by any of the worker threads in a work gang.
4054 Examples include the concurrent marking code (CMMarkingTask),
4055 the MT remark code, and the MT reference processing closures.
4056
4057 *****************************************************************************/
4058
4059 void CMTask::do_marking_step(double time_target_ms,
4060 bool do_termination,
4061 bool is_serial) {
4062 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4063 assert(concurrent() == _cm->concurrent(), "they should be the same");
4064
4065 G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4066 assert(_task_queues != NULL, "invariant");
4067 assert(_task_queue != NULL, "invariant");
4068 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
4069
4070 assert(!_claimed,
4071 "only one thread should claim this task at any one time");
4072
4073 // OK, this doesn't safeguard against all possible scenarios, as it is
4074 // possible for two threads to set the _claimed flag at the same
4075 // time. But it is only for debugging purposes anyway and it will
4076 // catch most problems.
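// (Added note:) _claimed is reset to false at the very end of
// do_marking_step(), so it acts as best-effort debugging state rather
// than a real lock.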
4077 _claimed = true; 4078 4079 _start_time_ms = os::elapsedVTime() * 1000.0; 4080 statsOnly( _interval_start_time_ms = _start_time_ms ); 4081 4082 // If do_stealing is true then do_marking_step will attempt to 4083 // steal work from the other CMTasks. It only makes sense to 4084 // enable stealing when the termination protocol is enabled 4085 // and do_marking_step() is not being called serially. 4086 bool do_stealing = do_termination && !is_serial; 4087 4088 double diff_prediction_ms = 4089 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 4090 _time_target_ms = time_target_ms - diff_prediction_ms; 4091 4092 // set up the variables that are used in the work-based scheme to 4093 // call the regular clock method 4094 _words_scanned = 0; 4095 _refs_reached = 0; 4096 recalculate_limits(); 4097 4098 // clear all flags 4099 clear_has_aborted(); 4100 _has_timed_out = false; 4101 _draining_satb_buffers = false; 4102 4103 ++_calls; 4104 4105 if (_cm->verbose_low()) { 4106 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 4107 "target = %1.2lfms >>>>>>>>>>", 4108 _worker_id, _calls, _time_target_ms); 4109 } 4110 4111 // Set up the bitmap and oop closures. Anything that uses them is 4112 // eventually called from this method, so it is OK to allocate these 4113 // statically. 4114 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 4115 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 4116 set_cm_oop_closure(&cm_oop_closure); 4117 4118 if (_cm->has_overflown()) { 4119 // This can happen if the mark stack overflows during a GC pause 4120 // and this task, after a yield point, restarts. We have to abort 4121 // as we need to get into the overflow protocol which happens 4122 // right at the end of this task. 4123 set_has_aborted(); 4124 } 4125 4126 // First drain any available SATB buffers. After this, we will not 4127 // look at SATB buffers before the next invocation of this method. 4128 // If enough completed SATB buffers are queued up, the regular clock 4129 // will abort this task so that it restarts. 4130 drain_satb_buffers(); 4131 // ...then partially drain the local queue and the global stack 4132 drain_local_queue(true); 4133 drain_global_stack(true); 4134 4135 do { 4136 if (!has_aborted() && _curr_region != NULL) { 4137 // This means that we're already holding on to a region. 4138 assert(_finger != NULL, "if region is not NULL, then the finger " 4139 "should not be NULL either"); 4140 4141 // We might have restarted this task after an evacuation pause 4142 // which might have evacuated the region we're holding on to 4143 // underneath our feet. Let's read its limit again to make sure 4144 // that we do not iterate over a region of the heap that 4145 // contains garbage (update_region_limit() will also move 4146 // _finger to the start of the region if it is found empty). 4147 update_region_limit(); 4148 // We will start from _finger not from the start of the region, 4149 // as we might be restarting this task after aborting half-way 4150 // through scanning this region. In this case, _finger points to 4151 // the address where we last found a marked object. If this is a 4152 // fresh region, _finger points to start(). 
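// (Hypothetical scenario for clarity:) if a previous invocation aborted
// after scanning half of this region, _finger now points at the last
// marked object found there, so the MemRegion built below covers only
// the unscanned tail [_finger, _region_limit).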
4153 MemRegion mr = MemRegion(_finger, _region_limit); 4154 4155 if (_cm->verbose_low()) { 4156 gclog_or_tty->print_cr("[%u] we're scanning part " 4157 "["PTR_FORMAT", "PTR_FORMAT") " 4158 "of region "HR_FORMAT, 4159 _worker_id, p2i(_finger), p2i(_region_limit), 4160 HR_FORMAT_PARAMS(_curr_region)); 4161 } 4162 4163 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 4164 "humongous regions should go around loop once only"); 4165 4166 // Some special cases: 4167 // If the memory region is empty, we can just give up the region. 4168 // If the current region is humongous then we only need to check 4169 // the bitmap for the bit associated with the start of the object, 4170 // scan the object if it's live, and give up the region. 4171 // Otherwise, let's iterate over the bitmap of the part of the region 4172 // that is left. 4173 // If the iteration is successful, give up the region. 4174 if (mr.is_empty()) { 4175 giveup_current_region(); 4176 regular_clock_call(); 4177 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 4178 if (_nextMarkBitMap->isMarked(mr.start())) { 4179 // The object is marked - apply the closure 4180 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4181 bitmap_closure.do_bit(offset); 4182 } 4183 // Even if this task aborted while scanning the humongous object 4184 // we can (and should) give up the current region. 4185 giveup_current_region(); 4186 regular_clock_call(); 4187 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4188 giveup_current_region(); 4189 regular_clock_call(); 4190 } else { 4191 assert(has_aborted(), "currently the only way to do so"); 4192 // The only way to abort the bitmap iteration is to return 4193 // false from the do_bit() method. However, inside the 4194 // do_bit() method we move the _finger to point to the 4195 // object currently being looked at. So, if we bail out, we 4196 // have definitely set _finger to something non-null. 4197 assert(_finger != NULL, "invariant"); 4198 4199 // Region iteration was actually aborted. So now _finger 4200 // points to the address of the object we last scanned. If we 4201 // leave it there, when we restart this task, we will rescan 4202 // the object. It is easy to avoid this. We move the finger by 4203 // enough to point to the next possible object header (the 4204 // bitmap knows by how much we need to move it as it knows its 4205 // granularity). 4206 assert(_finger < _region_limit, "invariant"); 4207 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4208 // Check if bitmap iteration was aborted while scanning the last object 4209 if (new_finger >= _region_limit) { 4210 giveup_current_region(); 4211 } else { 4212 move_finger_to(new_finger); 4213 } 4214 } 4215 } 4216 // At this point we have either completed iterating over the 4217 // region we were holding on to, or we have aborted. 4218 4219 // We then partially drain the local queue and the global stack. 4220 // (Do we really need this?) 4221 drain_local_queue(true); 4222 drain_global_stack(true); 4223 4224 // Read the note on the claim_region() method on why it might 4225 // return NULL with potentially more regions available for 4226 // claiming and why we have to check out_of_regions() to determine 4227 // whether we're done or not. 4228 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4229 // We are going to try to claim a new region. We should have 4230 // given up on the previous one. 
4231 // Separated the asserts so that we know which one fires.
4232 assert(_curr_region == NULL, "invariant");
4233 assert(_finger == NULL, "invariant");
4234 assert(_region_limit == NULL, "invariant");
4235 if (_cm->verbose_low()) {
4236 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4237 }
4238 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4239 if (claimed_region != NULL) {
4240 // Yes, we managed to claim one
4241 statsOnly( ++_regions_claimed );
4242
4243 if (_cm->verbose_low()) {
4244 gclog_or_tty->print_cr("[%u] we successfully claimed "
4245 "region "PTR_FORMAT,
4246 _worker_id, p2i(claimed_region));
4247 }
4248
4249 setup_for_region(claimed_region);
4250 assert(_curr_region == claimed_region, "invariant");
4251 }
4252 // It is important to call the regular clock here. It might take
4253 // a while to claim a region if, for example, we hit a large
4254 // block of empty regions. So we need to call the regular clock
4255 // method once round the loop to make sure it's called
4256 // frequently enough.
4257 regular_clock_call();
4258 }
4259
4260 if (!has_aborted() && _curr_region == NULL) {
4261 assert(_cm->out_of_regions(),
4262 "at this point we should be out of regions");
4263 }
4264 } while ( _curr_region != NULL && !has_aborted());
4265
4266 if (!has_aborted()) {
4267 // We cannot check whether the global stack is empty, since other
4268 // tasks might be pushing objects to it concurrently.
4269 assert(_cm->out_of_regions(),
4270 "at this point we should be out of regions");
4271
4272 if (_cm->verbose_low()) {
4273 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4274 }
4275
4276 // Try to reduce the number of available SATB buffers so that
4277 // remark has less work to do.
4278 drain_satb_buffers();
4279 }
4280
4281 // Since we've done everything else, we can now totally drain the
4282 // local queue and global stack.
4283 drain_local_queue(false);
4284 drain_global_stack(false);
4285
4286 // Attempt at work stealing from other tasks' queues.
4287 if (do_stealing && !has_aborted()) {
4288 // We have not aborted. This means that we have finished all that
4289 // we could. Let's try to do some stealing...
4290
4291 // We cannot check whether the global stack is empty, since other
4292 // tasks might be pushing objects to it concurrently.
4293 assert(_cm->out_of_regions() && _task_queue->size() == 0,
4294 "only way to reach here");
4295
4296 if (_cm->verbose_low()) {
4297 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4298 }
4299
4300 while (!has_aborted()) {
4301 oop obj;
4302 statsOnly( ++_steal_attempts );
4303
4304 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4305 if (_cm->verbose_medium()) {
4306 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4307 _worker_id, p2i((void*) obj));
4308 }
4309
4310 statsOnly( ++_steals );
4311
4312 assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4313 "any stolen object should be marked");
4314 scan_object(obj);
4315
4316 // And since we're towards the end, let's totally drain the
4317 // local queue and global stack.
4318 drain_local_queue(false);
4319 drain_global_stack(false);
4320 } else {
4321 break;
4322 }
4323 }
4324 }
4325
4326 // If we are about to wrap up and go into termination, check if we
4327 // should raise the overflow flag.
4328 if (do_termination && !has_aborted()) {
4329 if (_cm->force_overflow()->should_force()) {
4330 _cm->set_has_overflown();
4331 regular_clock_call();
4332 }
4333 }
4334
4335 // We still haven't aborted.
Now, let's try to get into the
4336 // termination protocol.
4337 if (do_termination && !has_aborted()) {
4338 // We cannot check whether the global stack is empty, since other
4339 // tasks might be concurrently pushing objects on it.
4340 // Separated the asserts so that we know which one fires.
4341 assert(_cm->out_of_regions(), "only way to reach here");
4342 assert(_task_queue->size() == 0, "only way to reach here");
4343
4344 if (_cm->verbose_low()) {
4345 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4346 }
4347
4348 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4349
4350 // The CMTask class also extends the TerminatorTerminator class,
4351 // hence its should_exit_termination() method will also decide
4352 // whether to exit the termination protocol or not.
4353 bool finished = (is_serial ||
4354 _cm->terminator()->offer_termination(this));
4355 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4356 _termination_time_ms +=
4357 termination_end_time_ms - _termination_start_time_ms;
4358
4359 if (finished) {
4360 // We're all done.
4361
4362 if (_worker_id == 0) {
4363 // let's allow task 0 to do this
4364 if (concurrent()) {
4365 assert(_cm->concurrent_marking_in_progress(), "invariant");
4366 // we need to set this to false before the next
4367 // safepoint. This way we ensure that the marking phase
4368 // doesn't observe any more heap expansions.
4369 _cm->clear_concurrent_marking_in_progress();
4370 }
4371 }
4372
4373 // We can now guarantee that the global stack is empty, since
4374 // all other tasks have finished. We separated the guarantees so
4375 // that, if a condition is false, we can immediately find out
4376 // which one.
4377 guarantee(_cm->out_of_regions(), "only way to reach here");
4378 guarantee(_cm->mark_stack_empty(), "only way to reach here");
4379 guarantee(_task_queue->size() == 0, "only way to reach here");
4380 guarantee(!_cm->has_overflown(), "only way to reach here");
4381 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4382
4383 if (_cm->verbose_low()) {
4384 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4385 }
4386 } else {
4387 // Apparently there's more work to do. Let's abort this task; it
4388 // will be restarted and we can hopefully find more things to do then.
4389
4390 if (_cm->verbose_low()) {
4391 gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4392 _worker_id);
4393 }
4394
4395 set_has_aborted();
4396 statsOnly( ++_aborted_termination );
4397 }
4398 }
4399
4400 // Mainly for debugging purposes to make sure that a pointer to the
4401 // closure which was statically allocated in this frame doesn't
4402 // escape it by accident.
4403 set_cm_oop_closure(NULL);
4404 double end_time_ms = os::elapsedVTime() * 1000.0;
4405 double elapsed_time_ms = end_time_ms - _start_time_ms;
4406 // Update the step history.
4407 _step_times_ms.add(elapsed_time_ms);
4408
4409 if (has_aborted()) {
4410 // The task was aborted for some reason.
4411
4412 statsOnly( ++_aborted );
4413
4414 if (_has_timed_out) {
4415 double diff_ms = elapsed_time_ms - _time_target_ms;
4416 // Keep statistics of how well we did with respect to hitting
4417 // our target only if we actually timed out (if we aborted for
4418 // other reasons, then the results might get skewed).
4419 _marking_step_diffs_ms.add(diff_ms);
4420 }
4421
4422 if (_cm->has_overflown()) {
4423 // This is the interesting one. We aborted because a global
4424 // overflow was raised.
This means we have to restart the 4425 // marking phase and start iterating over regions. However, in 4426 // order to do this we have to make sure that all tasks stop 4427 // what they are doing and re-initialize in a safe manner. We 4428 // will achieve this with the use of two barrier sync points. 4429 4430 if (_cm->verbose_low()) { 4431 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4432 } 4433 4434 if (!is_serial) { 4435 // We only need to enter the sync barrier if being called 4436 // from a parallel context 4437 _cm->enter_first_sync_barrier(_worker_id); 4438 4439 // When we exit this sync barrier we know that all tasks have 4440 // stopped doing marking work. So, it's now safe to 4441 // re-initialize our data structures. At the end of this method, 4442 // task 0 will clear the global data structures. 4443 } 4444 4445 statsOnly( ++_aborted_overflow ); 4446 4447 // We clear the local state of this task... 4448 clear_region_fields(); 4449 4450 if (!is_serial) { 4451 // ...and enter the second barrier. 4452 _cm->enter_second_sync_barrier(_worker_id); 4453 } 4454 // At this point, if we're during the concurrent phase of 4455 // marking, everything has been re-initialized and we're 4456 // ready to restart. 4457 } 4458 4459 if (_cm->verbose_low()) { 4460 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4461 "elapsed = %1.2lfms <<<<<<<<<<", 4462 _worker_id, _time_target_ms, elapsed_time_ms); 4463 if (_cm->has_aborted()) { 4464 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4465 _worker_id); 4466 } 4467 } 4468 } else { 4469 if (_cm->verbose_low()) { 4470 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4471 "elapsed = %1.2lfms <<<<<<<<<<", 4472 _worker_id, _time_target_ms, elapsed_time_ms); 4473 } 4474 } 4475 4476 _claimed = false; 4477 } 4478 4479 CMTask::CMTask(uint worker_id, 4480 ConcurrentMark* cm, 4481 size_t* marked_bytes, 4482 BitMap* card_bm, 4483 CMTaskQueue* task_queue, 4484 CMTaskQueueSet* task_queues) 4485 : _g1h(G1CollectedHeap::heap()), 4486 _worker_id(worker_id), _cm(cm), 4487 _claimed(false), 4488 _nextMarkBitMap(NULL), _hash_seed(17), 4489 _task_queue(task_queue), 4490 _task_queues(task_queues), 4491 _cm_oop_closure(NULL), 4492 _marked_bytes_array(marked_bytes), 4493 _card_bm(card_bm) { 4494 guarantee(task_queue != NULL, "invariant"); 4495 guarantee(task_queues != NULL, "invariant"); 4496 4497 statsOnly( _clock_due_to_scanning = 0; 4498 _clock_due_to_marking = 0 ); 4499 4500 _marking_step_diffs_ms.add(0.5); 4501 } 4502 4503 // These are formatting macros that are used below to ensure 4504 // consistent formatting. The *_H_* versions are used to format the 4505 // header for a particular value and they should be kept consistent 4506 // with the corresponding macro. Also note that most of the macros add 4507 // the necessary white space (as a prefix) which makes them a bit 4508 // easier to compose. 4509 4510 // All the output lines are prefixed with this string to be able to 4511 // identify them easily in a large log file. 
4512 #define G1PPRL_LINE_PREFIX "###"
4513
4514 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
4515 #ifdef _LP64
4516 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4517 #else // _LP64
4518 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4519 #endif // _LP64
4520
4521 // For per-region info
4522 #define G1PPRL_TYPE_FORMAT " %-4s"
4523 #define G1PPRL_TYPE_H_FORMAT " %4s"
4524 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
4525 #define G1PPRL_BYTE_H_FORMAT " %9s"
4526 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4527 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4528
4529 // For summary info
4530 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
4531 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
4532 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
4533 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
4534
4535 G1PrintRegionLivenessInfoClosure::
4536 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4537 : _out(out),
4538 _total_used_bytes(0), _total_capacity_bytes(0),
4539 _total_prev_live_bytes(0), _total_next_live_bytes(0),
4540 _hum_used_bytes(0), _hum_capacity_bytes(0),
4541 _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4542 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4543 G1CollectedHeap* g1h = G1CollectedHeap::heap();
4544 MemRegion g1_reserved = g1h->g1_reserved();
4545 double now = os::elapsedTime();
4546
4547 // Print the header of the output.
4548 _out->cr();
4549 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4550 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4551 G1PPRL_SUM_ADDR_FORMAT("reserved")
4552 G1PPRL_SUM_BYTE_FORMAT("region-size"),
4553 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4554 HeapRegion::GrainBytes);
4555 _out->print_cr(G1PPRL_LINE_PREFIX);
4556 _out->print_cr(G1PPRL_LINE_PREFIX
4557 G1PPRL_TYPE_H_FORMAT
4558 G1PPRL_ADDR_BASE_H_FORMAT
4559 G1PPRL_BYTE_H_FORMAT
4560 G1PPRL_BYTE_H_FORMAT
4561 G1PPRL_BYTE_H_FORMAT
4562 G1PPRL_DOUBLE_H_FORMAT
4563 G1PPRL_BYTE_H_FORMAT
4564 G1PPRL_BYTE_H_FORMAT,
4565 "type", "address-range",
4566 "used", "prev-live", "next-live", "gc-eff",
4567 "remset", "code-roots");
4568 _out->print_cr(G1PPRL_LINE_PREFIX
4569 G1PPRL_TYPE_H_FORMAT
4570 G1PPRL_ADDR_BASE_H_FORMAT
4571 G1PPRL_BYTE_H_FORMAT
4572 G1PPRL_BYTE_H_FORMAT
4573 G1PPRL_BYTE_H_FORMAT
4574 G1PPRL_DOUBLE_H_FORMAT
4575 G1PPRL_BYTE_H_FORMAT
4576 G1PPRL_BYTE_H_FORMAT,
4577 "", "",
4578 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4579 "(bytes)", "(bytes)");
4580 }
4581
4582 // It takes as a parameter a reference to one of the _hum_* fields; it
4583 // deduces the corresponding value for a region in a humongous region
4584 // series (either the region size, or what's left if the _hum_* field
4585 // is < the region size), and updates the _hum_* field accordingly.
4586 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4587 size_t bytes = 0;
4588 // The > 0 check is to deal with the prev and next live bytes which
4589 // could be 0.
4590 if (*hum_bytes > 0) {
4591 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4592 *hum_bytes -= bytes;
4593 }
4594 return bytes;
4595 }
4596
4597 // It deduces the values for a region in a humongous region series
4598 // from the _hum_* fields and updates those accordingly. It assumes
4599 // that the _hum_* fields have already been set up from the "starts
4600 // humongous" region and we visit the regions in address order.
4601 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4602 size_t* capacity_bytes, 4603 size_t* prev_live_bytes, 4604 size_t* next_live_bytes) { 4605 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4606 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4607 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4608 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4609 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4610 } 4611 4612 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4613 const char* type = r->get_type_str(); 4614 HeapWord* bottom = r->bottom(); 4615 HeapWord* end = r->end(); 4616 size_t capacity_bytes = r->capacity(); 4617 size_t used_bytes = r->used(); 4618 size_t prev_live_bytes = r->live_bytes(); 4619 size_t next_live_bytes = r->next_live_bytes(); 4620 double gc_eff = r->gc_efficiency(); 4621 size_t remset_bytes = r->rem_set()->mem_size(); 4622 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4623 4624 if (r->is_starts_humongous()) { 4625 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4626 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4627 "they should have been zeroed after the last time we used them"); 4628 // Set up the _hum_* fields. 4629 _hum_capacity_bytes = capacity_bytes; 4630 _hum_used_bytes = used_bytes; 4631 _hum_prev_live_bytes = prev_live_bytes; 4632 _hum_next_live_bytes = next_live_bytes; 4633 get_hum_bytes(&used_bytes, &capacity_bytes, 4634 &prev_live_bytes, &next_live_bytes); 4635 end = bottom + HeapRegion::GrainWords; 4636 } else if (r->is_continues_humongous()) { 4637 get_hum_bytes(&used_bytes, &capacity_bytes, 4638 &prev_live_bytes, &next_live_bytes); 4639 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4640 } 4641 4642 _total_used_bytes += used_bytes; 4643 _total_capacity_bytes += capacity_bytes; 4644 _total_prev_live_bytes += prev_live_bytes; 4645 _total_next_live_bytes += next_live_bytes; 4646 _total_remset_bytes += remset_bytes; 4647 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4648 4649 // Print a line for this particular region. 4650 _out->print_cr(G1PPRL_LINE_PREFIX 4651 G1PPRL_TYPE_FORMAT 4652 G1PPRL_ADDR_BASE_FORMAT 4653 G1PPRL_BYTE_FORMAT 4654 G1PPRL_BYTE_FORMAT 4655 G1PPRL_BYTE_FORMAT 4656 G1PPRL_DOUBLE_FORMAT 4657 G1PPRL_BYTE_FORMAT 4658 G1PPRL_BYTE_FORMAT, 4659 type, p2i(bottom), p2i(end), 4660 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4661 remset_bytes, strong_code_roots_bytes); 4662 4663 return false; 4664 } 4665 4666 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4667 // add static memory usages to remembered set sizes 4668 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4669 // Print the footer of the output. 
4670 _out->print_cr(G1PPRL_LINE_PREFIX); 4671 _out->print_cr(G1PPRL_LINE_PREFIX 4672 " SUMMARY" 4673 G1PPRL_SUM_MB_FORMAT("capacity") 4674 G1PPRL_SUM_MB_PERC_FORMAT("used") 4675 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4676 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4677 G1PPRL_SUM_MB_FORMAT("remset") 4678 G1PPRL_SUM_MB_FORMAT("code-roots"), 4679 bytes_to_mb(_total_capacity_bytes), 4680 bytes_to_mb(_total_used_bytes), 4681 perc(_total_used_bytes, _total_capacity_bytes), 4682 bytes_to_mb(_total_prev_live_bytes), 4683 perc(_total_prev_live_bytes, _total_capacity_bytes), 4684 bytes_to_mb(_total_next_live_bytes), 4685 perc(_total_next_live_bytes, _total_capacity_bytes), 4686 bytes_to_mb(_total_remset_bytes), 4687 bytes_to_mb(_total_strong_code_roots_bytes)); 4688 _out->cr(); 4689 }
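
// Usage sketch (hedged): callers are expected to drive this closure over
// the whole heap, along these lines (phase name hypothetical):
//
//   G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "post-marking");
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//
// The constructor prints the table header, each doHeapRegion() call
// prints one per-region row, and the destructor prints the summary
// footer.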