/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
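  // Each bit in the map covers (1 << _shifter) heap words, so a marked
  // object can only begin at such a boundary; rounding keeps the address
  // consistent with the heapWordToOffset() conversion below.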
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return heap_size / mark_distance();
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
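  // When _may_yield is set, doHeapRegion() periodically calls back into
  // ConcurrentMark's yield check; if marking has been aborted in the
  // meantime, the closure returns true to terminate the iteration early.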
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    if (_suspendible) {
      SuspendibleThreadSet::join();
    }
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
    if (_suspendible) {
      SuspendibleThreadSet::leave();
    }
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

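// The global mark stack: a fixed-capacity array of oops backed by reserved
// virtual memory. Concurrent pushes synchronize via CAS on _index; the bulk
// array operations below serialize on ParGCRareEvent_lock instead.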
CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflowed the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
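    // The CAS failed: some other thread changed _index first. Loop around,
    // re-read _index and retry with the new value.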
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
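  // Unless the scan was aborted, claim_next() must have handed out every
  // survivor region by the time we get here.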
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
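  // The ConcurrentMarkThread drives the concurrent phases of the marking
  // cycle; failing to create its underlying OS thread is fatal at startup.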
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
                                              (double) os::processor_count();
    double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor             = sleep_factor;
    _marking_task_overhead    = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
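    // scale_parallel_threads() yields roughly one marking thread per four
    // parallel GC threads, with a minimum of one.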
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
                   (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
                                           _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
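    // Only flag combinations that were explicitly specified on the command
    // line are checked; ergonomically chosen defaults are assumed consistent.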
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial */);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.
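  // Reset the overflow/restart bookkeeping before launching the marking
  // tasks; set_concurrency_and_phase() below also resizes the terminator
  // and the two overflow barriers for the chosen number of workers.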

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots()
  // and the decisions on that MT processing are made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.
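// Verification is one-sided: a bit set in the recalculated (expected) data
// but clear in the actual data counts as a failure, while extra bits in
// the actual data are tolerated.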

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
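    // Walk the card indices spanned by [bottom, top) and compare the two
    // card bitmaps bit for bit.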
    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        if (_verbose) {
          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                 "expected: %s, actual: %s",
                                 hr->hrm_index(), i,
                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
        }
        failures += 1;
      }
    }

    if (failures > 0 && _verbose) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
                             HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};

class G1ParVerifyFinalCountTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int _failures;
  bool _verbose;

  HeapRegionClaimer _hrclaimer;

public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0), _verbose(false),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
    assert(VerifyDuringGC, "don't call this otherwise");
    assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
    assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");

    _verbose = _cm->verbose_medium();
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    VerifyLiveObjectDataHRClosure verify_cl(_g1h,
                                            _actual_region_bm, _actual_card_bm,
                                            _expected_region_bm,
                                            _expected_card_bm,
                                            _verbose);

    _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);

    Atomic::add(verify_cl.failures(), &_failures);
  }

  int failures() const { return _failures; }
};

// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top]
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region
// containing live data, in the region liveness bitmap.

class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
public:
  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                              BitMap* region_bm,
                              BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()).
      // Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* top   = hr->top();

    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

    // Mark the allocated-since-marking portion...
    if (ntams < top) {
      // This definitely means the region has live objects.
      set_bit_for_region(hr);

      // Now set the bits in the card bitmap for [ntams, top)
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in the heap, top
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      assert(end_idx <= _card_bm->size(),
             err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
                     end_idx, _card_bm->size()));
      assert(start_idx < _card_bm->size(),
             err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
                     start_idx, _card_bm->size()));

      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
    }

    // Set the bit for the region if it contains live data.
    if (hr->next_marked_bytes() > 0) {
      set_bit_for_region(hr);
    }

    return false;
  }
};

class G1ParFinalCountTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint _n_workers;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
    : AbstractGangTask("G1 final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    FinalCountDataUpdateClosure final_update_cl(_g1h,
                                                _actual_region_bm,
                                                _actual_card_bm);

    _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
  }
};

class G1ParNoteEndTask;

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _max_live_bytes;
  uint _regions_claimed;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  HeapRegionSetCount _old_regions_removed;
  HeapRegionSetCount _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;
  double _claimed_region_time;
  double _max_region_time;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _max_live_bytes(0), _regions_claimed(0),
    _freed_bytes(0),
    _claimed_region_time(0.0), _max_region_time(0.0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(),
    _humongous_regions_removed(),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
  const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_continues_humongous()) {
      return false;
    }
    _g1->reset_gc_time_stamps(hr);
    double start = os::elapsedTime();
    _regions_claimed++;
    hr->note_end_of_marking();
    _max_live_bytes += hr->max_live_bytes();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous(), "we should only see starts humongous");
        _humongous_regions_removed.increment(1u, hr->capacity());
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed.increment(1u, hr->capacity());
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    double region_time = (os::elapsedTime() - start);
    _claimed_region_time += region_time;
    if (region_time > _max_region_time) {
      _max_region_time = region_time;
    }
    return false;
  }

  size_t max_live_bytes() { return _max_live_bytes; }
  uint regions_claimed() { return _regions_claimed; }
  double claimed_region_time_sec() { return _claimed_region_time; }
  double max_region_time_sec() { return _max_region_time; }
};
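
// Illustrative note (an interpretation, not taken from the original source):
// the closure above frees a region during cleanup only when marking proved
// it completely dead, i.e.
//
//   hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()
//
// Young regions are excluded here; they are reclaimed by evacuation pauses
// instead. Regions that survive this test keep their storage and are merely
// handed to the remembered set cleanup task.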

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  size_t _max_live_bytes;
  size_t _freed_bytes;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0),
    _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    double start = os::elapsedTime();
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
      _max_live_bytes += g1_note_end.max_live_bytes();
      _freed_bytes += g1_note_end.freed_bytes();

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we cannot guarantee that we only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
  size_t max_live_bytes() { return _max_live_bytes; }
  size_t freed_bytes() { return _freed_bytes; }
};

class G1ParScrubRemSetTask: public AbstractGangTask {
protected:
  G1RemSet* _g1rs;
  BitMap* _region_bm;
  BitMap* _card_bm;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
    AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
    _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
  }

};

void ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  g1h->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Cleanup Start");

  G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  uint n_workers;

  // Do counting once more with the world stopped for good measure.
  G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);

  g1h->set_par_threads();
  n_workers = g1h->n_par_threads();
  assert(g1h->n_par_threads() == n_workers,
         "Should not have been reset");
  g1h->workers()->run_task(&g1_par_count_task);
  // Done with the parallel phase so reset to 0.
  g1h->set_par_threads(0);

  if (VerifyDuringGC) {
    // Verify that the counting data accumulated during marking matches
    // that calculated by walking the marking bitmap.

    // Bitmaps to hold expected values
    BitMap expected_region_bm(_region_bm.size(), true);
    BitMap expected_card_bm(_card_bm.size(), true);

    G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
                                                 &_region_bm,
                                                 &_card_bm,
                                                 &expected_region_bm,
                                                 &expected_card_bm);

    g1h->set_par_threads((int)n_workers);
    g1h->workers()->run_task(&g1_par_verify_task);
    // Done with the parallel phase so reset to 0.
    g1h->set_par_threads(0);

    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  }

  size_t start_used_bytes = g1h->used();
  g1h->set_marking_complete();

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install the newly created mark bitmap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->set_par_threads((int)n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->set_par_threads(0);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the rem sets before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
    g1h->set_par_threads((int)n_workers);
    g1h->workers()->run_task(&g1_par_scrub_rs_task);
    g1h->set_par_threads(0);

    double rs_scrub_end = os::elapsedTime();
    double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
    _total_rs_scrub_time += this_rs_scrub_time;
  }

  // This will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  if (G1Log::fine()) {
    g1h->print_size_transition(gclog_or_tty,
                               start_used_bytes,
                               g1h->used(),
                               g1h->capacity());
  }

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(after)");
  }

  g1h->check_bitmaps("Cleanup End");

  g1h->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();

  g1h->trace_heap_after_concurrent_cycle();
}

void ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                           "cleanup list has %u entries",
                           _cleanup_list.length());
  }

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks.
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                               "appending %u entries to the secondary_free_list, "
                               "cleanup list still has %u entries",
                               tmp_free_list.length(),
                               _cleanup_list.length());
      }

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }

      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}
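
// Illustrative note (an interpretation of the predicate above): an object is
// reported live if it lies outside the G1 reserved heap, or if it is inside
// the heap and not "ill", i.e. it cannot be proven dead from the current
// marking information. Only objects that fail both tests are candidates for
// reference clearing and enqueueing in the code that follows.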

// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the CMTask associated with a worker thread (for serial
// reference processing the CMTask for worker 0 is used) to preserve (mark)
// and trace referent objects.
//
// Using the CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// for the workers to interfere with each other, which could occur if
// they were operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  ConcurrentMark* _cm;
  CMTask* _task;
  int _ref_counter_limit;
  int _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] we're looking at location "
                               "*"PTR_FORMAT" = "PTR_FORMAT,
                               _task->worker_id(), p2i(p), p2i((void*) obj));
      }

      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call CMTask::do_marking_step() to
        // process these entries.
        //
        // We call CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    } else {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
      }
    }
  }
};
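
// Illustrative sketch (rough pseudo-code, not the ReferenceProcessor API):
// how reference processing combines the is-alive predicate with the
// keep-alive closure above and the drain closure defined below:
//
//   for each discovered Reference r:
//     if the policy decides the referent must stay live:
//       keep_alive(&referent(r));   // closure above: mark and trace
//     else:
//       clear the referent and enqueue r;
//   complete_gc->do_void();         // drain closure below: finish tracing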

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the CMTask associated with a given worker thread (for serial
// reference processing the CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure: public VoidClosure {
  ConcurrentMark* _cm;
  CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
                               _task->worker_id(), BOOL_TO_STR(_is_serial));
      }

      // We call CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking.

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  WorkGang* _workers;
  int _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          ConcurrentMark* cm,
                          WorkGang* workers,
                          int n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _g1h->set_par_threads(_active_workers);
  _workers->run_task(&proc_task_proxy);
  _g1h->set_par_threads(0);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _g1h->set_par_threads(_active_workers);
  _workers->run_task(&enq_task_proxy);
  _g1h->set_par_threads(0);
}

void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}

void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists.
    // We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    G1CMTraceTime t("GC ref-proc", G1Log::finer());

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (e.g.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          g1h->gc_timer_cm(),
                                          concurrent_gc_id());
    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.

    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed
    return;
  }

  assert(_markStack.isEmpty(), "Marking should have completed");

  // Unload Klasses, String, Symbols, Code Cache, etc.
  {
    G1CMTraceTime trace("Unloading", G1Log::finer());

    if (ClassUnloadingWithConcurrentMark) {
      // Cleaning of klasses depends on correct information from MetadataOnStackMark.
      // The CodeCache::mark_on_stack part is too slow to be done serially, so it is
      // handled during the weakRefsWorkParallelPart phase.
      // Defer the cleaning until we have complete on_stack data.
      MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);

      bool purged_classes;

      {
        G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
      }

      {
        G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
      }

      {
        G1CMTraceTime trace("Deallocate Metadata", G1Log::finest());
        ClassLoaderDataGraph::free_deallocate_lists();
      }
    }

    if (G1StringDedup::is_enabled()) {
      G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
      G1StringDedup::unlink(&g1_is_alive);
    }
  }
}

void ConcurrentMark::swapMarkBitMaps() {
  CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap = (CMBitMap*) temp;
}
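
// Illustrative note (an interpretation, not from the original source): after
// the swap above, the bitmap that the just-finished cycle populated becomes
// the "prev" bitmap, the stable liveness view that later pauses and
// verification read. The old "prev" storage is recycled as the "next" bitmap
// and is cleared before the following cycle marks into it.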

class CMObjectClosure;

// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
private:
  CMTask* _task;

public:
  void do_object(oop obj) {
    _task->deal_with_reference(obj);
  }

  CMObjectClosure(CMTask* task) : _task(task) { }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  CMObjectClosure _cm_obj;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods to find
        // roots for concurrent marking; however, oops reachable from nmethods have very
        // complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder)
        // of the receiver, should be live by the SATB invariant, but other oops recorded
        // in nmethods may behave differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
      }
    }
  }
};

class CMRemarkTask: public AbstractGangTask {
private:
  ConcurrentMark* _cm;
public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true  /* do_termination */,
                              false /* is_serial */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  CMRemarkTask(ConcurrentMark* cm, int active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  G1CMTraceTime trace("Finalize Marking", G1Log::finer());

  g1h->ensure_parsability(false);

  G1CollectedHeap::StrongRootsScope srs(g1h);
  // This is remark, so we'll use up all active threads.
  uint active_workers = g1h->workers()->active_workers();
  if (active_workers == 0) {
    assert(active_workers > 0, "Should have been set earlier");
    active_workers = (uint) ParallelGCThreads;
    g1h->workers()->set_active_workers(active_workers);
  }
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  CMRemarkTask remarkTask(this, active_workers);
  // We will start all available threads, even if we decide that the
  // active_workers will be fewer. The extra ones will just bail out
  // immediately.
  g1h->set_par_threads(active_workers);
  g1h->workers()->run_task(&remarkTask);
  g1h->set_par_threads(0);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            err_msg("Invariant: has_overflown = %s, num buffers = %d",
                    BOOL_TO_STR(has_overflown()),
                    satb_mq_set.completed_buffers_num()));

  print_stats();
}

#ifndef PRODUCT

class PrintReachableOopClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  outputStream* _out;
  VerifyOption _vo;
  bool _all;

public:
  PrintReachableOopClosure(outputStream* out,
                           VerifyOption vo,
                           bool all) :
    _g1h(G1CollectedHeap::heap()),
    _out(out), _vo(vo), _all(all) { }

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    const char* str = NULL;
    const char* str2 = "";

    if (obj == NULL) {
      str = "";
    } else if (!_g1h->is_in_g1_reserved(obj)) {
      str = " O";
    } else {
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
      bool marked = _g1h->is_marked(obj, _vo);

      if (over_tams) {
        str = " >";
        if (marked) {
          str2 = " AND MARKED";
        }
      } else if (marked) {
        str = " M";
      } else {
        str = " NOT";
      }
    }

    _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
                   p2i(p), p2i((void*) obj), str, str2);
  }
};

class PrintReachableObjectClosure : public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  outputStream* _out;
  VerifyOption _vo;
  bool _all;
  HeapRegion* _hr;

public:
  PrintReachableObjectClosure(outputStream* out,
                              VerifyOption vo,
                              bool all,
                              HeapRegion* hr) :
    _g1h(G1CollectedHeap::heap()),
    _out(out), _vo(vo), _all(all), _hr(hr) { }

  void do_object(oop o) {
    bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
    bool marked = _g1h->is_marked(o, _vo);
    bool print_it = _all || over_tams || marked;

    if (print_it) {
      _out->print_cr(" "PTR_FORMAT"%s",
                     p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
      PrintReachableOopClosure oopCl(_out, _vo, _all);
      o->oop_iterate_no_header(&oopCl);
    }
  }
};

class PrintReachableRegionClosure : public HeapRegionClosure {
private:
  G1CollectedHeap* _g1h;
  outputStream* _out;
  VerifyOption _vo;
  bool _all;

public:
  bool doHeapRegion(HeapRegion* hr) {
    HeapWord* b = hr->bottom();
    HeapWord* e = hr->end();
    HeapWord* t = hr->top();
    HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
    _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
                   "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
    _out->cr();

    HeapWord* from = b;
    HeapWord* to   = t;

    if (to > from) {
      _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
      _out->cr();
      PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
      hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
      _out->cr();
    }

    return false;
  }

  PrintReachableRegionClosure(outputStream* out,
                              VerifyOption vo,
                              bool all) :
    _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
};

void ConcurrentMark::print_reachable(const char* str,
                                     VerifyOption vo,
                                     bool all) {
  gclog_or_tty->cr();
  gclog_or_tty->print_cr("== Doing heap dump... ");

  if (G1PrintReachableBaseFile == NULL) {
    gclog_or_tty->print_cr("  #### error: no base file defined");
    return;
  }

  if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
      (JVM_MAXPATHLEN - 1)) {
    gclog_or_tty->print_cr("  #### error: file name too long");
    return;
  }

  char file_name[JVM_MAXPATHLEN];
  sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
  gclog_or_tty->print_cr("  dumping to file %s", file_name);

  fileStream fout(file_name);
  if (!fout.is_open()) {
    gclog_or_tty->print_cr("  #### error: could not open file");
    return;
  }

  outputStream* out = &fout;
  out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
  out->cr();

  out->print_cr("--- ITERATING OVER REGIONS");
  out->cr();
  PrintReachableRegionClosure rcl(out, vo, all);
  _g1h->heap_region_iterate(&rcl);
  out->cr();

  gclog_or_tty->print_cr("  done");
  gclog_or_tty->flush();
}

#endif // PRODUCT

void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
}

void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  _nextMarkBitMap->clearRange(mr);
}
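
// Illustrative sketch (comment only, mirroring the code below): claim_region()
// parcels out regions with a lock-free compare-and-swap on the global finger.
// Each claimant snapshots the finger, computes the end of the region holding
// it, and tries to advance the finger past that region in one step:
//
//   HeapWord* finger = _finger;                      // snapshot
//   HeapWord* end    = end of the region at finger;  // or finger + GrainWords
//   if (Atomic::cmpxchg_ptr(end, &_finger, finger) == finger) {
//     // success: this thread owns [finger, end)
//   } else {
//     // lost the race: re-read _finger and retry
//   }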

HeapRegion*
ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    // Note on how this code handles humongous regions. In the
    // normal case the finger will reach the start of a "starts
    // humongous" (SH) region. Its end will either be the end of the
    // last "continues humongous" (CH) region in the sequence, or the
    // standard end of the SH region (if the SH is the only region in
    // the sequence). That way claim_region() will skip over the CH
    // regions. However, there is a subtle race between a CM thread
    // executing this method and a mutator thread doing a humongous
    // object allocation. The two are not mutually exclusive as the CM
    // thread does not need to hold the Heap_lock when it gets
    // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming a SH or a CH
    // region. In the former case, it will either
    //   a) Miss the update to the region's end, in which case it will
    //      visit every subsequent CH region, will find their bitmaps
    //      empty, and do nothing, or
    //   b) Will observe the update of the region's end (in which case
    //      it will skip the subsequent CH regions).
    // If it comes across a region that suddenly becomes CH, the
    // scenario will be similar to b). So, the race between
    // claim_region() and a humongous object allocation might force us
    // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);

    // Above, heap_region_containing_raw() may return NULL, as we always
    // scan and claim until the end of the heap. In this case, just jump
    // to the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit  = curr_region->next_top_at_mark_start();

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
                               "["PTR_FORMAT", "PTR_FORMAT"), "
                               "limit = "PTR_FORMAT,
                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
      }

      // Notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further.
      assert(_finger >= end, "the finger should have moved forward");

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] we were successful with region = "
                               PTR_FORMAT, worker_id, p2i(curr_region));
      }

      if (limit > bottom) {
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
                                 "returning it ", worker_id, p2i(curr_region));
        }
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
                                 "returning NULL", worker_id, p2i(curr_region));
        }
        // we return NULL and the caller should try calling
        // claim_region() again.
        return NULL;
      }
    } else {
      assert(_finger > finger, "the finger should have moved forward");
      if (verbose_low()) {
        if (curr_region == NULL) {
          gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
                                 "global finger = "PTR_FORMAT", "
                                 "our finger = "PTR_FORMAT,
                                 worker_id, p2i(_finger), p2i(finger));
        } else {
          gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
                                 "global finger = "PTR_FORMAT", "
                                 "our finger = "PTR_FORMAT,
                                 worker_id, p2i(_finger), p2i(finger));
        }
      }

      // read it again
      finger = _finger;
    }
  }

  return NULL;
}

#ifndef PRODUCT
enum VerifyNoCSetOopsPhase {
  VerifyNoCSetOopsStack,
  VerifyNoCSetOopsQueues,
  VerifyNoCSetOopsSATBCompleted,
  VerifyNoCSetOopsSATBThread
};

class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  VerifyNoCSetOopsPhase _phase;
  int _info;

  const char* phase_str() {
    switch (_phase) {
    case VerifyNoCSetOopsStack:         return "Stack";
    case VerifyNoCSetOopsQueues:        return "Queue";
    case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
    case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
    default:                            ShouldNotReachHere();
    }
    return NULL;
  }

  void do_object_work(oop obj) {
    guarantee(!_g1h->obj_in_cs(obj),
              err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
                      p2i((void*) obj), phase_str(), _info));
  }

public:
  VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }

  void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
    _phase = phase;
    _info = info;
  }

  virtual void do_oop(oop* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_object_work(obj);
  }

  virtual void do_oop(narrowOop* p) {
    // We should not come across narrow oops while scanning marking
    // stacks and SATB buffers.
    ShouldNotReachHere();
  }

  virtual void do_object(oop obj) {
    do_object_work(obj);
  }
};

void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
                                         bool verify_enqueued_buffers,
                                         bool verify_thread_buffers,
                                         bool verify_fingers) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!G1CollectedHeap::heap()->mark_in_progress()) {
    return;
  }

  VerifyNoCSetOopsClosure cl;

  if (verify_stacks) {
    // Verify entries on the global mark stack
    cl.set_phase(VerifyNoCSetOopsStack);
    _markStack.oops_do(&cl);

    // Verify entries on the task queues
    for (uint i = 0; i < _max_worker_id; i += 1) {
      cl.set_phase(VerifyNoCSetOopsQueues, i);
      CMTaskQueue* queue = _task_queues->queue(i);
      queue->oops_do(&cl);
    }
  }

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();

  // Verify entries on the enqueued SATB buffers
  if (verify_enqueued_buffers) {
    cl.set_phase(VerifyNoCSetOopsSATBCompleted);
    satb_qs.iterate_completed_buffers_read_only(&cl);
  }

  // Verify entries on the per-thread SATB buffers
  if (verify_thread_buffers) {
    cl.set_phase(VerifyNoCSetOopsSATBThread);
    satb_qs.iterate_thread_buffers_read_only(&cl);
  }

  if (verify_fingers) {
    // Verify the global finger
    HeapWord* global_finger = finger();
    if (global_finger != NULL && global_finger < _heap_end) {
      // The global finger always points to a heap region boundary. We
      // use heap_region_containing_raw() to get the containing region
      // given that the global finger could be pointing to a free region
      // which subsequently becomes a continues humongous region. If that
      // happens, heap_region_containing() will return the bottom of the
      // corresponding starts humongous region and the check below will
      // not hold any more.
      // Since we always iterate over all regions, we might get a NULL HeapRegion
      // here.
      HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
      guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
                err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
                        p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
    }

    // Verify the task fingers
    assert(parallel_marking_threads() <= _max_worker_id, "sanity");
    for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
      CMTask* task = _tasks[i];
      HeapWord* task_finger = task->finger();
      if (task_finger != NULL && task_finger < _heap_end) {
        // See above note on the global finger verification.
        HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
        guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                  !task_hr->in_collection_set(),
                  err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
                          p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
      }
    }
  }
}
#endif // PRODUCT
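
// Illustrative note (an interpretation of the aggregation below): the phase
// reduces the per-worker counting data into the global structures.
// Conceptually, for each region R whose [bottom, ntams) spans cards
// [start_idx, limit_idx):
//
//   R.marked_bytes   = sum over workers w of marked_bytes[w][R]
//   global card bm  |= task_card_bm[w] over [start_idx, limit_idx), for all w
//
// Cards above ntams (allocated since marking started) are not touched here;
// they are handled by the final counting task during the cleanup pause.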

// Aggregate the counting data that was constructed concurrently
// with marking.
class AggregateCountDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;
  BitMap* _cm_card_bm;
  uint _max_worker_id;

public:
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed.
      // Note that we cannot rely on their associated
      // "starts humongous" region to have their bit set to 1
      // since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* start = hr->bottom();
    HeapWord* limit = hr->next_top_at_mark_start();
    HeapWord* end = hr->end();

    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
                   "top: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));

    assert(hr->next_marked_bytes() == 0, "Precondition");

    if (start == limit) {
      // NTAMS of this region has not been set so nothing to do.
      return false;
    }

    // 'start' should be in the heap.
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could actually be just beyond the end of the heap;
    // limit_idx will then correspond to a (non-existent) card
    // that is also outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
    uint hrm_index = hr->hrm_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrm_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
      BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);

      while (scan_idx < limit_idx) {
        assert(task_card_bm->at(scan_idx) == true, "should be");
        _cm_card_bm->set_bit(scan_idx);
        assert(_cm_card_bm->at(scan_idx) == true, "should be");

        // BitMap::get_next_one_offset() can handle the case when
        // its left_offset parameter is greater than its right_offset
        // parameter. It does, however, have an early exit if
        // left_offset == right_offset. So let's limit the value
        // passed in for left offset here.
        BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
        scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
      }
    }

    // Update the marked bytes for this region.
    hr->add_to_marked_bytes(marked_bytes);

    // Next heap region
    return false;
  }
};

class G1AggregateCountDataTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _cm_card_bm;
  uint _max_worker_id;
  int _active_workers;
  HeapRegionClaimer _hrclaimer;

public:
  G1AggregateCountDataTask(G1CollectedHeap* g1h,
                           ConcurrentMark* cm,
                           BitMap* cm_card_bm,
                           uint max_worker_id,
                           int n_workers) :
    AbstractGangTask("Count Aggregation"),
    _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
    _max_worker_id(max_worker_id),
    _active_workers(n_workers),
    _hrclaimer(_active_workers) {
  }

  void work(uint worker_id) {
    AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);

    _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
  }
};


void ConcurrentMark::aggregate_count_data() {
  int n_workers = _g1h->workers()->active_workers();

  G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
                                           _max_worker_id, n_workers);

  _g1h->set_par_threads(n_workers);
  _g1h->workers()->run_task(&g1_par_agg_task);
  _g1h->set_par_threads(0);
}

// Clear the per-worker arrays used to store the per-region counting data
void ConcurrentMark::clear_all_count_data() {
  // Clear the global card bitmap - it will be filled during
  // liveness count aggregation (during remark) and the
  // final counting task.
  _card_bm.clear();

  // Clear the global region bitmap - it will be filled as part
  // of the final counting task.
3258 _region_bm.clear(); 3259 3260 uint max_regions = _g1h->max_regions(); 3261 assert(_max_worker_id > 0, "uninitialized"); 3262 3263 for (uint i = 0; i < _max_worker_id; i += 1) { 3264 BitMap* task_card_bm = count_card_bitmap_for(i); 3265 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3266 3267 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3268 assert(marked_bytes_array != NULL, "uninitialized"); 3269 3270 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3271 task_card_bm->clear(); 3272 } 3273 } 3274 3275 void ConcurrentMark::print_stats() { 3276 if (verbose_stats()) { 3277 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3278 for (size_t i = 0; i < _active_tasks; ++i) { 3279 _tasks[i]->print_stats(); 3280 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3281 } 3282 } 3283 } 3284 3285 // Abandon the current marking iteration due to a Full GC. 3286 void ConcurrentMark::abort() { 3287 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3288 // concurrent bitmap clearing. 3289 _nextMarkBitMap->clearAll(); 3290 3291 // Note we cannot clear the previous marking bitmap here 3292 // since VerifyDuringGC verifies the objects marked during 3293 // a full GC against the previous bitmap. 3294 3295 // Clear the liveness counting data 3296 clear_all_count_data(); 3297 // Empty mark stack 3298 reset_marking_state(); 3299 for (uint i = 0; i < _max_worker_id; ++i) { 3300 _tasks[i]->clear_region_fields(); 3301 } 3302 _first_overflow_barrier_sync.abort(); 3303 _second_overflow_barrier_sync.abort(); 3304 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); 3305 if (!gc_id.is_undefined()) { 3306 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance 3307 // to detect that it was aborted. Only keep track of the first GC id that we aborted. 3308 _aborted_gc_id = gc_id; 3309 } 3310 _has_aborted = true; 3311 3312 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3313 satb_mq_set.abandon_partial_marking(); 3314 // This can be called either during or outside marking; we'll read 3315 // the expected_active value from the SATB queue set. 3316 satb_mq_set.set_active_all_threads( 3317 false, /* new active value */ 3318 satb_mq_set.is_active() /* expected_active */); 3319 3320 _g1h->trace_heap_after_concurrent_cycle(); 3321 _g1h->register_concurrent_cycle_end(); 3322 } 3323 3324 const GCId& ConcurrentMark::concurrent_gc_id() { 3325 if (has_aborted()) { 3326 return _aborted_gc_id; 3327 } 3328 return _g1h->gc_tracer_cm()->gc_id(); 3329 } 3330 3331 static void print_ms_time_info(const char* prefix, const char* name, 3332 NumberSeq& ns) { 3333 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3334 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3335 if (ns.num() > 0) { 3336 gclog_or_tty->print_cr("%s [std.
dev = %8.2f ms, max = %8.2f ms]", 3337 prefix, ns.sd(), ns.maximum()); 3338 } 3339 } 3340 3341 void ConcurrentMark::print_summary_info() { 3342 gclog_or_tty->print_cr(" Concurrent marking:"); 3343 print_ms_time_info(" ", "init marks", _init_times); 3344 print_ms_time_info(" ", "remarks", _remark_times); 3345 { 3346 print_ms_time_info(" ", "final marks", _remark_mark_times); 3347 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3348 3349 } 3350 print_ms_time_info(" ", "cleanups", _cleanup_times); 3351 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3352 _total_counting_time, 3353 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3354 (double)_cleanup_times.num() 3355 : 0.0)); 3356 if (G1ScrubRemSets) { 3357 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3358 _total_rs_scrub_time, 3359 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3360 (double)_cleanup_times.num() 3361 : 0.0)); 3362 } 3363 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3364 (_init_times.sum() + _remark_times.sum() + 3365 _cleanup_times.sum())/1000.0); 3366 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3367 "(%8.2f s marking).", 3368 cmThread()->vtime_accum(), 3369 cmThread()->vtime_mark_accum()); 3370 } 3371 3372 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3373 _parallel_workers->print_worker_threads_on(st); 3374 } 3375 3376 void ConcurrentMark::print_on_error(outputStream* st) const { 3377 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3378 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3379 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3380 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3381 } 3382 3383 // We take a break if someone is trying to stop the world. 
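// SuspendibleThreadSet::should_yield() returns true when a safepoint // (e.g. an evacuation pause or a Full GC) is pending; // SuspendibleThreadSet::yield() then blocks this concurrent worker // until the safepoint operation has completed.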
3384 bool ConcurrentMark::do_yield_check(uint worker_id) { 3385 if (SuspendibleThreadSet::should_yield()) { 3386 if (worker_id == 0) { 3387 _g1h->g1_policy()->record_concurrent_pause(); 3388 } 3389 SuspendibleThreadSet::yield(); 3390 return true; 3391 } else { 3392 return false; 3393 } 3394 } 3395 3396 #ifndef PRODUCT 3397 // for debugging purposes 3398 void ConcurrentMark::print_finger() { 3399 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3400 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3401 for (uint i = 0; i < _max_worker_id; ++i) { 3402 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3403 } 3404 gclog_or_tty->cr(); 3405 } 3406 #endif 3407 3408 void CMTask::scan_object(oop obj) { 3409 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3410 3411 if (_cm->verbose_high()) { 3412 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, 3413 _worker_id, p2i((void*) obj)); 3414 } 3415 3416 size_t obj_size = obj->size(); 3417 _words_scanned += obj_size; 3418 3419 obj->oop_iterate(_cm_oop_closure); 3420 statsOnly( ++_objs_scanned ); 3421 check_limits(); 3422 } 3423 3424 // Closure for iteration over bitmaps 3425 class CMBitMapClosure : public BitMapClosure { 3426 private: 3427 // the bitmap that is being iterated over 3428 CMBitMap* _nextMarkBitMap; 3429 ConcurrentMark* _cm; 3430 CMTask* _task; 3431 3432 public: 3433 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3434 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3435 3436 bool do_bit(size_t offset) { 3437 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3438 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3439 assert( addr < _cm->finger(), "invariant"); 3440 3441 statsOnly( _task->increase_objs_found_on_bitmap() ); 3442 assert(addr >= _task->finger(), "invariant"); 3443 3444 // We move that task's local finger along. 
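// Note: updating the local finger *before* scanning the object means // that, if the step aborts somewhere below, _finger already points at // the object being scanned, so the restart logic in do_marking_step() // can work out exactly where to resume (see the comments there).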
3445 _task->move_finger_to(addr); 3446 3447 _task->scan_object(oop(addr)); 3448 // we only partially drain the local queue and global stack 3449 _task->drain_local_queue(true); 3450 _task->drain_global_stack(true); 3451 3452 // if the has_aborted flag has been raised, we need to bail out of 3453 // the iteration 3454 return !_task->has_aborted(); 3455 } 3456 }; 3457 3458 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3459 ConcurrentMark* cm, 3460 CMTask* task) 3461 : _g1h(g1h), _cm(cm), _task(task) { 3462 assert(_ref_processor == NULL, "should be initialized to NULL"); 3463 3464 if (G1UseConcMarkReferenceProcessing) { 3465 _ref_processor = g1h->ref_processor_cm(); 3466 assert(_ref_processor != NULL, "should not be NULL"); 3467 } 3468 } 3469 3470 void CMTask::setup_for_region(HeapRegion* hr) { 3471 assert(hr != NULL, 3472 "claim_region() should have filtered out NULL regions"); 3473 assert(!hr->is_continues_humongous(), 3474 "claim_region() should have filtered out continues humongous regions"); 3475 3476 if (_cm->verbose_low()) { 3477 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3478 _worker_id, p2i(hr)); 3479 } 3480 3481 _curr_region = hr; 3482 _finger = hr->bottom(); 3483 update_region_limit(); 3484 } 3485 3486 void CMTask::update_region_limit() { 3487 HeapRegion* hr = _curr_region; 3488 HeapWord* bottom = hr->bottom(); 3489 HeapWord* limit = hr->next_top_at_mark_start(); 3490 3491 if (limit == bottom) { 3492 if (_cm->verbose_low()) { 3493 gclog_or_tty->print_cr("[%u] found an empty region " 3494 "["PTR_FORMAT", "PTR_FORMAT")", 3495 _worker_id, p2i(bottom), p2i(limit)); 3496 } 3497 // The region was collected underneath our feet. 3498 // We set the finger to bottom to ensure that the bitmap 3499 // iteration that will follow this will not do anything. 3500 // (this is not a condition that holds when we set the region up, 3501 // as the region is not supposed to be empty in the first place) 3502 _finger = bottom; 3503 } else if (limit >= _region_limit) { 3504 assert(limit >= _finger, "peace of mind"); 3505 } else { 3506 assert(limit < _region_limit, "only way to get here"); 3507 // This can happen under some pretty unusual circumstances. An 3508 // evacuation pause empties the region underneath our feet (NTAMS 3509 // at bottom). We then do some allocation in the region (NTAMS 3510 // stays at bottom), followed by the region being used as a GC 3511 // alloc region (NTAMS will move to top() and the objects 3512 // originally below it will be grayed). All objects now marked in 3513 // the region are explicitly grayed, if below the global finger, 3514 // and in fact we do not need to scan anything else. So, we simply 3515 // set _finger to be limit to ensure that the bitmap iteration 3516 // doesn't do anything. 3517 _finger = limit; 3518 } 3519 3520 _region_limit = limit; 3521 } 3522 3523 void CMTask::giveup_current_region() { 3524 assert(_curr_region != NULL, "invariant"); 3525 if (_cm->verbose_low()) { 3526 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, 3527 _worker_id, p2i(_curr_region)); 3528 } 3529 clear_region_fields(); 3530 } 3531 3532 void CMTask::clear_region_fields() { 3533 // Values for these three fields that indicate that we're not 3534 // holding on to a region.
3535 _curr_region = NULL; 3536 _finger = NULL; 3537 _region_limit = NULL; 3538 } 3539 3540 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3541 if (cm_oop_closure == NULL) { 3542 assert(_cm_oop_closure != NULL, "invariant"); 3543 } else { 3544 assert(_cm_oop_closure == NULL, "invariant"); 3545 } 3546 _cm_oop_closure = cm_oop_closure; 3547 } 3548 3549 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3550 guarantee(nextMarkBitMap != NULL, "invariant"); 3551 3552 if (_cm->verbose_low()) { 3553 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3554 } 3555 3556 _nextMarkBitMap = nextMarkBitMap; 3557 clear_region_fields(); 3558 3559 _calls = 0; 3560 _elapsed_time_ms = 0.0; 3561 _termination_time_ms = 0.0; 3562 _termination_start_time_ms = 0.0; 3563 3564 #if _MARKING_STATS_ 3565 _local_pushes = 0; 3566 _local_pops = 0; 3567 _local_max_size = 0; 3568 _objs_scanned = 0; 3569 _global_pushes = 0; 3570 _global_pops = 0; 3571 _global_max_size = 0; 3572 _global_transfers_to = 0; 3573 _global_transfers_from = 0; 3574 _regions_claimed = 0; 3575 _objs_found_on_bitmap = 0; 3576 _satb_buffers_processed = 0; 3577 _steal_attempts = 0; 3578 _steals = 0; 3579 _aborted = 0; 3580 _aborted_overflow = 0; 3581 _aborted_cm_aborted = 0; 3582 _aborted_yield = 0; 3583 _aborted_timed_out = 0; 3584 _aborted_satb = 0; 3585 _aborted_termination = 0; 3586 #endif // _MARKING_STATS_ 3587 } 3588 3589 bool CMTask::should_exit_termination() { 3590 regular_clock_call(); 3591 // This is called when we are in the termination protocol. We should 3592 // quit if, for some reason, this task wants to abort or the global 3593 // stack is not empty (this means that we can get work from it). 3594 return !_cm->mark_stack_empty() || has_aborted(); 3595 } 3596 3597 void CMTask::reached_limit() { 3598 assert(_words_scanned >= _words_scanned_limit || 3599 _refs_reached >= _refs_reached_limit , 3600 "shouldn't have been called otherwise"); 3601 regular_clock_call(); 3602 } 3603 3604 void CMTask::regular_clock_call() { 3605 if (has_aborted()) return; 3606 3607 // First, we need to recalculate the words scanned and refs reached 3608 // limits for the next clock call. 3609 recalculate_limits(); 3610 3611 // During the regular clock call we do the following 3612 3613 // (1) If an overflow has been flagged, then we abort. 3614 if (_cm->has_overflown()) { 3615 set_has_aborted(); 3616 return; 3617 } 3618 3619 // If we are not concurrent (i.e. we're doing remark) we don't need 3620 // to check anything else. The other steps are only needed during 3621 // the concurrent marking phase. 3622 if (!concurrent()) return; 3623 3624 // (2) If marking has been aborted for Full GC, then we also abort. 3625 if (_cm->has_aborted()) { 3626 set_has_aborted(); 3627 statsOnly( ++_aborted_cm_aborted ); 3628 return; 3629 } 3630 3631 double curr_time_ms = os::elapsedVTime() * 1000.0; 3632 3633 // (3) If marking stats are enabled, then we update the step history. 
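// (_MARKING_STATS_ is a compile-time switch defined in // concurrentMark.hpp; when it is off, this block and the statsOnly() // macro compile away, so the statistics cost nothing in product // builds.)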
3634 #if _MARKING_STATS_ 3635 if (_words_scanned >= _words_scanned_limit) { 3636 ++_clock_due_to_scanning; 3637 } 3638 if (_refs_reached >= _refs_reached_limit) { 3639 ++_clock_due_to_marking; 3640 } 3641 3642 double last_interval_ms = curr_time_ms - _interval_start_time_ms; 3643 _interval_start_time_ms = curr_time_ms; 3644 _all_clock_intervals_ms.add(last_interval_ms); 3645 3646 if (_cm->verbose_medium()) { 3647 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, " 3648 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s", 3649 _worker_id, last_interval_ms, 3650 _words_scanned, 3651 (_words_scanned >= _words_scanned_limit) ? " (*)" : "", 3652 _refs_reached, 3653 (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); 3654 } 3655 #endif // _MARKING_STATS_ 3656 3657 // (4) We check whether we should yield. If we have to, then we abort. 3658 if (SuspendibleThreadSet::should_yield()) { 3659 // We should yield. To do this we abort the task. The caller is 3660 // responsible for yielding. 3661 set_has_aborted(); 3662 statsOnly( ++_aborted_yield ); 3663 return; 3664 } 3665 3666 // (5) We check whether we've reached our time quota. If we have, 3667 // then we abort. 3668 double elapsed_time_ms = curr_time_ms - _start_time_ms; 3669 if (elapsed_time_ms > _time_target_ms) { 3670 set_has_aborted(); 3671 _has_timed_out = true; 3672 statsOnly( ++_aborted_timed_out ); 3673 return; 3674 } 3675 3676 // (6) Finally, we check whether there are enough completed SATB 3677 // buffers available for processing. If there are, we abort. 3678 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3679 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 3680 if (_cm->verbose_low()) { 3681 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers", 3682 _worker_id); 3683 } 3684 // We do need to process SATB buffers, so we'll abort and restart 3685 // the marking task to do so 3686 set_has_aborted(); 3687 statsOnly( ++_aborted_satb ); 3688 return; 3689 } 3690 } 3691 3692 void CMTask::recalculate_limits() { 3693 _real_words_scanned_limit = _words_scanned + words_scanned_period; 3694 _words_scanned_limit = _real_words_scanned_limit; 3695 3696 _real_refs_reached_limit = _refs_reached + refs_reached_period; 3697 _refs_reached_limit = _real_refs_reached_limit; 3698 } 3699 3700 void CMTask::decrease_limits() { 3701 // This is called when we believe that we're going to do an infrequent 3702 // operation which will increase the per-byte scanned cost (i.e. move 3703 // entries to/from the global stack). It basically tries to decrease the 3704 // scanning limit so that the clock is called earlier.
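// Concretely: recalculate_limits() grants a full period of words/refs // before the next clock call, and pulling each limit back by 3/4 of a // period below leaves at most a quarter of a period of work before // regular_clock_call() fires again.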
3705 3706 if (_cm->verbose_medium()) { 3707 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id); 3708 } 3709 3710 _words_scanned_limit = _real_words_scanned_limit - 3711 3 * words_scanned_period / 4; 3712 _refs_reached_limit = _real_refs_reached_limit - 3713 3 * refs_reached_period / 4; 3714 } 3715 3716 void CMTask::move_entries_to_global_stack() { 3717 // local array where we'll store the entries that will be popped 3718 // from the local queue 3719 oop buffer[global_stack_transfer_size]; 3720 3721 int n = 0; 3722 oop obj; 3723 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 3724 buffer[n] = obj; 3725 ++n; 3726 } 3727 3728 if (n > 0) { 3729 // we popped at least one entry from the local queue 3730 3731 statsOnly( ++_global_transfers_to; _local_pops += n ); 3732 3733 if (!_cm->mark_stack_push(buffer, n)) { 3734 if (_cm->verbose_low()) { 3735 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow", 3736 _worker_id); 3737 } 3738 set_has_aborted(); 3739 } else { 3740 // the transfer was successful 3741 3742 if (_cm->verbose_medium()) { 3743 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack", 3744 _worker_id, n); 3745 } 3746 statsOnly( int tmp_size = _cm->mark_stack_size(); 3747 if (tmp_size > _global_max_size) { 3748 _global_max_size = tmp_size; 3749 } 3750 _global_pushes += n ); 3751 } 3752 } 3753 3754 // this operation was quite expensive, so decrease the limits 3755 decrease_limits(); 3756 } 3757 3758 void CMTask::get_entries_from_global_stack() { 3759 // local array where we'll store the entries that will be popped 3760 // from the global stack. 3761 oop buffer[global_stack_transfer_size]; 3762 int n; 3763 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 3764 assert(n <= global_stack_transfer_size, 3765 "we should not pop more than the given limit"); 3766 if (n > 0) { 3767 // yes, we did actually pop at least one entry 3768 3769 statsOnly( ++_global_transfers_from; _global_pops += n ); 3770 if (_cm->verbose_medium()) { 3771 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack", 3772 _worker_id, n); 3773 } 3774 for (int i = 0; i < n; ++i) { 3775 bool success = _task_queue->push(buffer[i]); 3776 // We only call this when the local queue is empty or under a 3777 // given target limit. So, we do not expect this push to fail. 3778 assert(success, "invariant"); 3779 } 3780 3781 statsOnly( int tmp_size = _task_queue->size(); 3782 if (tmp_size > _local_max_size) { 3783 _local_max_size = tmp_size; 3784 } 3785 _local_pushes += n ); 3786 } 3787 3788 // this operation was quite expensive, so decrease the limits 3789 decrease_limits(); 3790 } 3791 3792 void CMTask::drain_local_queue(bool partially) { 3793 if (has_aborted()) return; 3794 3795 // Decide what the target size is, depending on whether we're going to 3796 // drain it partially (so that other tasks can steal if they run out 3797 // of things to do) or totally (at the very end).
3798 size_t target_size; 3799 if (partially) { 3800 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 3801 } else { 3802 target_size = 0; 3803 } 3804 3805 if (_task_queue->size() > target_size) { 3806 if (_cm->verbose_high()) { 3807 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT, 3808 _worker_id, target_size); 3809 } 3810 3811 oop obj; 3812 bool ret = _task_queue->pop_local(obj); 3813 while (ret) { 3814 statsOnly( ++_local_pops ); 3815 3816 if (_cm->verbose_high()) { 3817 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id, 3818 p2i((void*) obj)); 3819 } 3820 3821 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 3822 assert(!_g1h->is_on_master_free_list( 3823 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 3824 3825 scan_object(obj); 3826 3827 if (_task_queue->size() <= target_size || has_aborted()) { 3828 ret = false; 3829 } else { 3830 ret = _task_queue->pop_local(obj); 3831 } 3832 } 3833 3834 if (_cm->verbose_high()) { 3835 gclog_or_tty->print_cr("[%u] drained local queue, size = %u", 3836 _worker_id, _task_queue->size()); 3837 } 3838 } 3839 } 3840 3841 void CMTask::drain_global_stack(bool partially) { 3842 if (has_aborted()) return; 3843 3844 // We have a policy to drain the local queue before we attempt to 3845 // drain the global stack. 3846 assert(partially || _task_queue->size() == 0, "invariant"); 3847 3848 // Decide what the target size is, depending on whether we're going to 3849 // drain it partially (so that other tasks can steal if they run out 3850 // of things to do) or totally (at the very end). Notice that, 3851 // because we move entries from the global stack in chunks or 3852 // because another task might be doing the same, we might in fact 3853 // drop below the target. But, this is not a problem. 3854 size_t target_size; 3855 if (partially) { 3856 target_size = _cm->partial_mark_stack_size_target(); 3857 } else { 3858 target_size = 0; 3859 } 3860 3861 if (_cm->mark_stack_size() > target_size) { 3862 if (_cm->verbose_low()) { 3863 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT, 3864 _worker_id, target_size); 3865 } 3866 3867 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 3868 get_entries_from_global_stack(); 3869 drain_local_queue(partially); 3870 } 3871 3872 if (_cm->verbose_low()) { 3873 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT, 3874 _worker_id, _cm->mark_stack_size()); 3875 } 3876 } 3877 } 3878 3879 // The SATB queue code makes several assumptions about whether to call the par or 3880 // non-par versions of the methods. This is why some of the code is 3881 // replicated. We should really get rid of the single-threaded version 3882 // of the code to simplify things. 3883 void CMTask::drain_satb_buffers() { 3884 if (has_aborted()) return; 3885 3886 // We set this so that the regular clock knows that we're in the 3887 // middle of draining buffers and doesn't set the abort flag when it 3888 // notices that SATB buffers are available for draining. It'd be 3889 // very counterproductive if it did that. :-) 3890 _draining_satb_buffers = true; 3891 3892 CMObjectClosure oc(this); 3893 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3894 satb_mq_set.set_closure(_worker_id, &oc); 3895 3896 // This keeps claiming and applying the closure to completed buffers 3897 // until we run out of buffers or we need to abort.
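// (apply_closure_to_completed_buffer() returns true only if it // managed to claim and process a completed buffer, so the loop below // ends as soon as the shared list of completed buffers is exhausted // or the regular clock tells us to abort.)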
3898 while (!has_aborted() && 3899 satb_mq_set.apply_closure_to_completed_buffer(_worker_id)) { 3900 if (_cm->verbose_medium()) { 3901 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3902 } 3903 statsOnly( ++_satb_buffers_processed ); 3904 regular_clock_call(); 3905 } 3906 3907 _draining_satb_buffers = false; 3908 3909 assert(has_aborted() || 3910 concurrent() || 3911 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3912 3913 satb_mq_set.set_closure(_worker_id, NULL); 3914 3915 // again, this was a potentially expensive operation, so decrease the 3916 // limits to get the regular clock call early 3917 decrease_limits(); 3918 } 3919 3920 void CMTask::print_stats() { 3921 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3922 _worker_id, _calls); 3923 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3924 _elapsed_time_ms, _termination_time_ms); 3925 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3926 _step_times_ms.num(), _step_times_ms.avg(), 3927 _step_times_ms.sd()); 3928 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3929 _step_times_ms.maximum(), _step_times_ms.sum()); 3930 3931 #if _MARKING_STATS_ 3932 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3933 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3934 _all_clock_intervals_ms.sd()); 3935 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3936 _all_clock_intervals_ms.maximum(), 3937 _all_clock_intervals_ms.sum()); 3938 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", 3939 _clock_due_to_scanning, _clock_due_to_marking); 3940 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", 3941 _objs_scanned, _objs_found_on_bitmap); 3942 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", 3943 _local_pushes, _local_pops, _local_max_size); 3944 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", 3945 _global_pushes, _global_pops, _global_max_size); 3946 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", 3947 _global_transfers_to, _global_transfers_from); 3948 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed); 3949 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); 3950 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", 3951 _steal_attempts, _steals); 3952 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); 3953 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", 3954 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3955 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", 3956 _aborted_timed_out, _aborted_satb, _aborted_termination); 3957 #endif // _MARKING_STATS_ 3958 } 3959 3960 /***************************************************************************** 3961 3962 The do_marking_step(time_target_ms, ...) method is the building 3963 block of the parallel marking framework. It can be called in parallel 3964 with other invocations of do_marking_step() on different tasks 3965 (but only one per task, obviously) and concurrently with the 3966 mutator threads, or during remark; hence it eliminates the need 3967 for two versions of the code. When called during remark, it will 3968 pick up from where the task left off during the concurrent marking 3969 phase.
Interestingly, tasks are also claimable during evacuation 3970 pauses, since do_marking_step() ensures that it aborts before 3971 it needs to yield. 3972 3973 The data structures that it uses to do marking work are the 3974 following: 3975 3976 (1) Marking Bitmap. If there are gray objects that appear only 3977 on the bitmap (this happens either when dealing with an overflow 3978 or when the initial marking phase has simply marked the roots 3979 and didn't push them on the stack), then tasks claim heap 3980 regions whose bitmap they then scan to find gray objects. A 3981 global finger indicates where the end of the last claimed region 3982 is. A local finger indicates how far into the region a task has 3983 scanned. The two fingers are used to determine how to gray an 3984 object (i.e. whether simply marking it is OK, as it will be 3985 visited by a task in the future, or whether it also needs to be 3986 pushed on a stack). 3987 3988 (2) Local Queue. The local queue of the task, which is accessed 3989 reasonably efficiently by the task. Other tasks can steal from 3990 it when they run out of work. Throughout the marking phase, a 3991 task attempts to keep its local queue short but not totally 3992 empty, so that entries are available for stealing by other 3993 tasks. Only when there is no more work will a task totally 3994 drain its local queue. 3995 3996 (3) Global Mark Stack. This handles local queue overflow. During 3997 marking, only sets of entries are moved between it and the local 3998 queues, as access to it requires a mutex, and finer-grain 3999 interaction with it might cause contention. If it 4000 overflows, then the marking phase should restart and iterate 4001 over the bitmap to identify gray objects. Throughout the marking 4002 phase, tasks attempt to keep the global mark stack at a small 4003 length but not totally empty, so that entries are available for 4004 popping by other tasks. Only when there is no more work will 4005 tasks totally drain the global mark stack. 4006 4007 (4) SATB Buffer Queue. This is where completed SATB buffers are 4008 made available. Buffers are regularly removed from this queue 4009 and scanned for roots, so that the queue doesn't get too 4010 long. During remark, all completed buffers are processed, as 4011 well as the filled-in parts of any uncompleted buffers. 4012 4013 The do_marking_step() method tries to abort when the time target 4014 has been reached. There are a few other cases when the 4015 do_marking_step() method also aborts: 4016 4017 (1) When the marking phase has been aborted (after a Full GC). 4018 4019 (2) When a global overflow (on the global stack) has been 4020 triggered. Before the task aborts, it will actually sync up with 4021 the other tasks to ensure that all the marking data structures 4022 (local queues, stacks, fingers, etc.) are re-initialized so that 4023 when do_marking_step() completes, the marking phase can 4024 immediately restart. 4025 4026 (3) When enough completed SATB buffers are available. The 4027 do_marking_step() method only tries to drain SATB buffers right 4028 at the beginning. So, if enough buffers are available, the 4029 marking step aborts and the SATB buffers are processed at 4030 the beginning of the next invocation. 4031 4032 (4) To yield. When we have to yield, we abort and yield 4033 right at the end of do_marking_step(). This saves us from a lot 4034 of hassle as, by yielding, we might allow a Full GC.
If this 4035 happens, then objects will be compacted underneath our feet, the 4036 heap might shrink, etc. We save checking for this by just 4037 aborting and doing the yield right at the end. 4038 4039 From the above it follows that the do_marking_step() method should 4040 be called in a loop (or, otherwise, regularly) until it completes. 4041 4042 If a marking step completes without its has_aborted() flag being 4043 true, it means it has completed the current marking phase (and 4044 also all other marking tasks have done so and have all synced up). 4045 4046 A method called regular_clock_call() is invoked "regularly" (in 4047 sub-ms intervals) throughout marking. It is this clock method that 4048 checks all the abort conditions which were mentioned above and 4049 decides when the task should abort. A work-based scheme is used to 4050 trigger this clock method: when the number of object words the 4051 marking phase has scanned or the number of references the marking 4052 phase has visited reaches a given limit. Additional invocations of 4053 the clock method have been planted in a few other strategic places 4054 too. The initial reason for the clock method was to avoid calling 4055 vtime too regularly, as it is quite expensive. So, once it was in 4056 place, it was natural to piggy-back all the other conditions on it 4057 too and not constantly check them throughout the code. 4058 4059 If do_termination is true then do_marking_step will enter its 4060 termination protocol. 4061 4062 The value of is_serial must be true when do_marking_step is being 4063 called serially (i.e. by the VMThread) and do_marking_step should 4064 skip any synchronization in the termination and overflow code. 4065 Examples include the serial remark code and the serial reference 4066 processing closures. 4067 4068 The value of is_serial must be false when do_marking_step is 4069 being called by any of the worker threads in a work gang. 4070 Examples include the concurrent marking code (CMMarkingTask), 4071 the MT remark code, and the MT reference processing closures. 4072 4073 *****************************************************************************/ 4074 4075 void CMTask::do_marking_step(double time_target_ms, 4076 bool do_termination, 4077 bool is_serial) { 4078 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 4079 assert(concurrent() == _cm->concurrent(), "they should be the same"); 4080 4081 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); 4082 assert(_task_queues != NULL, "invariant"); 4083 assert(_task_queue != NULL, "invariant"); 4084 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); 4085 4086 assert(!_claimed, 4087 "only one thread should claim this task at any one time"); 4088 4089 // OK, this doesn't safeguard against all possible scenarios, as it is 4090 // possible for two threads to set the _claimed flag at the same 4091 // time. But it is only for debugging purposes anyway and it will 4092 // catch most problems. 4093 _claimed = true; 4094 4095 _start_time_ms = os::elapsedVTime() * 1000.0; 4096 statsOnly( _interval_start_time_ms = _start_time_ms ); 4097 4098 // If do_stealing is true then do_marking_step will attempt to 4099 // steal work from the other CMTasks. It only makes sense to 4100 // enable stealing when the termination protocol is enabled 4101 // and do_marking_step() is not being called serially.
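// For reference, the comment block above implies the flag usage: // is_serial == true only on the VMThread paths (serial remark and // serial reference processing), so stealing below is enabled for the // gang-worker callers (concurrent marking, MT remark, MT reference // processing) whenever they also request termination.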
4102 bool do_stealing = do_termination && !is_serial; 4103 4104 double diff_prediction_ms = 4105 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 4106 _time_target_ms = time_target_ms - diff_prediction_ms; 4107 4108 // set up the variables that are used in the work-based scheme to 4109 // call the regular clock method 4110 _words_scanned = 0; 4111 _refs_reached = 0; 4112 recalculate_limits(); 4113 4114 // clear all flags 4115 clear_has_aborted(); 4116 _has_timed_out = false; 4117 _draining_satb_buffers = false; 4118 4119 ++_calls; 4120 4121 if (_cm->verbose_low()) { 4122 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 4123 "target = %1.2lfms >>>>>>>>>>", 4124 _worker_id, _calls, _time_target_ms); 4125 } 4126 4127 // Set up the bitmap and oop closures. Anything that uses them is 4128 // eventually called from this method, so it is OK to allocate these 4129 // statically. 4130 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 4131 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 4132 set_cm_oop_closure(&cm_oop_closure); 4133 4134 if (_cm->has_overflown()) { 4135 // This can happen if the mark stack overflows during a GC pause 4136 // and this task, after a yield point, restarts. We have to abort 4137 // as we need to get into the overflow protocol which happens 4138 // right at the end of this task. 4139 set_has_aborted(); 4140 } 4141 4142 // First drain any available SATB buffers. After this, we will not 4143 // look at SATB buffers before the next invocation of this method. 4144 // If enough completed SATB buffers are queued up, the regular clock 4145 // will abort this task so that it restarts. 4146 drain_satb_buffers(); 4147 // ...then partially drain the local queue and the global stack 4148 drain_local_queue(true); 4149 drain_global_stack(true); 4150 4151 do { 4152 if (!has_aborted() && _curr_region != NULL) { 4153 // This means that we're already holding on to a region. 4154 assert(_finger != NULL, "if region is not NULL, then the finger " 4155 "should not be NULL either"); 4156 4157 // We might have restarted this task after an evacuation pause 4158 // which might have evacuated the region we're holding on to 4159 // underneath our feet. Let's read its limit again to make sure 4160 // that we do not iterate over a region of the heap that 4161 // contains garbage (update_region_limit() will also move 4162 // _finger to the start of the region if it is found empty). 4163 update_region_limit(); 4164 // We will start from _finger not from the start of the region, 4165 // as we might be restarting this task after aborting half-way 4166 // through scanning this region. In this case, _finger points to 4167 // the address where we last found a marked object. If this is a 4168 // fresh region, _finger points to start(). 4169 MemRegion mr = MemRegion(_finger, _region_limit); 4170 4171 if (_cm->verbose_low()) { 4172 gclog_or_tty->print_cr("[%u] we're scanning part " 4173 "["PTR_FORMAT", "PTR_FORMAT") " 4174 "of region "HR_FORMAT, 4175 _worker_id, p2i(_finger), p2i(_region_limit), 4176 HR_FORMAT_PARAMS(_curr_region)); 4177 } 4178 4179 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 4180 "humongous regions should go around loop once only"); 4181 4182 // Some special cases: 4183 // If the memory region is empty, we can just give up the region. 
4184 // If the current region is humongous then we only need to check 4185 // the bitmap for the bit associated with the start of the object, 4186 // scan the object if it's live, and give up the region. 4187 // Otherwise, let's iterate over the bitmap of the part of the region 4188 // that is left. 4189 // If the iteration is successful, give up the region. 4190 if (mr.is_empty()) { 4191 giveup_current_region(); 4192 regular_clock_call(); 4193 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 4194 if (_nextMarkBitMap->isMarked(mr.start())) { 4195 // The object is marked - apply the closure 4196 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4197 bitmap_closure.do_bit(offset); 4198 } 4199 // Even if this task aborted while scanning the humongous object 4200 // we can (and should) give up the current region. 4201 giveup_current_region(); 4202 regular_clock_call(); 4203 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4204 giveup_current_region(); 4205 regular_clock_call(); 4206 } else { 4207 assert(has_aborted(), "currently the only way to do so"); 4208 // The only way to abort the bitmap iteration is to return 4209 // false from the do_bit() method. However, inside the 4210 // do_bit() method we move the _finger to point to the 4211 // object currently being looked at. So, if we bail out, we 4212 // have definitely set _finger to something non-null. 4213 assert(_finger != NULL, "invariant"); 4214 4215 // Region iteration was actually aborted. So now _finger 4216 // points to the address of the object we last scanned. If we 4217 // leave it there, when we restart this task, we will rescan 4218 // the object. It is easy to avoid this. We move the finger by 4219 // enough to point to the next possible object header (the 4220 // bitmap knows by how much we need to move it as it knows its 4221 // granularity). 4222 assert(_finger < _region_limit, "invariant"); 4223 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4224 // Check if bitmap iteration was aborted while scanning the last object 4225 if (new_finger >= _region_limit) { 4226 giveup_current_region(); 4227 } else { 4228 move_finger_to(new_finger); 4229 } 4230 } 4231 } 4232 // At this point we have either completed iterating over the 4233 // region we were holding on to, or we have aborted. 4234 4235 // We then partially drain the local queue and the global stack. 4236 // (Do we really need this?) 4237 drain_local_queue(true); 4238 drain_global_stack(true); 4239 4240 // Read the note on the claim_region() method on why it might 4241 // return NULL with potentially more regions available for 4242 // claiming and why we have to check out_of_regions() to determine 4243 // whether we're done or not. 4244 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4245 // We are going to try to claim a new region. We should have 4246 // given up on the previous one. 4247 // Separated the asserts so that we know which one fires. 
4248 assert(_curr_region == NULL, "invariant"); 4249 assert(_finger == NULL, "invariant"); 4250 assert(_region_limit == NULL, "invariant"); 4251 if (_cm->verbose_low()) { 4252 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id); 4253 } 4254 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 4255 if (claimed_region != NULL) { 4256 // Yes, we managed to claim one 4257 statsOnly( ++_regions_claimed ); 4258 4259 if (_cm->verbose_low()) { 4260 gclog_or_tty->print_cr("[%u] we successfully claimed " 4261 "region "PTR_FORMAT, 4262 _worker_id, p2i(claimed_region)); 4263 } 4264 4265 setup_for_region(claimed_region); 4266 assert(_curr_region == claimed_region, "invariant"); 4267 } 4268 // It is important to call the regular clock here. It might take 4269 // a while to claim a region if, for example, we hit a large 4270 // block of empty regions. So we need to call the regular clock 4271 // method once round the loop to make sure it's called 4272 // frequently enough. 4273 regular_clock_call(); 4274 } 4275 4276 if (!has_aborted() && _curr_region == NULL) { 4277 assert(_cm->out_of_regions(), 4278 "at this point we should be out of regions"); 4279 } 4280 } while ( _curr_region != NULL && !has_aborted()); 4281 4282 if (!has_aborted()) { 4283 // We cannot check whether the global stack is empty, since other 4284 // tasks might be pushing objects to it concurrently. 4285 assert(_cm->out_of_regions(), 4286 "at this point we should be out of regions"); 4287 4288 if (_cm->verbose_low()) { 4289 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id); 4290 } 4291 4292 // Try to reduce the number of available SATB buffers so that 4293 // remark has less work to do. 4294 drain_satb_buffers(); 4295 } 4296 4297 // Since we've done everything else, we can now totally drain the 4298 // local queue and global stack. 4299 drain_local_queue(false); 4300 drain_global_stack(false); 4301 4302 // Attempt at work stealing from other tasks' queues. 4303 if (do_stealing && !has_aborted()) { 4304 // We have not aborted. This means that we have finished all that 4305 // we could. Let's try to do some stealing... 4306 4307 // We cannot check whether the global stack is empty, since other 4308 // tasks might be pushing objects to it concurrently. 4309 assert(_cm->out_of_regions() && _task_queue->size() == 0, 4310 "only way to reach here"); 4311 4312 if (_cm->verbose_low()) { 4313 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id); 4314 } 4315 4316 while (!has_aborted()) { 4317 oop obj; 4318 statsOnly( ++_steal_attempts ); 4319 4320 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 4321 if (_cm->verbose_medium()) { 4322 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully", 4323 _worker_id, p2i((void*) obj)); 4324 } 4325 4326 statsOnly( ++_steals ); 4327 4328 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 4329 "any stolen object should be marked"); 4330 scan_object(obj); 4331 4332 // And since we're towards the end, let's totally drain the 4333 // local queue and global stack. 4334 drain_local_queue(false); 4335 drain_global_stack(false); 4336 } else { 4337 break; 4338 } 4339 } 4340 } 4341 4342 // If we are about to wrap up and go into termination, check if we 4343 // should raise the overflow flag. 4344 if (do_termination && !has_aborted()) { 4345 if (_cm->force_overflow()->should_force()) { 4346 _cm->set_has_overflown(); 4347 regular_clock_call(); 4348 } 4349 } 4350 4351 // We still haven't aborted. Now, let's try to get into the 4352 // termination protocol.
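// (offer_termination() below returns true only once every task has // offered termination, i.e. there is genuinely no work left; while // spinning it calls back into should_exit_termination() above so that // a task can leave the protocol when new work or an abort shows up.)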
4353 if (do_termination && !has_aborted()) { 4354 // We cannot check whether the global stack is empty, since other 4355 // tasks might be concurrently pushing objects on it. 4356 // Separated the asserts so that we know which one fires. 4357 assert(_cm->out_of_regions(), "only way to reach here"); 4358 assert(_task_queue->size() == 0, "only way to reach here"); 4359 4360 if (_cm->verbose_low()) { 4361 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); 4362 } 4363 4364 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 4365 4366 // The CMTask class also extends the TerminatorTerminator class, 4367 // hence its should_exit_termination() method will also decide 4368 // whether to exit the termination protocol or not. 4369 bool finished = (is_serial || 4370 _cm->terminator()->offer_termination(this)); 4371 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 4372 _termination_time_ms += 4373 termination_end_time_ms - _termination_start_time_ms; 4374 4375 if (finished) { 4376 // We're all done. 4377 4378 if (_worker_id == 0) { 4379 // let's allow task 0 to do this 4380 if (concurrent()) { 4381 assert(_cm->concurrent_marking_in_progress(), "invariant"); 4382 // we need to set this to false before the next 4383 // safepoint. This way we ensure that the marking phase 4384 // doesn't observe any more heap expansions. 4385 _cm->clear_concurrent_marking_in_progress(); 4386 } 4387 } 4388 4389 // We can now guarantee that the global stack is empty, since 4390 // all other tasks have finished. We separated the guarantees so 4391 // that, if a condition is false, we can immediately find out 4392 // which one. 4393 guarantee(_cm->out_of_regions(), "only way to reach here"); 4394 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 4395 guarantee(_task_queue->size() == 0, "only way to reach here"); 4396 guarantee(!_cm->has_overflown(), "only way to reach here"); 4397 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 4398 4399 if (_cm->verbose_low()) { 4400 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); 4401 } 4402 } else { 4403 // Apparently there's more work to do. Let's abort this task. The 4404 // caller will restart it and we can hopefully find more things to do. 4405 4406 if (_cm->verbose_low()) { 4407 gclog_or_tty->print_cr("[%u] apparently there is more work to do", 4408 _worker_id); 4409 } 4410 4411 set_has_aborted(); 4412 statsOnly( ++_aborted_termination ); 4413 } 4414 } 4415 4416 // Mainly for debugging purposes to make sure that a pointer to the 4417 // closure which was statically allocated in this frame doesn't 4418 // escape it by accident. 4419 set_cm_oop_closure(NULL); 4420 double end_time_ms = os::elapsedVTime() * 1000.0; 4421 double elapsed_time_ms = end_time_ms - _start_time_ms; 4422 // Update the step history. 4423 _step_times_ms.add(elapsed_time_ms); 4424 4425 if (has_aborted()) { 4426 // The task was aborted for some reason. 4427 4428 statsOnly( ++_aborted ); 4429 4430 if (_has_timed_out) { 4431 double diff_ms = elapsed_time_ms - _time_target_ms; 4432 // Keep statistics of how well we did with respect to hitting 4433 // our target only if we actually timed out (if we aborted for 4434 // other reasons, then the results might get skewed). 4435 _marking_step_diffs_ms.add(diff_ms); 4436 } 4437 4438 if (_cm->has_overflown()) { 4439 // This is the interesting one. We aborted because a global 4440 // overflow was raised. This means we have to restart the 4441 // marking phase and start iterating over regions.
However, in 4442 // order to do this we have to make sure that all tasks stop 4443 // what they are doing and re-initialize in a safe manner. We 4444 // will achieve this with the use of two barrier sync points. 4445 4446 if (_cm->verbose_low()) { 4447 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4448 } 4449 4450 if (!is_serial) { 4451 // We only need to enter the sync barrier if being called 4452 // from a parallel context 4453 _cm->enter_first_sync_barrier(_worker_id); 4454 4455 // When we exit this sync barrier we know that all tasks have 4456 // stopped doing marking work. So, it's now safe to 4457 // re-initialize our data structures. At the end of this method, 4458 // task 0 will clear the global data structures. 4459 } 4460 4461 statsOnly( ++_aborted_overflow ); 4462 4463 // We clear the local state of this task... 4464 clear_region_fields(); 4465 4466 if (!is_serial) { 4467 // ...and enter the second barrier. 4468 _cm->enter_second_sync_barrier(_worker_id); 4469 } 4470 // At this point, if we're during the concurrent phase of 4471 // marking, everything has been re-initialized and we're 4472 // ready to restart. 4473 } 4474 4475 if (_cm->verbose_low()) { 4476 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4477 "elapsed = %1.2lfms <<<<<<<<<<", 4478 _worker_id, _time_target_ms, elapsed_time_ms); 4479 if (_cm->has_aborted()) { 4480 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4481 _worker_id); 4482 } 4483 } 4484 } else { 4485 if (_cm->verbose_low()) { 4486 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4487 "elapsed = %1.2lfms <<<<<<<<<<", 4488 _worker_id, _time_target_ms, elapsed_time_ms); 4489 } 4490 } 4491 4492 _claimed = false; 4493 } 4494 4495 CMTask::CMTask(uint worker_id, 4496 ConcurrentMark* cm, 4497 size_t* marked_bytes, 4498 BitMap* card_bm, 4499 CMTaskQueue* task_queue, 4500 CMTaskQueueSet* task_queues) 4501 : _g1h(G1CollectedHeap::heap()), 4502 _worker_id(worker_id), _cm(cm), 4503 _claimed(false), 4504 _nextMarkBitMap(NULL), _hash_seed(17), 4505 _task_queue(task_queue), 4506 _task_queues(task_queues), 4507 _cm_oop_closure(NULL), 4508 _marked_bytes_array(marked_bytes), 4509 _card_bm(card_bm) { 4510 guarantee(task_queue != NULL, "invariant"); 4511 guarantee(task_queues != NULL, "invariant"); 4512 4513 statsOnly( _clock_due_to_scanning = 0; 4514 _clock_due_to_marking = 0 ); 4515 4516 _marking_step_diffs_ms.add(0.5); 4517 } 4518 4519 // These are formatting macros that are used below to ensure 4520 // consistent formatting. The *_H_* versions are used to format the 4521 // header for a particular value and they should be kept consistent 4522 // with the corresponding macro. Also note that most of the macros add 4523 // the necessary white space (as a prefix) which makes them a bit 4524 // easier to compose. 4525 4526 // All the output lines are prefixed with this string to be able to 4527 // identify them easily in a large log file. 
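// For example, every line below comes out as "### <columns>", so // something like "grep '^###'" pulls the whole liveness report out of // a large log file.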
4528 #define G1PPRL_LINE_PREFIX "###" 4529 4530 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT 4531 #ifdef _LP64 4532 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 4533 #else // _LP64 4534 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 4535 #endif // _LP64 4536 4537 // For per-region info 4538 #define G1PPRL_TYPE_FORMAT " %-4s" 4539 #define G1PPRL_TYPE_H_FORMAT " %4s" 4540 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) 4541 #define G1PPRL_BYTE_H_FORMAT " %9s" 4542 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 4543 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 4544 4545 // For summary info 4546 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT 4547 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT 4548 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" 4549 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" 4550 4551 G1PrintRegionLivenessInfoClosure:: 4552 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) 4553 : _out(out), 4554 _total_used_bytes(0), _total_capacity_bytes(0), 4555 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4556 _hum_used_bytes(0), _hum_capacity_bytes(0), 4557 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 4558 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 4559 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4560 MemRegion g1_reserved = g1h->g1_reserved(); 4561 double now = os::elapsedTime(); 4562 4563 // Print the header of the output. 4564 _out->cr(); 4565 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 4566 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" 4567 G1PPRL_SUM_ADDR_FORMAT("reserved") 4568 G1PPRL_SUM_BYTE_FORMAT("region-size"), 4569 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 4570 HeapRegion::GrainBytes); 4571 _out->print_cr(G1PPRL_LINE_PREFIX); 4572 _out->print_cr(G1PPRL_LINE_PREFIX 4573 G1PPRL_TYPE_H_FORMAT 4574 G1PPRL_ADDR_BASE_H_FORMAT 4575 G1PPRL_BYTE_H_FORMAT 4576 G1PPRL_BYTE_H_FORMAT 4577 G1PPRL_BYTE_H_FORMAT 4578 G1PPRL_DOUBLE_H_FORMAT 4579 G1PPRL_BYTE_H_FORMAT 4580 G1PPRL_BYTE_H_FORMAT, 4581 "type", "address-range", 4582 "used", "prev-live", "next-live", "gc-eff", 4583 "remset", "code-roots"); 4584 _out->print_cr(G1PPRL_LINE_PREFIX 4585 G1PPRL_TYPE_H_FORMAT 4586 G1PPRL_ADDR_BASE_H_FORMAT 4587 G1PPRL_BYTE_H_FORMAT 4588 G1PPRL_BYTE_H_FORMAT 4589 G1PPRL_BYTE_H_FORMAT 4590 G1PPRL_DOUBLE_H_FORMAT 4591 G1PPRL_BYTE_H_FORMAT 4592 G1PPRL_BYTE_H_FORMAT, 4593 "", "", 4594 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 4595 "(bytes)", "(bytes)"); 4596 } 4597 4598 // It takes as a parameter a reference to one of the _hum_* fields; it 4599 // deduces the corresponding value for a region in a humongous region 4600 // series (either the region size, or what's left if the _hum_* field 4601 // is < the region size), and updates the _hum_* field accordingly. 4602 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { 4603 size_t bytes = 0; 4604 // The > 0 check is to deal with the prev and next live bytes which 4605 // could be 0. 4606 if (*hum_bytes > 0) { 4607 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); 4608 *hum_bytes -= bytes; 4609 } 4610 return bytes; 4611 } 4612 4613 // It deduces the values for a region in a humongous region series 4614 // from the _hum_* fields and updates those accordingly. It assumes 4615 // that the _hum_* fields have already been set up from the "starts 4616 // humongous" region and we visit the regions in address order.
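// For example, for a humongous object spanning two and a half regions // (_hum_used_bytes == 2.5 * GrainBytes when the "starts humongous" // region is visited): the first two rows each report GrainBytes of // used space, the third reports the remaining half region, and // _hum_used_bytes ends up at zero.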
4617 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4618 size_t* capacity_bytes, 4619 size_t* prev_live_bytes, 4620 size_t* next_live_bytes) { 4621 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4622 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4623 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4624 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4625 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4626 } 4627 4628 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4629 const char* type = r->get_type_str(); 4630 HeapWord* bottom = r->bottom(); 4631 HeapWord* end = r->end(); 4632 size_t capacity_bytes = r->capacity(); 4633 size_t used_bytes = r->used(); 4634 size_t prev_live_bytes = r->live_bytes(); 4635 size_t next_live_bytes = r->next_live_bytes(); 4636 double gc_eff = r->gc_efficiency(); 4637 size_t remset_bytes = r->rem_set()->mem_size(); 4638 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4639 4640 if (r->is_starts_humongous()) { 4641 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4642 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4643 "they should have been zeroed after the last time we used them"); 4644 // Set up the _hum_* fields. 4645 _hum_capacity_bytes = capacity_bytes; 4646 _hum_used_bytes = used_bytes; 4647 _hum_prev_live_bytes = prev_live_bytes; 4648 _hum_next_live_bytes = next_live_bytes; 4649 get_hum_bytes(&used_bytes, &capacity_bytes, 4650 &prev_live_bytes, &next_live_bytes); 4651 end = bottom + HeapRegion::GrainWords; 4652 } else if (r->is_continues_humongous()) { 4653 get_hum_bytes(&used_bytes, &capacity_bytes, 4654 &prev_live_bytes, &next_live_bytes); 4655 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4656 } 4657 4658 _total_used_bytes += used_bytes; 4659 _total_capacity_bytes += capacity_bytes; 4660 _total_prev_live_bytes += prev_live_bytes; 4661 _total_next_live_bytes += next_live_bytes; 4662 _total_remset_bytes += remset_bytes; 4663 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4664 4665 // Print a line for this particular region. 4666 _out->print_cr(G1PPRL_LINE_PREFIX 4667 G1PPRL_TYPE_FORMAT 4668 G1PPRL_ADDR_BASE_FORMAT 4669 G1PPRL_BYTE_FORMAT 4670 G1PPRL_BYTE_FORMAT 4671 G1PPRL_BYTE_FORMAT 4672 G1PPRL_DOUBLE_FORMAT 4673 G1PPRL_BYTE_FORMAT 4674 G1PPRL_BYTE_FORMAT, 4675 type, p2i(bottom), p2i(end), 4676 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4677 remset_bytes, strong_code_roots_bytes); 4678 4679 return false; 4680 } 4681 4682 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4683 // add static memory usages to remembered set sizes 4684 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4685 // Print the footer of the output. 
4686 _out->print_cr(G1PPRL_LINE_PREFIX); 4687 _out->print_cr(G1PPRL_LINE_PREFIX 4688 " SUMMARY" 4689 G1PPRL_SUM_MB_FORMAT("capacity") 4690 G1PPRL_SUM_MB_PERC_FORMAT("used") 4691 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4692 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4693 G1PPRL_SUM_MB_FORMAT("remset") 4694 G1PPRL_SUM_MB_FORMAT("code-roots"), 4695 bytes_to_mb(_total_capacity_bytes), 4696 bytes_to_mb(_total_used_bytes), 4697 perc(_total_used_bytes, _total_capacity_bytes), 4698 bytes_to_mb(_total_prev_live_bytes), 4699 perc(_total_prev_live_bytes, _total_capacity_bytes), 4700 bytes_to_mb(_total_next_live_bytes), 4701 perc(_total_next_live_bytes, _total_capacity_bytes), 4702 bytes_to_mb(_total_remset_bytes), 4703 bytes_to_mb(_total_strong_code_roots_bytes)); 4704 _out->cr(); 4705 }