/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/strongRootsScope.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
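  // Each bitmap bit covers (1 << _shifter) heap words, so an arbitrary
  // address must first be rounded up to the next covered boundary;
  // heapWordToOffset() below then turns the aligned address into the bit
  // index at which the search starts.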
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer _hrclaimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}

  void work(uint worker_id) {
    if (_suspendible) {
      SuspendibleThreadSet::join();
    }
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
    if (_suspendible) {
      SuspendibleThreadSet::leave();
    }
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

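// The global mark stack, shared by all marking tasks. Pushes synchronize by
// CAS-ing _index; a push that does not fit sets _overflow, which eventually
// forces marking to restart and, at remark, lets the stack be expanded.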
CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
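    // The CAS lost the race: another thread advanced _index first. Loop
    // around, re-read _index and re-check for fullness before retrying.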
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
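  // On an aborted scan claim_next() starts returning NULL before every
  // survivor has been handed out, so completeness is only asserted on the
  // non-aborted path below.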
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor          = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
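    // e.g. (illustrative): with ParallelGCThreads == 8 this yields
    // scale_parallel_threads(8) == MAX2((8 + 2) / 4, 1U) == 2 marking threads.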
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Marker",
                                           _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty(); // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible to be suspended for a Full GC or an evacuation pause
 * could occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.
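  // If remark overflows the global mark stack, checkpointRootsFinal() below
  // sets _restart_for_overflow; the concurrent mark thread is then expected
  // to call markFromRoots() again, so the flag is cleared at the start of
  // each attempt.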

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots()
  // and the decisions on that MT processing is made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.
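// Mismatches are tallied rather than treated as fatal: doHeapRegion() below
// always returns false so that every region is visited and the caller can
// report the total failure count at the end.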

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting for some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
1636 1637 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1638 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1639 1640 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1641 expected = _exp_card_bm->at(i); 1642 actual = _card_bm->at(i); 1643 1644 if (expected && !actual) { 1645 if (_verbose) { 1646 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1647 "expected: %s, actual: %s", 1648 hr->hrm_index(), i, 1649 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1650 } 1651 failures += 1; 1652 } 1653 } 1654 1655 if (failures > 0 && _verbose) { 1656 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1657 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1658 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1659 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1660 } 1661 1662 _failures += failures; 1663 1664 // We could stop iteration over the heap when we 1665 // find the first violating region by returning true. 1666 return false; 1667 } 1668 }; 1669 1670 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1671 protected: 1672 G1CollectedHeap* _g1h; 1673 ConcurrentMark* _cm; 1674 BitMap* _actual_region_bm; 1675 BitMap* _actual_card_bm; 1676 1677 uint _n_workers; 1678 1679 BitMap* _expected_region_bm; 1680 BitMap* _expected_card_bm; 1681 1682 int _failures; 1683 bool _verbose; 1684 1685 HeapRegionClaimer _hrclaimer; 1686 1687 public: 1688 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1689 BitMap* region_bm, BitMap* card_bm, 1690 BitMap* expected_region_bm, BitMap* expected_card_bm) 1691 : AbstractGangTask("G1 verify final counting"), 1692 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1693 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1694 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1695 _failures(0), _verbose(false), 1696 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1697 assert(VerifyDuringGC, "don't call this otherwise"); 1698 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1699 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1700 1701 _verbose = _cm->verbose_medium(); 1702 } 1703 1704 void work(uint worker_id) { 1705 assert(worker_id < _n_workers, "invariant"); 1706 1707 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1708 _actual_region_bm, _actual_card_bm, 1709 _expected_region_bm, 1710 _expected_card_bm, 1711 _verbose); 1712 1713 _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer); 1714 1715 Atomic::add(verify_cl.failures(), &_failures); 1716 } 1717 1718 int failures() const { return _failures; } 1719 }; 1720 1721 // Closure that finalizes the liveness counting data. 1722 // Used during the cleanup pause. 1723 // Sets the bits corresponding to the interval [NTAMS, top] 1724 // (which contains the implicitly live objects) in the 1725 // card liveness bitmap. Also sets the bit for each region, 1726 // containing live data, in the region liveness bitmap. 1727 1728 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1729 public: 1730 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1731 BitMap* region_bm, 1732 BitMap* card_bm) : 1733 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1734 1735 bool doHeapRegion(HeapRegion* hr) { 1736 1737 if (hr->is_continues_humongous()) { 1738 // We will ignore these here and process them when their 1739 // associated "starts humongous" region is processed (see 1740 // set_bit_for_heap_region()). 
Note that we cannot rely on their 1741 // associated "starts humongous" region to have their bit set to 1742 // 1 since, due to the region chunking in the parallel region 1743 // iteration, a "continues humongous" region might be visited 1744 // before its associated "starts humongous". 1745 return false; 1746 } 1747 1748 HeapWord* ntams = hr->next_top_at_mark_start(); 1749 HeapWord* top = hr->top(); 1750 1751 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1752 1753 // Mark the allocated-since-marking portion... 1754 if (ntams < top) { 1755 // This definitely means the region has live objects. 1756 set_bit_for_region(hr); 1757 1758 // Now set the bits in the card bitmap for [ntams, top) 1759 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1760 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1761 1762 // Note: if we're looking at the last region in heap - top 1763 // could be actually just beyond the end of the heap; end_idx 1764 // will then correspond to a (non-existent) card that is also 1765 // just beyond the heap. 1766 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1767 // end of object is not card aligned - increment to cover 1768 // all the cards spanned by the object 1769 end_idx += 1; 1770 } 1771 1772 assert(end_idx <= _card_bm->size(), 1773 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1774 end_idx, _card_bm->size())); 1775 assert(start_idx < _card_bm->size(), 1776 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1777 start_idx, _card_bm->size())); 1778 1779 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1780 } 1781 1782 // Set the bit for the region if it contains live data 1783 if (hr->next_marked_bytes() > 0) { 1784 set_bit_for_region(hr); 1785 } 1786 1787 return false; 1788 } 1789 }; 1790 1791 class G1ParFinalCountTask: public AbstractGangTask { 1792 protected: 1793 G1CollectedHeap* _g1h; 1794 ConcurrentMark* _cm; 1795 BitMap* _actual_region_bm; 1796 BitMap* _actual_card_bm; 1797 1798 uint _n_workers; 1799 HeapRegionClaimer _hrclaimer; 1800 1801 public: 1802 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1803 : AbstractGangTask("G1 final counting"), 1804 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1805 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1806 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1807 } 1808 1809 void work(uint worker_id) { 1810 assert(worker_id < _n_workers, "invariant"); 1811 1812 FinalCountDataUpdateClosure final_update_cl(_g1h, 1813 _actual_region_bm, 1814 _actual_card_bm); 1815 1816 _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer); 1817 } 1818 }; 1819 1820 class G1ParNoteEndTask; 1821 1822 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1823 G1CollectedHeap* _g1; 1824 size_t _max_live_bytes; 1825 uint _regions_claimed; 1826 size_t _freed_bytes; 1827 FreeRegionList* _local_cleanup_list; 1828 HeapRegionSetCount _old_regions_removed; 1829 HeapRegionSetCount _humongous_regions_removed; 1830 HRRSCleanupTask* _hrrs_cleanup_task; 1831 double _claimed_region_time; 1832 double _max_region_time; 1833 1834 public: 1835 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1836 FreeRegionList* local_cleanup_list, 1837 HRRSCleanupTask* hrrs_cleanup_task) : 1838 _g1(g1), 1839 _max_live_bytes(0), _regions_claimed(0), 1840 _freed_bytes(0), 1841 _claimed_region_time(0.0), _max_region_time(0.0), 1842 _local_cleanup_list(local_cleanup_list), 1843 
_old_regions_removed(), 1844 _humongous_regions_removed(), 1845 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1846 1847 size_t freed_bytes() { return _freed_bytes; } 1848 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } 1849 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } 1850 1851 bool doHeapRegion(HeapRegion *hr) { 1852 if (hr->is_continues_humongous()) { 1853 return false; 1854 } 1855 // We use a claim value of zero here because all regions 1856 // were claimed with value 1 in the FinalCount task. 1857 _g1->reset_gc_time_stamps(hr); 1858 double start = os::elapsedTime(); 1859 _regions_claimed++; 1860 hr->note_end_of_marking(); 1861 _max_live_bytes += hr->max_live_bytes(); 1862 1863 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1864 _freed_bytes += hr->used(); 1865 hr->set_containing_set(NULL); 1866 if (hr->is_humongous()) { 1867 assert(hr->is_starts_humongous(), "we should only see starts humongous"); 1868 _humongous_regions_removed.increment(1u, hr->capacity()); 1869 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1870 } else { 1871 _old_regions_removed.increment(1u, hr->capacity()); 1872 _g1->free_region(hr, _local_cleanup_list, true); 1873 } 1874 } else { 1875 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1876 } 1877 1878 double region_time = (os::elapsedTime() - start); 1879 _claimed_region_time += region_time; 1880 if (region_time > _max_region_time) { 1881 _max_region_time = region_time; 1882 } 1883 return false; 1884 } 1885 1886 size_t max_live_bytes() { return _max_live_bytes; } 1887 uint regions_claimed() { return _regions_claimed; } 1888 double claimed_region_time_sec() { return _claimed_region_time; } 1889 double max_region_time_sec() { return _max_region_time; } 1890 }; 1891 1892 class G1ParNoteEndTask: public AbstractGangTask { 1893 friend class G1NoteEndOfConcMarkClosure; 1894 1895 protected: 1896 G1CollectedHeap* _g1h; 1897 size_t _max_live_bytes; 1898 size_t _freed_bytes; 1899 FreeRegionList* _cleanup_list; 1900 HeapRegionClaimer _hrclaimer; 1901 1902 public: 1903 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1904 AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { 1905 } 1906 1907 void work(uint worker_id) { 1908 FreeRegionList local_cleanup_list("Local Cleanup List"); 1909 HRRSCleanupTask hrrs_cleanup_task; 1910 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1911 &hrrs_cleanup_task); 1912 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); 1913 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1914 1915 // Now update the lists 1916 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1917 { 1918 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1919 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1920 _max_live_bytes += g1_note_end.max_live_bytes(); 1921 _freed_bytes += g1_note_end.freed_bytes(); 1922 1923 // If we iterate over the global cleanup list at the end of 1924 // cleanup to do this printing we will not guarantee to only 1925 // generate output for the newly-reclaimed regions (the list 1926 // might not be empty at the beginning of cleanup; we might 1927 // still be working on its previous contents). So we do the 1928 // printing here, before we append the new regions to the global 1929 // cleanup list. 
1930 1931 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1932 if (hr_printer->is_active()) { 1933 FreeRegionListIterator iter(&local_cleanup_list); 1934 while (iter.more_available()) { 1935 HeapRegion* hr = iter.get_next(); 1936 hr_printer->cleanup(hr); 1937 } 1938 } 1939 1940 _cleanup_list->add_ordered(&local_cleanup_list); 1941 assert(local_cleanup_list.is_empty(), "post-condition"); 1942 1943 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1944 } 1945 } 1946 size_t max_live_bytes() { return _max_live_bytes; } 1947 size_t freed_bytes() { return _freed_bytes; } 1948 }; 1949 1950 class G1ParScrubRemSetTask: public AbstractGangTask { 1951 protected: 1952 G1RemSet* _g1rs; 1953 BitMap* _region_bm; 1954 BitMap* _card_bm; 1955 HeapRegionClaimer _hrclaimer; 1956 1957 public: 1958 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1959 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1960 } 1961 1962 void work(uint worker_id) { 1963 _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer); 1964 } 1965 1966 }; 1967 1968 void ConcurrentMark::cleanup() { 1969 // world is stopped at this checkpoint 1970 assert(SafepointSynchronize::is_at_safepoint(), 1971 "world should be stopped"); 1972 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1973 1974 // If a full collection has happened, we shouldn't do this. 1975 if (has_aborted()) { 1976 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1977 return; 1978 } 1979 1980 g1h->verify_region_sets_optional(); 1981 1982 if (VerifyDuringGC) { 1983 HandleMark hm; // handle scope 1984 g1h->prepare_for_verify(); 1985 Universe::verify(VerifyOption_G1UsePrevMarking, 1986 " VerifyDuringGC:(before)"); 1987 } 1988 g1h->check_bitmaps("Cleanup Start"); 1989 1990 G1CollectorPolicy* g1p = g1h->g1_policy(); 1991 g1p->record_concurrent_mark_cleanup_start(); 1992 1993 double start = os::elapsedTime(); 1994 1995 HeapRegionRemSet::reset_for_cleanup_tasks(); 1996 1997 uint n_workers; 1998 1999 // Do counting once more with the world stopped for good measure. 2000 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 2001 2002 g1h->set_par_threads(); 2003 n_workers = g1h->n_par_threads(); 2004 assert(g1h->n_par_threads() == n_workers, 2005 "Should not have been reset"); 2006 g1h->workers()->run_task(&g1_par_count_task); 2007 // Done with the parallel phase so reset to 0. 2008 g1h->set_par_threads(0); 2009 2010 if (VerifyDuringGC) { 2011 // Verify that the counting data accumulated during marking matches 2012 // that calculated by walking the marking bitmap. 2013 2014 // Bitmaps to hold expected values 2015 BitMap expected_region_bm(_region_bm.size(), true); 2016 BitMap expected_card_bm(_card_bm.size(), true); 2017 2018 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 2019 &_region_bm, 2020 &_card_bm, 2021 &expected_region_bm, 2022 &expected_card_bm); 2023 2024 g1h->set_par_threads((int)n_workers); 2025 g1h->workers()->run_task(&g1_par_verify_task); 2026 // Done with the parallel phase so reset to 0. 
2027 g1h->set_par_threads(0); 2028 2029 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); 2030 } 2031 2032 size_t start_used_bytes = g1h->used(); 2033 g1h->set_marking_complete(); 2034 2035 double count_end = os::elapsedTime(); 2036 double this_final_counting_time = (count_end - start); 2037 _total_counting_time += this_final_counting_time; 2038 2039 if (G1PrintRegionLivenessInfo) { 2040 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); 2041 _g1h->heap_region_iterate(&cl); 2042 } 2043 2044 // Install newly created mark bitMap as "prev". 2045 swapMarkBitMaps(); 2046 2047 g1h->reset_gc_time_stamp(); 2048 2049 // Note end of marking in all heap regions. 2050 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers); 2051 g1h->set_par_threads((int)n_workers); 2052 g1h->workers()->run_task(&g1_par_note_end_task); 2053 g1h->set_par_threads(0); 2054 g1h->check_gc_time_stamps(); 2055 2056 if (!cleanup_list_is_empty()) { 2057 // The cleanup list is not empty, so we'll have to process it 2058 // concurrently. Notify anyone else that might be wanting free 2059 // regions that there will be more free regions coming soon. 2060 g1h->set_free_regions_coming(); 2061 } 2062 2063 // call below, since it affects the metric by which we sort the heap 2064 // regions. 2065 if (G1ScrubRemSets) { 2066 double rs_scrub_start = os::elapsedTime(); 2067 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers); 2068 g1h->set_par_threads((int)n_workers); 2069 g1h->workers()->run_task(&g1_par_scrub_rs_task); 2070 g1h->set_par_threads(0); 2071 2072 double rs_scrub_end = os::elapsedTime(); 2073 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); 2074 _total_rs_scrub_time += this_rs_scrub_time; 2075 } 2076 2077 // this will also free any regions totally full of garbage objects, 2078 // and sort the regions. 2079 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers); 2080 2081 // Statistics. 2082 double end = os::elapsedTime(); 2083 _cleanup_times.add((end - start) * 1000.0); 2084 2085 if (G1Log::fine()) { 2086 g1h->g1_policy()->print_heap_transition(start_used_bytes); 2087 } 2088 2089 // Clean up will have freed any regions completely full of garbage. 2090 // Update the soft reference policy with the new heap occupancy. 2091 Universe::update_heap_info_at_gc(); 2092 2093 if (VerifyDuringGC) { 2094 HandleMark hm; // handle scope 2095 g1h->prepare_for_verify(); 2096 Universe::verify(VerifyOption_G1UsePrevMarking, 2097 " VerifyDuringGC:(after)"); 2098 } 2099 2100 g1h->check_bitmaps("Cleanup End"); 2101 2102 g1h->verify_region_sets_optional(); 2103 2104 // We need to make this be a "collection" so any collection pause that 2105 // races with it goes around and waits for completeCleanup to finish. 2106 g1h->increment_total_collections(); 2107 2108 // Clean out dead classes and update Metaspace sizes. 2109 if (ClassUnloadingWithConcurrentMark) { 2110 ClassLoaderDataGraph::purge(); 2111 } 2112 MetaspaceGC::compute_new_size(); 2113 2114 // We reclaimed old regions so we should calculate the sizes to make 2115 // sure we update the old gen/space data. 
g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();

  g1h->trace_heap_after_concurrent_cycle();
}

void ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                           "cleanup list has %u entries",
                           _cleanup_list.length());
  }

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks.
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                               "appending %u entries to the secondary_free_list, "
                               "cleanup list still has %u entries",
                               tmp_free_list.length(),
                               _cleanup_list.length());
      }

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking.

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the CMTask associated with a worker thread (for serial reference
// processing the CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
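//
// Roughly, the batching below behaves like this (a sketch using the
// names from the class that follows, not normative):
//
//   deal_with_reference(obj);            // push through the task's queue
//   if (--_ref_counter == 0) {           // every G1RefProcDrainInterval refs
//     do {
//       _task->do_marking_step(G1ConcMarkStepDurationMillis, ...);
//     } while (_task->has_aborted() && !_cm->has_overflown());
//     _ref_counter = _ref_counter_limit; // start a new batch
//   }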
2197 2198 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2199 ConcurrentMark* _cm; 2200 CMTask* _task; 2201 int _ref_counter_limit; 2202 int _ref_counter; 2203 bool _is_serial; 2204 public: 2205 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2206 _cm(cm), _task(task), _is_serial(is_serial), 2207 _ref_counter_limit(G1RefProcDrainInterval) { 2208 assert(_ref_counter_limit > 0, "sanity"); 2209 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2210 _ref_counter = _ref_counter_limit; 2211 } 2212 2213 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2214 virtual void do_oop( oop* p) { do_oop_work(p); } 2215 2216 template <class T> void do_oop_work(T* p) { 2217 if (!_cm->has_overflown()) { 2218 oop obj = oopDesc::load_decode_heap_oop(p); 2219 if (_cm->verbose_high()) { 2220 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2221 "*"PTR_FORMAT" = "PTR_FORMAT, 2222 _task->worker_id(), p2i(p), p2i((void*) obj)); 2223 } 2224 2225 _task->deal_with_reference(obj); 2226 _ref_counter--; 2227 2228 if (_ref_counter == 0) { 2229 // We have dealt with _ref_counter_limit references, pushing them 2230 // and objects reachable from them on to the local stack (and 2231 // possibly the global stack). Call CMTask::do_marking_step() to 2232 // process these entries. 2233 // 2234 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2235 // there's nothing more to do (i.e. we're done with the entries that 2236 // were pushed as a result of the CMTask::deal_with_reference() calls 2237 // above) or we overflow. 2238 // 2239 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2240 // flag while there may still be some work to do. (See the comment at 2241 // the beginning of CMTask::do_marking_step() for those conditions - 2242 // one of which is reaching the specified time target.) It is only 2243 // when CMTask::do_marking_step() returns without setting the 2244 // has_aborted() flag that the marking step has completed. 2245 do { 2246 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2247 _task->do_marking_step(mark_step_duration_ms, 2248 false /* do_termination */, 2249 _is_serial); 2250 } while (_task->has_aborted() && !_cm->has_overflown()); 2251 _ref_counter = _ref_counter_limit; 2252 } 2253 } else { 2254 if (_cm->verbose_high()) { 2255 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2256 } 2257 } 2258 } 2259 }; 2260 2261 // 'Drain' oop closure used by both serial and parallel reference processing. 2262 // Uses the CMTask associated with a given worker thread (for serial 2263 // reference processing the CMtask for worker 0 is used). Calls the 2264 // do_marking_step routine, with an unbelievably large timeout value, 2265 // to drain the marking data structures of the remaining entries 2266 // added by the 'keep alive' oop closure above. 
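//
// Because the time target is effectively infinite, each
// do_marking_step() call below can only return early due to overflow
// or one of the other abort conditions, never because the time budget
// ran out - so the surrounding loop terminates exactly when the
// marking stacks have been drained.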
class G1CMDrainMarkingStackClosure: public VoidClosure {
  ConcurrentMark* _cm;
  CMTask* _task;
  bool _is_serial;
 public:
  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
                               _task->worker_id(), BOOL_TO_STR(_is_serial));
      }

      // We call CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking.

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  WorkGang* _workers;
  int _active_workers;

 public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          ConcurrentMark* cm,
                          WorkGang* workers,
                          int n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
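  // (Both overrides below funnel the work through a proxy
  // AbstractGangTask - G1CMRefProcTaskProxy and G1CMRefEnqueueTaskProxy,
  // defined after this class - and both reset the concurrency level
  // before running, so that do_marking_step() knows how many workers to
  // wait for in its termination protocol.)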
2328 virtual void execute(ProcessTask& task); 2329 virtual void execute(EnqueueTask& task); 2330 }; 2331 2332 class G1CMRefProcTaskProxy: public AbstractGangTask { 2333 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2334 ProcessTask& _proc_task; 2335 G1CollectedHeap* _g1h; 2336 ConcurrentMark* _cm; 2337 2338 public: 2339 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2340 G1CollectedHeap* g1h, 2341 ConcurrentMark* cm) : 2342 AbstractGangTask("Process reference objects in parallel"), 2343 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2344 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2345 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2346 } 2347 2348 virtual void work(uint worker_id) { 2349 ResourceMark rm; 2350 HandleMark hm; 2351 CMTask* task = _cm->task(worker_id); 2352 G1CMIsAliveClosure g1_is_alive(_g1h); 2353 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2354 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2355 2356 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2357 } 2358 }; 2359 2360 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2361 assert(_workers != NULL, "Need parallel worker threads."); 2362 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2363 2364 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2365 2366 // We need to reset the concurrency level before each 2367 // proxy task execution, so that the termination protocol 2368 // and overflow handling in CMTask::do_marking_step() knows 2369 // how many workers to wait for. 2370 _cm->set_concurrency(_active_workers); 2371 _g1h->set_par_threads(_active_workers); 2372 _workers->run_task(&proc_task_proxy); 2373 _g1h->set_par_threads(0); 2374 } 2375 2376 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2377 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2378 EnqueueTask& _enq_task; 2379 2380 public: 2381 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2382 AbstractGangTask("Enqueue reference objects in parallel"), 2383 _enq_task(enq_task) { } 2384 2385 virtual void work(uint worker_id) { 2386 _enq_task.work(worker_id); 2387 } 2388 }; 2389 2390 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2391 assert(_workers != NULL, "Need parallel worker threads."); 2392 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2393 2394 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2395 2396 // Not strictly necessary but... 2397 // 2398 // We need to reset the concurrency level before each 2399 // proxy task execution, so that the termination protocol 2400 // and overflow handling in CMTask::do_marking_step() knows 2401 // how many workers to wait for. 2402 _cm->set_concurrency(_active_workers); 2403 _g1h->set_par_threads(_active_workers); 2404 _workers->run_task(&enq_task_proxy); 2405 _g1h->set_par_threads(0); 2406 } 2407 2408 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2409 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2410 } 2411 2412 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2413 if (has_overflown()) { 2414 // Skip processing the discovered references if we have 2415 // overflown the global marking stack. Reference objects 2416 // only get discovered once so it is OK to not 2417 // de-populate the discovered reference lists. 
We could have,
  // but the only benefit would be that, when marking restarts,
  // fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    G1CMTraceTime t("GC ref-proc", G1Log::finer());

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (e.g.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          g1h->gc_timer_cm(),
                                          concurrent_gc_id());
    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed.
    return;
  }

  assert(_markStack.isEmpty(), "Marking should have completed");

  // Unload Klasses, String, Symbols, Code Cache, etc.
  {
    G1CMTraceTime trace("Unloading", G1Log::finer());

    if (ClassUnloadingWithConcurrentMark) {
      bool purged_classes;

      {
        G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
      }

      {
        G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
      }
    }

    if (G1StringDedup::is_enabled()) {
      G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
      G1StringDedup::unlink(&g1_is_alive);
    }
  }
}

void ConcurrentMark::swapMarkBitMaps() {
  CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap  = (CMBitMap*)  temp;
}

class CMObjectClosure;

// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
 private:
  CMTask* _task;

 public:
  void do_object(oop obj) {
    _task->deal_with_reference(obj);
  }

  CMObjectClosure(CMTask* task) : _task(task) { }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  CMObjectClosure _cm_obj;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

 public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
        // however, oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
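        // To be safe, we therefore visit the oops embedded in this
        // thread's nmethods here as remark roots. Note that _code_cl was
        // constructed with !FixRelocations: it only marks through the
        // embedded oops, it does not update their locations.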
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
      }
    }
  }
};

class CMRemarkTask: public AbstractGangTask {
 private:
  ConcurrentMark* _cm;
 public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true /* do_termination */,
                              false /* is_serial */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  CMRemarkTask(ConcurrentMark* cm, int active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  G1CMTraceTime trace("Finalize Marking", G1Log::finer());

  g1h->ensure_parsability(false);

  StrongRootsScope srs;
  // This is remark, so we'll use up all active threads.
  uint active_workers = g1h->workers()->active_workers();
  if (active_workers == 0) {
    assert(active_workers > 0, "Should have been set earlier");
    active_workers = (uint) ParallelGCThreads;
    g1h->workers()->set_active_workers(active_workers);
  }
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its value originally
  // calculated in the ConcurrentMark constructor and pass values
  // of the active workers through the gang in the task.

  CMRemarkTask remarkTask(this, active_workers);
  // We will start all available threads, even if we decide that the
  // active_workers will be fewer. The extra ones will just bail out
  // immediately.
  g1h->set_par_threads(active_workers);
  g1h->workers()->run_task(&remarkTask);
  g1h->set_par_threads(0);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            err_msg("Invariant: has_overflown = %s, num buffers = %d",
                    BOOL_TO_STR(has_overflown()),
                    satb_mq_set.completed_buffers_num()));

  print_stats();
}

void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
}

void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  _nextMarkBitMap->clearRange(mr);
}

HeapRegion*
ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    // Note on how this code handles humongous regions. In the
    // normal case the finger will reach the start of a "starts
    // humongous" (SH) region. Its end will either be the end of the
    // last "continues humongous" (CH) region in the sequence, or the
    // standard end of the SH region (if the SH is the only region in
    // the sequence). That way claim_region() will skip over the CH
    // regions. However, there is a subtle race between a CM thread
    // executing this method and a mutator thread doing a humongous
    // object allocation. The two are not mutually exclusive as the CM
    // thread does not need to hold the Heap_lock when it gets
    // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming a SH or a CH
    // region. In the former case, it will either
    //   a) Miss the update to the region's end, in which case it will
    //      visit every subsequent CH region, will find their bitmaps
    //      empty, and do nothing, or
    //   b) Will observe the update of the region's end (in which case
    //      it will skip the subsequent CH regions).
    // If it comes across a region that suddenly becomes CH, the
    // scenario will be similar to b). So, the race between
    // claim_region() and a humongous object allocation might force us
    // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);

    // heap_region_containing_raw above may return NULL as we always
    // scan/claim until the end of the heap. In this case, just jump to
    // the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
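    // The claim protocol in sketch form: CAS the global finger from our
    // snapshot 'finger' to 'end'. On success we own [finger, end) (and
    // the region, if any); otherwise another worker moved the finger
    // and we retry from its new value.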
2730 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); 2731 if (res == finger && curr_region != NULL) { 2732 // we succeeded 2733 HeapWord* bottom = curr_region->bottom(); 2734 HeapWord* limit = curr_region->next_top_at_mark_start(); 2735 2736 if (verbose_low()) { 2737 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " 2738 "["PTR_FORMAT", "PTR_FORMAT"), " 2739 "limit = "PTR_FORMAT, 2740 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit)); 2741 } 2742 2743 // notice that _finger == end cannot be guaranteed here since, 2744 // someone else might have moved the finger even further 2745 assert(_finger >= end, "the finger should have moved forward"); 2746 2747 if (verbose_low()) { 2748 gclog_or_tty->print_cr("[%u] we were successful with region = " 2749 PTR_FORMAT, worker_id, p2i(curr_region)); 2750 } 2751 2752 if (limit > bottom) { 2753 if (verbose_low()) { 2754 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, " 2755 "returning it ", worker_id, p2i(curr_region)); 2756 } 2757 return curr_region; 2758 } else { 2759 assert(limit == bottom, 2760 "the region limit should be at bottom"); 2761 if (verbose_low()) { 2762 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, " 2763 "returning NULL", worker_id, p2i(curr_region)); 2764 } 2765 // we return NULL and the caller should try calling 2766 // claim_region() again. 2767 return NULL; 2768 } 2769 } else { 2770 assert(_finger > finger, "the finger should have moved forward"); 2771 if (verbose_low()) { 2772 if (curr_region == NULL) { 2773 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 2774 "global finger = "PTR_FORMAT", " 2775 "our finger = "PTR_FORMAT, 2776 worker_id, p2i(_finger), p2i(finger)); 2777 } else { 2778 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2779 "global finger = "PTR_FORMAT", " 2780 "our finger = "PTR_FORMAT, 2781 worker_id, p2i(_finger), p2i(finger)); 2782 } 2783 } 2784 2785 // read it again 2786 finger = _finger; 2787 } 2788 } 2789 2790 return NULL; 2791 } 2792 2793 #ifndef PRODUCT 2794 enum VerifyNoCSetOopsPhase { 2795 VerifyNoCSetOopsStack, 2796 VerifyNoCSetOopsQueues, 2797 VerifyNoCSetOopsSATBCompleted, 2798 VerifyNoCSetOopsSATBThread 2799 }; 2800 2801 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2802 private: 2803 G1CollectedHeap* _g1h; 2804 VerifyNoCSetOopsPhase _phase; 2805 int _info; 2806 2807 const char* phase_str() { 2808 switch (_phase) { 2809 case VerifyNoCSetOopsStack: return "Stack"; 2810 case VerifyNoCSetOopsQueues: return "Queue"; 2811 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 2812 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 2813 default: ShouldNotReachHere(); 2814 } 2815 return NULL; 2816 } 2817 2818 void do_object_work(oop obj) { 2819 guarantee(!_g1h->obj_in_cs(obj), 2820 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2821 p2i((void*) obj), phase_str(), _info)); 2822 } 2823 2824 public: 2825 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2826 2827 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2828 _phase = phase; 2829 _info = info; 2830 } 2831 2832 virtual void do_oop(oop* p) { 2833 oop obj = oopDesc::load_decode_heap_oop(p); 2834 do_object_work(obj); 2835 } 2836 2837 virtual void do_oop(narrowOop* p) { 2838 // We should not come across narrow oops while scanning marking 2839 // stacks and SATB buffers. 
2840 ShouldNotReachHere(); 2841 } 2842 2843 virtual void do_object(oop obj) { 2844 do_object_work(obj); 2845 } 2846 }; 2847 2848 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 2849 bool verify_enqueued_buffers, 2850 bool verify_thread_buffers, 2851 bool verify_fingers) { 2852 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2853 if (!G1CollectedHeap::heap()->mark_in_progress()) { 2854 return; 2855 } 2856 2857 VerifyNoCSetOopsClosure cl; 2858 2859 if (verify_stacks) { 2860 // Verify entries on the global mark stack 2861 cl.set_phase(VerifyNoCSetOopsStack); 2862 _markStack.oops_do(&cl); 2863 2864 // Verify entries on the task queues 2865 for (uint i = 0; i < _max_worker_id; i += 1) { 2866 cl.set_phase(VerifyNoCSetOopsQueues, i); 2867 CMTaskQueue* queue = _task_queues->queue(i); 2868 queue->oops_do(&cl); 2869 } 2870 } 2871 2872 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 2873 2874 // Verify entries on the enqueued SATB buffers 2875 if (verify_enqueued_buffers) { 2876 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 2877 satb_qs.iterate_completed_buffers_read_only(&cl); 2878 } 2879 2880 // Verify entries on the per-thread SATB buffers 2881 if (verify_thread_buffers) { 2882 cl.set_phase(VerifyNoCSetOopsSATBThread); 2883 satb_qs.iterate_thread_buffers_read_only(&cl); 2884 } 2885 2886 if (verify_fingers) { 2887 // Verify the global finger 2888 HeapWord* global_finger = finger(); 2889 if (global_finger != NULL && global_finger < _heap_end) { 2890 // The global finger always points to a heap region boundary. We 2891 // use heap_region_containing_raw() to get the containing region 2892 // given that the global finger could be pointing to a free region 2893 // which subsequently becomes continues humongous. If that 2894 // happens, heap_region_containing() will return the bottom of the 2895 // corresponding starts humongous region and the check below will 2896 // not hold any more. 2897 // Since we always iterate over all regions, we might get a NULL HeapRegion 2898 // here. 2899 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 2900 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 2901 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 2902 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 2903 } 2904 2905 // Verify the task fingers 2906 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2907 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 2908 CMTask* task = _tasks[i]; 2909 HeapWord* task_finger = task->finger(); 2910 if (task_finger != NULL && task_finger < _heap_end) { 2911 // See above note on the global finger verification. 2912 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 2913 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 2914 !task_hr->in_collection_set(), 2915 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 2916 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 2917 } 2918 } 2919 } 2920 } 2921 #endif // PRODUCT 2922 2923 // Aggregate the counting data that was constructed concurrently 2924 // with marking. 
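//
// Each worker accumulated its results in a private marked-bytes array
// and card bitmap. Per region, the closure below does, in sketch form:
//
//   for (uint i = 0; i < _max_worker_id; i += 1) {
//     marked_bytes += count_marked_bytes_array_for(i)[hrm_index];
//     // OR worker i's card bits over [start_idx, limit_idx) into
//     // the global card bitmap
//   }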
class AggregateCountDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;
  BitMap* _cm_card_bm;
  uint _max_worker_id;

 public:
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed.
      // Note that we cannot rely on their associated
      // "starts humongous" region to have their bit set to 1
      // since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* start = hr->bottom();
    HeapWord* limit = hr->next_top_at_mark_start();
    HeapWord* end = hr->end();

    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
                   "top: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));

    assert(hr->next_marked_bytes() == 0, "Precondition");

    if (start == limit) {
      // NTAMS of this region has not been set so nothing to do.
      return false;
    }

    // 'start' should be in the heap.
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region).
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could be actually just beyond the end of the heap; limit_idx
    // will then correspond to a (non-existent) card that is also
    // outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
    uint hrm_index = hr->hrm_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrm_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
3005 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 3006 3007 while (scan_idx < limit_idx) { 3008 assert(task_card_bm->at(scan_idx) == true, "should be"); 3009 _cm_card_bm->set_bit(scan_idx); 3010 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 3011 3012 // BitMap::get_next_one_offset() can handle the case when 3013 // its left_offset parameter is greater than its right_offset 3014 // parameter. It does, however, have an early exit if 3015 // left_offset == right_offset. So let's limit the value 3016 // passed in for left offset here. 3017 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 3018 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 3019 } 3020 } 3021 3022 // Update the marked bytes for this region. 3023 hr->add_to_marked_bytes(marked_bytes); 3024 3025 // Next heap region 3026 return false; 3027 } 3028 }; 3029 3030 class G1AggregateCountDataTask: public AbstractGangTask { 3031 protected: 3032 G1CollectedHeap* _g1h; 3033 ConcurrentMark* _cm; 3034 BitMap* _cm_card_bm; 3035 uint _max_worker_id; 3036 int _active_workers; 3037 HeapRegionClaimer _hrclaimer; 3038 3039 public: 3040 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3041 ConcurrentMark* cm, 3042 BitMap* cm_card_bm, 3043 uint max_worker_id, 3044 int n_workers) : 3045 AbstractGangTask("Count Aggregation"), 3046 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3047 _max_worker_id(max_worker_id), 3048 _active_workers(n_workers), 3049 _hrclaimer(_active_workers) { 3050 } 3051 3052 void work(uint worker_id) { 3053 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3054 3055 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 3056 } 3057 }; 3058 3059 3060 void ConcurrentMark::aggregate_count_data() { 3061 int n_workers = _g1h->workers()->active_workers(); 3062 3063 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3064 _max_worker_id, n_workers); 3065 3066 _g1h->set_par_threads(n_workers); 3067 _g1h->workers()->run_task(&g1_par_agg_task); 3068 _g1h->set_par_threads(0); 3069 } 3070 3071 // Clear the per-worker arrays used to store the per-region counting data 3072 void ConcurrentMark::clear_all_count_data() { 3073 // Clear the global card bitmap - it will be filled during 3074 // liveness count aggregation (during remark) and the 3075 // final counting task. 3076 _card_bm.clear(); 3077 3078 // Clear the global region bitmap - it will be filled as part 3079 // of the final counting task. 
3080 _region_bm.clear(); 3081 3082 uint max_regions = _g1h->max_regions(); 3083 assert(_max_worker_id > 0, "uninitialized"); 3084 3085 for (uint i = 0; i < _max_worker_id; i += 1) { 3086 BitMap* task_card_bm = count_card_bitmap_for(i); 3087 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3088 3089 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3090 assert(marked_bytes_array != NULL, "uninitialized"); 3091 3092 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3093 task_card_bm->clear(); 3094 } 3095 } 3096 3097 void ConcurrentMark::print_stats() { 3098 if (verbose_stats()) { 3099 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3100 for (size_t i = 0; i < _active_tasks; ++i) { 3101 _tasks[i]->print_stats(); 3102 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3103 } 3104 } 3105 } 3106 3107 // abandon current marking iteration due to a Full GC 3108 void ConcurrentMark::abort() { 3109 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3110 // concurrent bitmap clearing. 3111 _nextMarkBitMap->clearAll(); 3112 3113 // Note we cannot clear the previous marking bitmap here 3114 // since VerifyDuringGC verifies the objects marked during 3115 // a full GC against the previous bitmap. 3116 3117 // Clear the liveness counting data 3118 clear_all_count_data(); 3119 // Empty mark stack 3120 reset_marking_state(); 3121 for (uint i = 0; i < _max_worker_id; ++i) { 3122 _tasks[i]->clear_region_fields(); 3123 } 3124 _first_overflow_barrier_sync.abort(); 3125 _second_overflow_barrier_sync.abort(); 3126 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); 3127 if (!gc_id.is_undefined()) { 3128 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance 3129 // to detect that it was aborted. Only keep track of the first GC id that we aborted. 3130 _aborted_gc_id = gc_id; 3131 } 3132 _has_aborted = true; 3133 3134 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3135 satb_mq_set.abandon_partial_marking(); 3136 // This can be called either during or outside marking, we'll read 3137 // the expected_active value from the SATB queue set. 3138 satb_mq_set.set_active_all_threads( 3139 false, /* new active value */ 3140 satb_mq_set.is_active() /* expected_active */); 3141 3142 _g1h->trace_heap_after_concurrent_cycle(); 3143 _g1h->register_concurrent_cycle_end(); 3144 } 3145 3146 const GCId& ConcurrentMark::concurrent_gc_id() { 3147 if (has_aborted()) { 3148 return _aborted_gc_id; 3149 } 3150 return _g1h->gc_tracer_cm()->gc_id(); 3151 } 3152 3153 static void print_ms_time_info(const char* prefix, const char* name, 3154 NumberSeq& ns) { 3155 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3156 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3157 if (ns.num() > 0) { 3158 gclog_or_tty->print_cr("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 3159 prefix, ns.sd(), ns.maximum()); 3160 } 3161 } 3162 3163 void ConcurrentMark::print_summary_info() { 3164 gclog_or_tty->print_cr(" Concurrent marking:"); 3165 print_ms_time_info(" ", "init marks", _init_times); 3166 print_ms_time_info(" ", "remarks", _remark_times); 3167 { 3168 print_ms_time_info(" ", "final marks", _remark_mark_times); 3169 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3170 3171 } 3172 print_ms_time_info(" ", "cleanups", _cleanup_times); 3173 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3174 _total_counting_time, 3175 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3176 (double)_cleanup_times.num() 3177 : 0.0)); 3178 if (G1ScrubRemSets) { 3179 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3180 _total_rs_scrub_time, 3181 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3182 (double)_cleanup_times.num() 3183 : 0.0)); 3184 } 3185 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3186 (_init_times.sum() + _remark_times.sum() + 3187 _cleanup_times.sum())/1000.0); 3188 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3189 "(%8.2f s marking).", 3190 cmThread()->vtime_accum(), 3191 cmThread()->vtime_mark_accum()); 3192 } 3193 3194 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3195 _parallel_workers->print_worker_threads_on(st); 3196 } 3197 3198 void ConcurrentMark::print_on_error(outputStream* st) const { 3199 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3200 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3201 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3202 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3203 } 3204 3205 // We take a break if someone is trying to stop the world. 
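// Only worker 0 records the pause with the policy, so each yield is
// accounted once rather than once per marking worker.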
3206 bool ConcurrentMark::do_yield_check(uint worker_id) { 3207 if (SuspendibleThreadSet::should_yield()) { 3208 if (worker_id == 0) { 3209 _g1h->g1_policy()->record_concurrent_pause(); 3210 } 3211 SuspendibleThreadSet::yield(); 3212 return true; 3213 } else { 3214 return false; 3215 } 3216 } 3217 3218 #ifndef PRODUCT 3219 // for debugging purposes 3220 void ConcurrentMark::print_finger() { 3221 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3222 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3223 for (uint i = 0; i < _max_worker_id; ++i) { 3224 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3225 } 3226 gclog_or_tty->cr(); 3227 } 3228 #endif 3229 3230 template<bool scan> 3231 inline void CMTask::process_grey_object(oop obj) { 3232 assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray"); 3233 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3234 3235 if (_cm->verbose_high()) { 3236 gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT, 3237 _worker_id, p2i((void*) obj)); 3238 } 3239 3240 size_t obj_size = obj->size(); 3241 _words_scanned += obj_size; 3242 3243 if (scan) { 3244 obj->oop_iterate(_cm_oop_closure); 3245 } 3246 statsOnly( ++_objs_scanned ); 3247 check_limits(); 3248 } 3249 3250 template void CMTask::process_grey_object<true>(oop); 3251 template void CMTask::process_grey_object<false>(oop); 3252 3253 // Closure for iteration over bitmaps 3254 class CMBitMapClosure : public BitMapClosure { 3255 private: 3256 // the bitmap that is being iterated over 3257 CMBitMap* _nextMarkBitMap; 3258 ConcurrentMark* _cm; 3259 CMTask* _task; 3260 3261 public: 3262 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3263 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3264 3265 bool do_bit(size_t offset) { 3266 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3267 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3268 assert( addr < _cm->finger(), "invariant"); 3269 3270 statsOnly( _task->increase_objs_found_on_bitmap() ); 3271 assert(addr >= _task->finger(), "invariant"); 3272 3273 // We move that task's local finger along. 
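// (so that, if this iteration is later aborted, the task can resume from
// the object it was looking at rather than rescanning the whole region;
// see the restart logic in do_marking_step())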
3274 _task->move_finger_to(addr); 3275 3276 _task->scan_object(oop(addr)); 3277 // we only partially drain the local queue and global stack 3278 _task->drain_local_queue(true); 3279 _task->drain_global_stack(true); 3280 3281 // if the has_aborted flag has been raised, we need to bail out of 3282 // the iteration 3283 return !_task->has_aborted(); 3284 } 3285 }; 3286 3287 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3288 ConcurrentMark* cm, 3289 CMTask* task) 3290 : _g1h(g1h), _cm(cm), _task(task) { 3291 assert(_ref_processor == NULL, "should be initialized to NULL"); 3292 3293 if (G1UseConcMarkReferenceProcessing) { 3294 _ref_processor = g1h->ref_processor_cm(); 3295 assert(_ref_processor != NULL, "should not be NULL"); 3296 } 3297 } 3298 3299 void CMTask::setup_for_region(HeapRegion* hr) { 3300 assert(hr != NULL, 3301 "claim_region() should have filtered out NULL regions"); 3302 assert(!hr->is_continues_humongous(), 3303 "claim_region() should have filtered out continues humongous regions"); 3304 3305 if (_cm->verbose_low()) { 3306 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3307 _worker_id, p2i(hr)); 3308 } 3309 3310 _curr_region = hr; 3311 _finger = hr->bottom(); 3312 update_region_limit(); 3313 } 3314 3315 void CMTask::update_region_limit() { 3316 HeapRegion* hr = _curr_region; 3317 HeapWord* bottom = hr->bottom(); 3318 HeapWord* limit = hr->next_top_at_mark_start(); 3319 3320 if (limit == bottom) { 3321 if (_cm->verbose_low()) { 3322 gclog_or_tty->print_cr("[%u] found an empty region " 3323 "["PTR_FORMAT", "PTR_FORMAT")", 3324 _worker_id, p2i(bottom), p2i(limit)); 3325 } 3326 // The region was collected underneath our feet. 3327 // We set the finger to bottom to ensure that the bitmap 3328 // iteration that will follow this will not do anything. 3329 // (this is not a condition that holds when we set the region up, 3330 // as the region is not supposed to be empty in the first place) 3331 _finger = bottom; 3332 } else if (limit >= _region_limit) { 3333 assert(limit >= _finger, "peace of mind"); 3334 } else { 3335 assert(limit < _region_limit, "only way to get here"); 3336 // This can happen under some pretty unusual circumstances. An 3337 // evacuation pause empties the region underneath our feet (NTAMS 3338 // at bottom). We then do some allocation in the region (NTAMS 3339 // stays at bottom), followed by the region being used as a GC 3340 // alloc region (NTAMS will move to top() and the objects 3341 // originally below it will be grayed). All objects now marked in 3342 // the region are explicitly grayed, if below the global finger, 3343 // and in fact we do not need to scan anything else. So, we simply 3344 // set _finger to be limit to ensure that the bitmap iteration 3345 // doesn't do anything. 3346 _finger = limit; 3347 } 3348 3349 _region_limit = limit; 3350 } 3351 3352 void CMTask::giveup_current_region() { 3353 assert(_curr_region != NULL, "invariant"); 3354 if (_cm->verbose_low()) { 3355 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, 3356 _worker_id, p2i(_curr_region)); 3357 } 3358 clear_region_fields(); 3359 } 3360 3361 void CMTask::clear_region_fields() { 3362 // Set the values of these three fields to indicate that we're not 3363 // holding on to a region.
3364 _curr_region = NULL; 3365 _finger = NULL; 3366 _region_limit = NULL; 3367 } 3368 3369 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3370 if (cm_oop_closure == NULL) { 3371 assert(_cm_oop_closure != NULL, "invariant"); 3372 } else { 3373 assert(_cm_oop_closure == NULL, "invariant"); 3374 } 3375 _cm_oop_closure = cm_oop_closure; 3376 } 3377 3378 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3379 guarantee(nextMarkBitMap != NULL, "invariant"); 3380 3381 if (_cm->verbose_low()) { 3382 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3383 } 3384 3385 _nextMarkBitMap = nextMarkBitMap; 3386 clear_region_fields(); 3387 3388 _calls = 0; 3389 _elapsed_time_ms = 0.0; 3390 _termination_time_ms = 0.0; 3391 _termination_start_time_ms = 0.0; 3392 3393 #if _MARKING_STATS_ 3394 _aborted = 0; 3395 _aborted_overflow = 0; 3396 _aborted_cm_aborted = 0; 3397 _aborted_yield = 0; 3398 _aborted_timed_out = 0; 3399 _aborted_satb = 0; 3400 _aborted_termination = 0; 3401 _steal_attempts = 0; 3402 _steals = 0; 3403 _local_pushes = 0; 3404 _local_pops = 0; 3405 _local_max_size = 0; 3406 _objs_scanned = 0; 3407 _global_pushes = 0; 3408 _global_pops = 0; 3409 _global_max_size = 0; 3410 _global_transfers_to = 0; 3411 _global_transfers_from = 0; 3412 _regions_claimed = 0; 3413 _objs_found_on_bitmap = 0; 3414 _satb_buffers_processed = 0; 3415 #endif // _MARKING_STATS_ 3416 } 3417 3418 bool CMTask::should_exit_termination() { 3419 regular_clock_call(); 3420 // This is called when we are in the termination protocol. We should 3421 // quit if, for some reason, this task wants to abort or the global 3422 // stack is not empty (this means that we can get work from it). 3423 return !_cm->mark_stack_empty() || has_aborted(); 3424 } 3425 3426 void CMTask::reached_limit() { 3427 assert(_words_scanned >= _words_scanned_limit || 3428 _refs_reached >= _refs_reached_limit , 3429 "shouldn't have been called otherwise"); 3430 regular_clock_call(); 3431 } 3432 3433 void CMTask::regular_clock_call() { 3434 if (has_aborted()) return; 3435 3436 // First, we need to recalculate the words scanned and refs reached 3437 // limits for the next clock call. 3438 recalculate_limits(); 3439 3440 // During the regular clock call we do the following 3441 3442 // (1) If an overflow has been flagged, then we abort. 3443 if (_cm->has_overflown()) { 3444 set_has_aborted(); 3445 return; 3446 } 3447 3448 // If we are not concurrent (i.e. we're doing remark) we don't need 3449 // to check anything else. The other steps are only needed during 3450 // the concurrent marking phase. 3451 if (!concurrent()) return; 3452 3453 // (2) If marking has been aborted for Full GC, then we also abort. 3454 if (_cm->has_aborted()) { 3455 set_has_aborted(); 3456 statsOnly( ++_aborted_cm_aborted ); 3457 return; 3458 } 3459 3460 double curr_time_ms = os::elapsedVTime() * 1000.0; 3461 3462 // (3) If marking stats are enabled, then we update the step history. 
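// (_MARKING_STATS_ is a compile-time switch; when it is off, this block,
// like every statsOnly() use, compiles away entirely.)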
3463 #if _MARKING_STATS_ 3464 if (_words_scanned >= _words_scanned_limit) { 3465 ++_clock_due_to_scanning; 3466 } 3467 if (_refs_reached >= _refs_reached_limit) { 3468 ++_clock_due_to_marking; 3469 } 3470 3471 double last_interval_ms = curr_time_ms - _interval_start_time_ms; 3472 _interval_start_time_ms = curr_time_ms; 3473 _all_clock_intervals_ms.add(last_interval_ms); 3474 3475 if (_cm->verbose_medium()) { 3476 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, " 3477 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s", 3478 _worker_id, last_interval_ms, 3479 _words_scanned, 3480 (_words_scanned >= _words_scanned_limit) ? " (*)" : "", 3481 _refs_reached, 3482 (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); 3483 } 3484 #endif // _MARKING_STATS_ 3485 3486 // (4) We check whether we should yield. If we have to, then we abort. 3487 if (SuspendibleThreadSet::should_yield()) { 3488 // We should yield. To do this we abort the task. The caller is 3489 // responsible for yielding. 3490 set_has_aborted(); 3491 statsOnly( ++_aborted_yield ); 3492 return; 3493 } 3494 3495 // (5) We check whether we've reached our time quota. If we have, 3496 // then we abort. 3497 double elapsed_time_ms = curr_time_ms - _start_time_ms; 3498 if (elapsed_time_ms > _time_target_ms) { 3499 set_has_aborted(); 3500 _has_timed_out = true; 3501 statsOnly( ++_aborted_timed_out ); 3502 return; 3503 } 3504 3505 // (6) Finally, we check whether there are enough completed SATB 3506 // buffers available for processing. If there are, we abort. 3507 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3508 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 3509 if (_cm->verbose_low()) { 3510 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers", 3511 _worker_id); 3512 } 3513 // we do need to process SATB buffers, so we'll abort and restart 3514 // the marking task to do so 3515 set_has_aborted(); 3516 statsOnly( ++_aborted_satb ); 3517 return; 3518 } 3519 } 3520 3521 void CMTask::recalculate_limits() { 3522 _real_words_scanned_limit = _words_scanned + words_scanned_period; 3523 _words_scanned_limit = _real_words_scanned_limit; 3524 3525 _real_refs_reached_limit = _refs_reached + refs_reached_period; 3526 _refs_reached_limit = _real_refs_reached_limit; 3527 } 3528 3529 void CMTask::decrease_limits() { 3530 // This is called when we believe that we're going to do an infrequent 3531 // operation which will increase the per-byte scanned cost (i.e. move 3532 // entries to/from the global stack). It basically tries to decrease the 3533 // scanning limit so that the clock is called earlier.
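// For example (the period sizes here are illustrative; the real constants
// live in the CMTask declaration): with words_scanned_period == 12K words
// and the limit currently at _real_words_scanned_limit, the assignment
// below pulls the limit back by 3/4 of a period, leaving at most a
// quarter of the period (~3K words) before the clock fires again.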
3534 3535 if (_cm->verbose_medium()) { 3536 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id); 3537 } 3538 3539 _words_scanned_limit = _real_words_scanned_limit - 3540 3 * words_scanned_period / 4; 3541 _refs_reached_limit = _real_refs_reached_limit - 3542 3 * refs_reached_period / 4; 3543 } 3544 3545 void CMTask::move_entries_to_global_stack() { 3546 // local array where we'll store the entries that will be popped 3547 // from the local queue 3548 oop buffer[global_stack_transfer_size]; 3549 3550 int n = 0; 3551 oop obj; 3552 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 3553 buffer[n] = obj; 3554 ++n; 3555 } 3556 3557 if (n > 0) { 3558 // we popped at least one entry from the local queue 3559 3560 statsOnly( ++_global_transfers_to; _local_pops += n ); 3561 3562 if (!_cm->mark_stack_push(buffer, n)) { 3563 if (_cm->verbose_low()) { 3564 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow", 3565 _worker_id); 3566 } 3567 set_has_aborted(); 3568 } else { 3569 // the transfer was successful 3570 3571 if (_cm->verbose_medium()) { 3572 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack", 3573 _worker_id, n); 3574 } 3575 statsOnly( size_t tmp_size = _cm->mark_stack_size(); 3576 if (tmp_size > _global_max_size) { 3577 _global_max_size = tmp_size; 3578 } 3579 _global_pushes += n ); 3580 } 3581 } 3582 3583 // this operation was quite expensive, so decrease the limits 3584 decrease_limits(); 3585 } 3586 3587 void CMTask::get_entries_from_global_stack() { 3588 // local array where we'll store the entries that will be popped 3589 // from the global stack. 3590 oop buffer[global_stack_transfer_size]; 3591 int n; 3592 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 3593 assert(n <= global_stack_transfer_size, 3594 "we should not pop more than the given limit"); 3595 if (n > 0) { 3596 // yes, we did actually pop at least one entry 3597 3598 statsOnly( ++_global_transfers_from; _global_pops += n ); 3599 if (_cm->verbose_medium()) { 3600 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack", 3601 _worker_id, n); 3602 } 3603 for (int i = 0; i < n; ++i) { 3604 bool success = _task_queue->push(buffer[i]); 3605 // We only call this when the local queue is empty or under a 3606 // given target limit. So, we do not expect this push to fail. 3607 assert(success, "invariant"); 3608 } 3609 3610 statsOnly( size_t tmp_size = (size_t)_task_queue->size(); 3611 if (tmp_size > _local_max_size) { 3612 _local_max_size = tmp_size; 3613 } 3614 _local_pushes += n ); 3615 } 3616 3617 // this operation was quite expensive, so decrease the limits 3618 decrease_limits(); 3619 } 3620 3621 void CMTask::drain_local_queue(bool partially) { 3622 if (has_aborted()) return; 3623 3624 // Decide what the target size is, depending on whether we're going to 3625 // drain it partially (so that other tasks can steal if they run out 3626 // of things to do) or totally (at the very end).
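// For instance (queue capacity and flag value are illustrative): with a
// task queue able to hold 16K entries and GCDrainStackTargetSize == 64,
// a partial drain targets MIN2(16K / 3, 64) == 64, i.e. the queue is
// drained down to 64 entries, and whatever remains stays available for
// other tasks to steal; a total drain targets 0.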
3627 size_t target_size; 3628 if (partially) { 3629 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 3630 } else { 3631 target_size = 0; 3632 } 3633 3634 if (_task_queue->size() > target_size) { 3635 if (_cm->verbose_high()) { 3636 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT, 3637 _worker_id, target_size); 3638 } 3639 3640 oop obj; 3641 bool ret = _task_queue->pop_local(obj); 3642 while (ret) { 3643 statsOnly( ++_local_pops ); 3644 3645 if (_cm->verbose_high()) { 3646 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id, 3647 p2i((void*) obj)); 3648 } 3649 3650 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 3651 assert(!_g1h->is_on_master_free_list( 3652 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 3653 3654 scan_object(obj); 3655 3656 if (_task_queue->size() <= target_size || has_aborted()) { 3657 ret = false; 3658 } else { 3659 ret = _task_queue->pop_local(obj); 3660 } 3661 } 3662 3663 if (_cm->verbose_high()) { 3664 gclog_or_tty->print_cr("[%u] drained local queue, size = %u", 3665 _worker_id, _task_queue->size()); 3666 } 3667 } 3668 } 3669 3670 void CMTask::drain_global_stack(bool partially) { 3671 if (has_aborted()) return; 3672 3673 // We have a policy to drain the local queue before we attempt to 3674 // drain the global stack. 3675 assert(partially || _task_queue->size() == 0, "invariant"); 3676 3677 // Decide what the target size is, depending on whether we're going to 3678 // drain it partially (so that other tasks can steal if they run out 3679 // of things to do) or totally (at the very end). Notice that, 3680 // because we move entries from the global stack in chunks or 3681 // because another task might be doing the same, we might in fact 3682 // drop below the target. But this is not a problem. 3683 size_t target_size; 3684 if (partially) { 3685 target_size = _cm->partial_mark_stack_size_target(); 3686 } else { 3687 target_size = 0; 3688 } 3689 3690 if (_cm->mark_stack_size() > target_size) { 3691 if (_cm->verbose_low()) { 3692 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT, 3693 _worker_id, target_size); 3694 } 3695 3696 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 3697 get_entries_from_global_stack(); 3698 drain_local_queue(partially); 3699 } 3700 3701 if (_cm->verbose_low()) { 3702 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT, 3703 _worker_id, _cm->mark_stack_size()); 3704 } 3705 } 3706 } 3707 3708 // The SATB queue set makes several assumptions about whether to call 3709 // the par or non-par versions of its methods, which is why some of the 3710 // code is replicated. We should really get rid of the single-threaded 3711 // version of the code to simplify things. 3712 void CMTask::drain_satb_buffers() { 3713 if (has_aborted()) return; 3714 3715 // We set this so that the regular clock knows that we're in the 3716 // middle of draining buffers and doesn't set the abort flag when it 3717 // notices that SATB buffers are available for draining. It'd be 3718 // very counterproductive if it did that. :-) 3719 _draining_satb_buffers = true; 3720 3721 CMObjectClosure oc(this); 3722 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3723 satb_mq_set.set_closure(_worker_id, &oc); 3724 3725 // This keeps claiming and applying the closure to completed buffers 3726 // until we run out of buffers or we need to abort.
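// (apply_closure_to_completed_buffer() returns whether it actually
// claimed and processed a buffer, so the loop ends as soon as the shared
// list of completed buffers is exhausted or we have aborted.)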
3727 while (!has_aborted() && 3728 satb_mq_set.apply_closure_to_completed_buffer(_worker_id)) { 3729 if (_cm->verbose_medium()) { 3730 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3731 } 3732 statsOnly( ++_satb_buffers_processed ); 3733 regular_clock_call(); 3734 } 3735 3736 _draining_satb_buffers = false; 3737 3738 assert(has_aborted() || 3739 concurrent() || 3740 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3741 3742 satb_mq_set.set_closure(_worker_id, NULL); 3743 3744 // Again, this was a potentially expensive operation, so decrease the 3745 // limits to get the regular clock call early 3746 decrease_limits(); 3747 } 3748 3749 void CMTask::print_stats() { 3750 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3751 _worker_id, _calls); 3752 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3753 _elapsed_time_ms, _termination_time_ms); 3754 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3755 _step_times_ms.num(), _step_times_ms.avg(), 3756 _step_times_ms.sd()); 3757 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3758 _step_times_ms.maximum(), _step_times_ms.sum()); 3759 3760 #if _MARKING_STATS_ 3761 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3762 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3763 _all_clock_intervals_ms.sd()); 3764 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3765 _all_clock_intervals_ms.maximum(), 3766 _all_clock_intervals_ms.sum()); 3767 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT, 3768 _clock_due_to_scanning, _clock_due_to_marking); 3769 gclog_or_tty->print_cr(" Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT, 3770 _objs_scanned, _objs_found_on_bitmap); 3771 gclog_or_tty->print_cr(" Local Queue: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3772 _local_pushes, _local_pops, _local_max_size); 3773 gclog_or_tty->print_cr(" Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3774 _global_pushes, _global_pops, _global_max_size); 3775 gclog_or_tty->print_cr(" transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT, 3776 _global_transfers_to,_global_transfers_from); 3777 gclog_or_tty->print_cr(" Regions: claimed = " SIZE_FORMAT, _regions_claimed); 3778 gclog_or_tty->print_cr(" SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed); 3779 gclog_or_tty->print_cr(" Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT, 3780 _steal_attempts, _steals); 3781 gclog_or_tty->print_cr(" Aborted: " SIZE_FORMAT ", due to", _aborted); 3782 gclog_or_tty->print_cr(" overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT, 3783 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3784 gclog_or_tty->print_cr(" time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT, 3785 _aborted_timed_out, _aborted_satb, _aborted_termination); 3786 #endif // _MARKING_STATS_ 3787 } 3788 3789 /***************************************************************************** 3790 3791 The do_marking_step(time_target_ms, ...) method is the building 3792 block of the parallel marking framework.
It can be called in parallel 3793 with other invocations of do_marking_step() on different tasks 3794 (but only one per task, obviously) and concurrently with the 3795 mutator threads, or during remark, hence it eliminates the need 3796 for two versions of the code. When called during remark, it will 3797 pick up from where the task left off during the concurrent marking 3798 phase. Interestingly, tasks are also claimable during evacuation 3799 pauses, since do_marking_step() ensures that it aborts before 3800 it needs to yield. 3801 3802 The data structures that it uses to do marking work are the 3803 following: 3804 3805 (1) Marking Bitmap. If there are gray objects that appear only 3806 on the bitmap (this happens either when dealing with an overflow 3807 or when the initial marking phase has simply marked the roots 3808 and didn't push them on the stack), then tasks claim heap 3809 regions whose bitmap they then scan to find gray objects. A 3810 global finger indicates where the end of the last claimed region 3811 is. A local finger indicates how far into the region a task has 3812 scanned. The two fingers are used to determine how to gray an 3813 object (i.e. whether simply marking it is OK, as it will be 3814 visited by a task in the future, or whether it also needs to be 3815 pushed on a stack). 3816 3817 (2) Local Queue. The task's local queue, which the task can 3818 access reasonably efficiently. Other tasks can steal from 3819 it when they run out of work. Throughout the marking phase, a 3820 task attempts to keep its local queue short but not totally 3821 empty, so that entries are available for stealing by other 3822 tasks. Only when there is no more work will a task totally 3823 drain its local queue. 3824 3825 (3) Global Mark Stack. This handles local queue overflow. During 3826 marking, only sets of entries are moved between it and the local 3827 queues, as access to it requires a mutex and finer-grained 3828 interaction with it might cause contention. If it 3829 overflows, then the marking phase should restart and iterate 3830 over the bitmap to identify gray objects. Throughout the marking 3831 phase, tasks attempt to keep the global mark stack at a small 3832 length but not totally empty, so that entries are available for 3833 popping by other tasks. Only when there is no more work will tasks 3834 totally drain the global mark stack. 3835 3836 (4) SATB Buffer Queue. This is where completed SATB buffers are 3837 made available. Buffers are regularly removed from this queue 3838 and scanned for roots, so that the queue doesn't get too 3839 long. During remark, all completed buffers are processed, as 3840 well as the filled-in parts of any uncompleted buffers. 3841 3842 The do_marking_step() method tries to abort when the time target 3843 has been reached. There are a few other cases when the 3844 do_marking_step() method also aborts: 3845 3846 (1) When the marking phase has been aborted (after a Full GC). 3847 3848 (2) When a global overflow (on the global stack) has been 3849 triggered. Before the task aborts, it will actually sync up with 3850 the other tasks to ensure that all the marking data structures 3851 (local queues, stacks, fingers, etc.) are re-initialized so that 3852 when do_marking_step() completes, the marking phase can 3853 immediately restart. 3854 3855 (3) When enough completed SATB buffers are available. The 3856 do_marking_step() method only tries to drain SATB buffers right 3857 at the beginning.
So, if enough buffers are available, the 3858 marking step aborts and the SATB buffers are processed at 3859 the beginning of the next invocation. 3860 3861 (4) To yield. When we have to yield, we abort and do the yield 3862 right at the end of do_marking_step(). This saves us from a lot 3863 of hassle, as by yielding we might allow a Full GC. If this 3864 happens, then objects will be compacted underneath our feet, the 3865 heap might shrink, etc. We avoid checking for this by just 3866 aborting and doing the yield right at the end. 3867 3868 From the above it follows that the do_marking_step() method should 3869 be called in a loop (or, otherwise, regularly) until it completes. 3870 3871 If a marking step completes without its has_aborted() flag being 3872 true, it means it has completed the current marking phase (and 3873 also all other marking tasks have done so and have all synced up). 3874 3875 A method called regular_clock_call() is invoked "regularly" (in 3876 sub-ms intervals) throughout marking. It is this clock method that 3877 checks all the abort conditions which were mentioned above and 3878 decides when the task should abort. A work-based scheme is used to 3879 trigger this clock method: when the number of object words the 3880 marking phase has scanned or the number of references the marking 3881 phase has visited reaches a given limit. Additional invocations of 3882 the clock method have been planted in a few other strategic places 3883 too. The initial reason for the clock method was to avoid calling 3884 vtime too regularly, as it is quite expensive. So, once it was in 3885 place, it was natural to piggy-back all the other conditions on it 3886 too and not constantly check them throughout the code. 3887 3888 If do_termination is true then do_marking_step will enter its 3889 termination protocol. 3890 3891 The value of is_serial must be true when do_marking_step is being 3892 called serially (i.e. by the VMThread) and do_marking_step should 3893 skip any synchronization in the termination and overflow code. 3894 Examples include the serial remark code and the serial reference 3895 processing closures. 3896 3897 The value of is_serial must be false when do_marking_step is 3898 being called by any of the worker threads in a work gang. 3899 Examples include the concurrent marking code (CMMarkingTask), 3900 the MT remark code, and the MT reference processing closures. 3901 3902 *****************************************************************************/ 3903 3904 void CMTask::do_marking_step(double time_target_ms, 3905 bool do_termination, 3906 bool is_serial) { 3907 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 3908 assert(concurrent() == _cm->concurrent(), "they should be the same"); 3909 3910 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); 3911 assert(_task_queues != NULL, "invariant"); 3912 assert(_task_queue != NULL, "invariant"); 3913 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); 3914 3915 assert(!_claimed, 3916 "only one thread should claim this task at any one time"); 3917 3918 // OK, this doesn't safeguard against all possible scenarios, as it is 3919 // possible for two threads to set the _claimed flag at the same 3920 // time. But it is only for debugging purposes anyway and it will 3921 // catch most problems.
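// (A truly race-free claim would need an atomic compare-and-swap, e.g.
// an Atomic::cmpxchg() on an integer-typed claim word whose previous
// value is then checked; since _claimed only backs the assert above,
// the plain store below is considered good enough.)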
3922 _claimed = true; 3923 3924 _start_time_ms = os::elapsedVTime() * 1000.0; 3925 statsOnly( _interval_start_time_ms = _start_time_ms ); 3926 3927 // If do_stealing is true then do_marking_step will attempt to 3928 // steal work from the other CMTasks. It only makes sense to 3929 // enable stealing when the termination protocol is enabled 3930 // and do_marking_step() is not being called serially. 3931 bool do_stealing = do_termination && !is_serial; 3932 3933 double diff_prediction_ms = 3934 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 3935 _time_target_ms = time_target_ms - diff_prediction_ms; 3936 3937 // set up the variables that are used in the work-based scheme to 3938 // call the regular clock method 3939 _words_scanned = 0; 3940 _refs_reached = 0; 3941 recalculate_limits(); 3942 3943 // clear all flags 3944 clear_has_aborted(); 3945 _has_timed_out = false; 3946 _draining_satb_buffers = false; 3947 3948 ++_calls; 3949 3950 if (_cm->verbose_low()) { 3951 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 3952 "target = %1.2lfms >>>>>>>>>>", 3953 _worker_id, _calls, _time_target_ms); 3954 } 3955 3956 // Set up the bitmap and oop closures. Anything that uses them is 3957 // eventually called from this method, so it is OK to allocate these 3958 // statically. 3959 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 3960 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 3961 set_cm_oop_closure(&cm_oop_closure); 3962 3963 if (_cm->has_overflown()) { 3964 // This can happen if the mark stack overflows during a GC pause 3965 // and this task, after a yield point, restarts. We have to abort 3966 // as we need to get into the overflow protocol which happens 3967 // right at the end of this task. 3968 set_has_aborted(); 3969 } 3970 3971 // First drain any available SATB buffers. After this, we will not 3972 // look at SATB buffers before the next invocation of this method. 3973 // If enough completed SATB buffers are queued up, the regular clock 3974 // will abort this task so that it restarts. 3975 drain_satb_buffers(); 3976 // ...then partially drain the local queue and the global stack 3977 drain_local_queue(true); 3978 drain_global_stack(true); 3979 3980 do { 3981 if (!has_aborted() && _curr_region != NULL) { 3982 // This means that we're already holding on to a region. 3983 assert(_finger != NULL, "if region is not NULL, then the finger " 3984 "should not be NULL either"); 3985 3986 // We might have restarted this task after an evacuation pause 3987 // which might have evacuated the region we're holding on to 3988 // underneath our feet. Let's read its limit again to make sure 3989 // that we do not iterate over a region of the heap that 3990 // contains garbage (update_region_limit() will also move 3991 // _finger to the start of the region if it is found empty). 3992 update_region_limit(); 3993 // We will start from _finger not from the start of the region, 3994 // as we might be restarting this task after aborting half-way 3995 // through scanning this region. In this case, _finger points to 3996 // the address where we last found a marked object. If this is a 3997 // fresh region, _finger points to start(). 
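// Either way, the MemRegion built below covers [_finger, _region_limit),
// i.e. exactly the suffix of the region that still needs to be looked at.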
3998 MemRegion mr = MemRegion(_finger, _region_limit); 3999 4000 if (_cm->verbose_low()) { 4001 gclog_or_tty->print_cr("[%u] we're scanning part " 4002 "["PTR_FORMAT", "PTR_FORMAT") " 4003 "of region "HR_FORMAT, 4004 _worker_id, p2i(_finger), p2i(_region_limit), 4005 HR_FORMAT_PARAMS(_curr_region)); 4006 } 4007 4008 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 4009 "humongous regions should go around loop once only"); 4010 4011 // Some special cases: 4012 // If the memory region is empty, we can just give up the region. 4013 // If the current region is humongous then we only need to check 4014 // the bitmap for the bit associated with the start of the object, 4015 // scan the object if it's live, and give up the region. 4016 // Otherwise, let's iterate over the bitmap of the part of the region 4017 // that is left. 4018 // If the iteration is successful, give up the region. 4019 if (mr.is_empty()) { 4020 giveup_current_region(); 4021 regular_clock_call(); 4022 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 4023 if (_nextMarkBitMap->isMarked(mr.start())) { 4024 // The object is marked - apply the closure 4025 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4026 bitmap_closure.do_bit(offset); 4027 } 4028 // Even if this task aborted while scanning the humongous object 4029 // we can (and should) give up the current region. 4030 giveup_current_region(); 4031 regular_clock_call(); 4032 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4033 giveup_current_region(); 4034 regular_clock_call(); 4035 } else { 4036 assert(has_aborted(), "currently the only way to do so"); 4037 // The only way to abort the bitmap iteration is to return 4038 // false from the do_bit() method. However, inside the 4039 // do_bit() method we move the _finger to point to the 4040 // object currently being looked at. So, if we bail out, we 4041 // have definitely set _finger to something non-null. 4042 assert(_finger != NULL, "invariant"); 4043 4044 // Region iteration was actually aborted. So now _finger 4045 // points to the address of the object we last scanned. If we 4046 // leave it there, when we restart this task, we will rescan 4047 // the object. It is easy to avoid this. We move the finger by 4048 // enough to point to the next possible object header (the 4049 // bitmap knows by how much we need to move it as it knows its 4050 // granularity). 4051 assert(_finger < _region_limit, "invariant"); 4052 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4053 // Check if bitmap iteration was aborted while scanning the last object 4054 if (new_finger >= _region_limit) { 4055 giveup_current_region(); 4056 } else { 4057 move_finger_to(new_finger); 4058 } 4059 } 4060 } 4061 // At this point we have either completed iterating over the 4062 // region we were holding on to, or we have aborted. 4063 4064 // We then partially drain the local queue and the global stack. 4065 // (Do we really need this?) 4066 drain_local_queue(true); 4067 drain_global_stack(true); 4068 4069 // Read the note on the claim_region() method on why it might 4070 // return NULL with potentially more regions available for 4071 // claiming and why we have to check out_of_regions() to determine 4072 // whether we're done or not. 4073 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4074 // We are going to try to claim a new region. We should have 4075 // given up on the previous one. 
4076 // Separated the asserts so that we know which one fires. 4077 assert(_curr_region == NULL, "invariant"); 4078 assert(_finger == NULL, "invariant"); 4079 assert(_region_limit == NULL, "invariant"); 4080 if (_cm->verbose_low()) { 4081 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id); 4082 } 4083 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 4084 if (claimed_region != NULL) { 4085 // Yes, we managed to claim one 4086 statsOnly( ++_regions_claimed ); 4087 4088 if (_cm->verbose_low()) { 4089 gclog_or_tty->print_cr("[%u] we successfully claimed " 4090 "region "PTR_FORMAT, 4091 _worker_id, p2i(claimed_region)); 4092 } 4093 4094 setup_for_region(claimed_region); 4095 assert(_curr_region == claimed_region, "invariant"); 4096 } 4097 // It is important to call the regular clock here. It might take 4098 // a while to claim a region if, for example, we hit a large 4099 // block of empty regions. So we need to call the regular clock 4100 // method once round the loop to make sure it's called 4101 // frequently enough. 4102 regular_clock_call(); 4103 } 4104 4105 if (!has_aborted() && _curr_region == NULL) { 4106 assert(_cm->out_of_regions(), 4107 "at this point we should be out of regions"); 4108 } 4109 } while ( _curr_region != NULL && !has_aborted()); 4110 4111 if (!has_aborted()) { 4112 // We cannot check whether the global stack is empty, since other 4113 // tasks might be pushing objects to it concurrently. 4114 assert(_cm->out_of_regions(), 4115 "at this point we should be out of regions"); 4116 4117 if (_cm->verbose_low()) { 4118 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id); 4119 } 4120 4121 // Try to reduce the number of available SATB buffers so that 4122 // remark has less work to do. 4123 drain_satb_buffers(); 4124 } 4125 4126 // Since we've done everything else, we can now totally drain the 4127 // local queue and global stack. 4128 drain_local_queue(false); 4129 drain_global_stack(false); 4130 4131 // Attempt at work stealing from other tasks' queues. 4132 if (do_stealing && !has_aborted()) { 4133 // We have not aborted. This means that we have finished all that 4134 // we could. Let's try to do some stealing... 4135 4136 // We cannot check whether the global stack is empty, since other 4137 // tasks might be pushing objects to it concurrently. 4138 assert(_cm->out_of_regions() && _task_queue->size() == 0, 4139 "only way to reach here"); 4140 4141 if (_cm->verbose_low()) { 4142 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id); 4143 } 4144 4145 while (!has_aborted()) { 4146 oop obj; 4147 statsOnly( ++_steal_attempts ); 4148 4149 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 4150 if (_cm->verbose_medium()) { 4151 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully", 4152 _worker_id, p2i((void*) obj)); 4153 } 4154 4155 statsOnly( ++_steals ); 4156 4157 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 4158 "any stolen object should be marked"); 4159 scan_object(obj); 4160 4161 // And since we're towards the end, let's totally drain the 4162 // local queue and global stack. 4163 drain_local_queue(false); 4164 drain_global_stack(false); 4165 } else { 4166 break; 4167 } 4168 } 4169 } 4170 4171 // If we are about to wrap up and go into termination, check if we 4172 // should raise the overflow flag. 4173 if (do_termination && !has_aborted()) { 4174 if (_cm->force_overflow()->should_force()) { 4175 _cm->set_has_overflown(); 4176 regular_clock_call(); 4177 } 4178 } 4179 4180 // We still haven't aborted.
Now, let's try to get into the 4181 // termination protocol. 4182 if (do_termination && !has_aborted()) { 4183 // We cannot check whether the global stack is empty, since other 4184 // tasks might be concurrently pushing objects on it. 4185 // Separated the asserts so that we know which one fires. 4186 assert(_cm->out_of_regions(), "only way to reach here"); 4187 assert(_task_queue->size() == 0, "only way to reach here"); 4188 4189 if (_cm->verbose_low()) { 4190 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); 4191 } 4192 4193 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 4194 4195 // The CMTask class also extends the TerminatorTerminator class, 4196 // hence its should_exit_termination() method will also decide 4197 // whether to exit the termination protocol or not. 4198 bool finished = (is_serial || 4199 _cm->terminator()->offer_termination(this)); 4200 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 4201 _termination_time_ms += 4202 termination_end_time_ms - _termination_start_time_ms; 4203 4204 if (finished) { 4205 // We're all done. 4206 4207 if (_worker_id == 0) { 4208 // let's allow task 0 to do this 4209 if (concurrent()) { 4210 assert(_cm->concurrent_marking_in_progress(), "invariant"); 4211 // we need to set this to false before the next 4212 // safepoint. This way we ensure that the marking phase 4213 // doesn't observe any more heap expansions. 4214 _cm->clear_concurrent_marking_in_progress(); 4215 } 4216 } 4217 4218 // We can now guarantee that the global stack is empty, since 4219 // all other tasks have finished. We separated the guarantees so 4220 // that, if a condition is false, we can immediately find out 4221 // which one. 4222 guarantee(_cm->out_of_regions(), "only way to reach here"); 4223 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 4224 guarantee(_task_queue->size() == 0, "only way to reach here"); 4225 guarantee(!_cm->has_overflown(), "only way to reach here"); 4226 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 4227 4228 if (_cm->verbose_low()) { 4229 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); 4230 } 4231 } else { 4232 // Apparently there's more work to do. Let's abort this task. The 4233 // caller will restart it and we can hopefully find more things to do. 4234 4235 if (_cm->verbose_low()) { 4236 gclog_or_tty->print_cr("[%u] apparently there is more work to do", 4237 _worker_id); 4238 } 4239 4240 set_has_aborted(); 4241 statsOnly( ++_aborted_termination ); 4242 } 4243 } 4244 4245 // Mainly for debugging purposes to make sure that a pointer to the 4246 // closure which was stack-allocated in this frame doesn't 4247 // escape it by accident. 4248 set_cm_oop_closure(NULL); 4249 double end_time_ms = os::elapsedVTime() * 1000.0; 4250 double elapsed_time_ms = end_time_ms - _start_time_ms; 4251 // Update the step history. 4252 _step_times_ms.add(elapsed_time_ms); 4253 4254 if (has_aborted()) { 4255 // The task was aborted for some reason. 4256 4257 statsOnly( ++_aborted ); 4258 4259 if (_has_timed_out) { 4260 double diff_ms = elapsed_time_ms - _time_target_ms; 4261 // Keep statistics of how well we did with respect to hitting 4262 // our target only if we actually timed out (if we aborted for 4263 // other reasons, then the results might get skewed). 4264 _marking_step_diffs_ms.add(diff_ms); 4265 } 4266 4267 if (_cm->has_overflown()) { 4268 // This is the interesting one. We aborted because a global 4269 // overflow was raised.
This means we have to restart the 4270 // marking phase and start iterating over regions. However, in 4271 // order to do this we have to make sure that all tasks stop 4272 // what they are doing and re-initialize in a safe manner. We 4273 // will achieve this with the use of two barrier sync points. 4274 4275 if (_cm->verbose_low()) { 4276 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4277 } 4278 4279 if (!is_serial) { 4280 // We only need to enter the sync barrier if being called 4281 // from a parallel context 4282 _cm->enter_first_sync_barrier(_worker_id); 4283 4284 // When we exit this sync barrier we know that all tasks have 4285 // stopped doing marking work. So, it's now safe to 4286 // re-initialize our data structures. At the end of this method, 4287 // task 0 will clear the global data structures. 4288 } 4289 4290 statsOnly( ++_aborted_overflow ); 4291 4292 // We clear the local state of this task... 4293 clear_region_fields(); 4294 4295 if (!is_serial) { 4296 // ...and enter the second barrier. 4297 _cm->enter_second_sync_barrier(_worker_id); 4298 } 4299 // At this point, if we're in the concurrent phase of 4300 // marking, everything has been re-initialized and we're 4301 // ready to restart. 4302 } 4303 4304 if (_cm->verbose_low()) { 4305 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4306 "elapsed = %1.2lfms <<<<<<<<<<", 4307 _worker_id, _time_target_ms, elapsed_time_ms); 4308 if (_cm->has_aborted()) { 4309 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4310 _worker_id); 4311 } 4312 } 4313 } else { 4314 if (_cm->verbose_low()) { 4315 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4316 "elapsed = %1.2lfms <<<<<<<<<<", 4317 _worker_id, _time_target_ms, elapsed_time_ms); 4318 } 4319 } 4320 4321 _claimed = false; 4322 } 4323 4324 CMTask::CMTask(uint worker_id, 4325 ConcurrentMark* cm, 4326 size_t* marked_bytes, 4327 BitMap* card_bm, 4328 CMTaskQueue* task_queue, 4329 CMTaskQueueSet* task_queues) 4330 : _g1h(G1CollectedHeap::heap()), 4331 _worker_id(worker_id), _cm(cm), 4332 _claimed(false), 4333 _nextMarkBitMap(NULL), _hash_seed(17), 4334 _task_queue(task_queue), 4335 _task_queues(task_queues), 4336 _cm_oop_closure(NULL), 4337 _marked_bytes_array(marked_bytes), 4338 _card_bm(card_bm) { 4339 guarantee(task_queue != NULL, "invariant"); 4340 guarantee(task_queues != NULL, "invariant"); 4341 4342 statsOnly( _clock_due_to_scanning = 0; 4343 _clock_due_to_marking = 0 ); 4344 4345 _marking_step_diffs_ms.add(0.5); 4346 } 4347 4348 // These are formatting macros that are used below to ensure 4349 // consistent formatting. The *_H_* versions are used to format the 4350 // header for a particular value and they should be kept consistent 4351 // with the corresponding macro. Also note that most of the macros add 4352 // the necessary white space (as a prefix) which makes them a bit 4353 // easier to compose. 4354 4355 // All the output lines are prefixed with this string to be able to 4356 // identify them easily in a large log file.
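// As an illustration (a hypothetical composition, not a line from this
// file), a print such as
//
//   _out->print_cr(G1PPRL_LINE_PREFIX
//                  G1PPRL_TYPE_FORMAT
//                  G1PPRL_BYTE_FORMAT, type, used_bytes);
//
// relies on the compiler concatenating the adjacent string literals into
// a single format string before print_cr() ever sees it.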
4357 #define G1PPRL_LINE_PREFIX "###" 4358 4359 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT 4360 #ifdef _LP64 4361 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 4362 #else // _LP64 4363 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 4364 #endif // _LP64 4365 4366 // For per-region info 4367 #define G1PPRL_TYPE_FORMAT " %-4s" 4368 #define G1PPRL_TYPE_H_FORMAT " %4s" 4369 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) 4370 #define G1PPRL_BYTE_H_FORMAT " %9s" 4371 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 4372 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 4373 4374 // For summary info 4375 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT 4376 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT 4377 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" 4378 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" 4379 4380 G1PrintRegionLivenessInfoClosure:: 4381 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) 4382 : _out(out), 4383 _total_used_bytes(0), _total_capacity_bytes(0), 4384 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4385 _hum_used_bytes(0), _hum_capacity_bytes(0), 4386 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 4387 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 4388 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4389 MemRegion g1_reserved = g1h->g1_reserved(); 4390 double now = os::elapsedTime(); 4391 4392 // Print the header of the output. 4393 _out->cr(); 4394 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 4395 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" 4396 G1PPRL_SUM_ADDR_FORMAT("reserved") 4397 G1PPRL_SUM_BYTE_FORMAT("region-size"), 4398 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 4399 HeapRegion::GrainBytes); 4400 _out->print_cr(G1PPRL_LINE_PREFIX); 4401 _out->print_cr(G1PPRL_LINE_PREFIX 4402 G1PPRL_TYPE_H_FORMAT 4403 G1PPRL_ADDR_BASE_H_FORMAT 4404 G1PPRL_BYTE_H_FORMAT 4405 G1PPRL_BYTE_H_FORMAT 4406 G1PPRL_BYTE_H_FORMAT 4407 G1PPRL_DOUBLE_H_FORMAT 4408 G1PPRL_BYTE_H_FORMAT 4409 G1PPRL_BYTE_H_FORMAT, 4410 "type", "address-range", 4411 "used", "prev-live", "next-live", "gc-eff", 4412 "remset", "code-roots"); 4413 _out->print_cr(G1PPRL_LINE_PREFIX 4414 G1PPRL_TYPE_H_FORMAT 4415 G1PPRL_ADDR_BASE_H_FORMAT 4416 G1PPRL_BYTE_H_FORMAT 4417 G1PPRL_BYTE_H_FORMAT 4418 G1PPRL_BYTE_H_FORMAT 4419 G1PPRL_DOUBLE_H_FORMAT 4420 G1PPRL_BYTE_H_FORMAT 4421 G1PPRL_BYTE_H_FORMAT, 4422 "", "", 4423 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 4424 "(bytes)", "(bytes)"); 4425 } 4426 4427 // It takes as a parameter a pointer to one of the _hum_* fields; it 4428 // deduces the corresponding value for a region in a humongous region 4429 // series (either the region size, or what's left if the _hum_* field 4430 // is < the region size), and updates the _hum_* field accordingly. 4431 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { 4432 size_t bytes = 0; 4433 // The > 0 check is to deal with the prev and next live bytes which 4434 // could be 0. 4435 if (*hum_bytes > 0) { 4436 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); 4437 *hum_bytes -= bytes; 4438 } 4439 return bytes; 4440 } 4441 4442 // It deduces the values for a region in a humongous region series 4443 // from the _hum_* fields and updates those accordingly. It assumes 4444 // that the _hum_* fields have already been set up from the "starts 4445 // humongous" region and we visit the regions in address order.
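// For example (sizes invented for illustration): with GrainBytes == 1M
// and a humongous series whose "starts humongous" region recorded 2.5M
// used bytes, the series' three regions are reported as 1M, 1M and 0.5M
// of used bytes respectively, after which _hum_used_bytes is zero again.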
4446 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4447 size_t* capacity_bytes, 4448 size_t* prev_live_bytes, 4449 size_t* next_live_bytes) { 4450 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4451 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4452 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4453 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4454 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4455 } 4456 4457 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4458 const char* type = r->get_type_str(); 4459 HeapWord* bottom = r->bottom(); 4460 HeapWord* end = r->end(); 4461 size_t capacity_bytes = r->capacity(); 4462 size_t used_bytes = r->used(); 4463 size_t prev_live_bytes = r->live_bytes(); 4464 size_t next_live_bytes = r->next_live_bytes(); 4465 double gc_eff = r->gc_efficiency(); 4466 size_t remset_bytes = r->rem_set()->mem_size(); 4467 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4468 4469 if (r->is_starts_humongous()) { 4470 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4471 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4472 "they should have been zeroed after the last time we used them"); 4473 // Set up the _hum_* fields. 4474 _hum_capacity_bytes = capacity_bytes; 4475 _hum_used_bytes = used_bytes; 4476 _hum_prev_live_bytes = prev_live_bytes; 4477 _hum_next_live_bytes = next_live_bytes; 4478 get_hum_bytes(&used_bytes, &capacity_bytes, 4479 &prev_live_bytes, &next_live_bytes); 4480 end = bottom + HeapRegion::GrainWords; 4481 } else if (r->is_continues_humongous()) { 4482 get_hum_bytes(&used_bytes, &capacity_bytes, 4483 &prev_live_bytes, &next_live_bytes); 4484 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4485 } 4486 4487 _total_used_bytes += used_bytes; 4488 _total_capacity_bytes += capacity_bytes; 4489 _total_prev_live_bytes += prev_live_bytes; 4490 _total_next_live_bytes += next_live_bytes; 4491 _total_remset_bytes += remset_bytes; 4492 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4493 4494 // Print a line for this particular region. 4495 _out->print_cr(G1PPRL_LINE_PREFIX 4496 G1PPRL_TYPE_FORMAT 4497 G1PPRL_ADDR_BASE_FORMAT 4498 G1PPRL_BYTE_FORMAT 4499 G1PPRL_BYTE_FORMAT 4500 G1PPRL_BYTE_FORMAT 4501 G1PPRL_DOUBLE_FORMAT 4502 G1PPRL_BYTE_FORMAT 4503 G1PPRL_BYTE_FORMAT, 4504 type, p2i(bottom), p2i(end), 4505 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4506 remset_bytes, strong_code_roots_bytes); 4507 4508 return false; 4509 } 4510 4511 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4512 // add static memory usages to remembered set sizes 4513 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4514 // Print the footer of the output. 
4515 _out->print_cr(G1PPRL_LINE_PREFIX); 4516 _out->print_cr(G1PPRL_LINE_PREFIX 4517 " SUMMARY" 4518 G1PPRL_SUM_MB_FORMAT("capacity") 4519 G1PPRL_SUM_MB_PERC_FORMAT("used") 4520 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4521 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4522 G1PPRL_SUM_MB_FORMAT("remset") 4523 G1PPRL_SUM_MB_FORMAT("code-roots"), 4524 bytes_to_mb(_total_capacity_bytes), 4525 bytes_to_mb(_total_used_bytes), 4526 perc(_total_used_bytes, _total_capacity_bytes), 4527 bytes_to_mb(_total_prev_live_bytes), 4528 perc(_total_prev_live_bytes, _total_capacity_bytes), 4529 bytes_to_mb(_total_next_live_bytes), 4530 perc(_total_next_live_bytes, _total_capacity_bytes), 4531 bytes_to_mb(_total_remset_bytes), 4532 bytes_to_mb(_total_strong_code_roots_bytes)); 4533 _out->cr(); 4534 }
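// A minimal sketch of how this closure is typically driven (the call
// site shown here is illustrative):
//
//   G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//
// The constructor emits the "###" header lines, doHeapRegion() one line
// per region, and the destructor the summary footer.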