/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/strongRootsScope.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/taskqueue.inline.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize  = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
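  // Worked example of the rounding below (values illustrative, not from
  // the source): each bitmap bit covers 2^_shifter heap words, so object
  // starts are only possible every (HeapWordSize << _shifter) bytes.
  // With 8-byte heap words and _shifter == 0, an addr of 0x1003 would be
  // rounded up to 0x1008 before being translated into a bit offset.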
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
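  // Note: doHeapRegion() below clears each region in 1M chunks; when
  // _may_yield is set, the closure offers a yield point after every
  // chunk so that bitmap clearing can give way to a pending safepoint.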
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    if (_suspendible) {
      SuspendibleThreadSet::join();
    }
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
    if (_suspendible) {
      SuspendibleThreadSet::leave();
    }
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

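// CMMarkStack is the global mark stack: a fixed-capacity array of oops
// backed by reserved virtual memory. Single-oop pushes use a CAS
// (par_push); bulk pushes and pops serialize on ParGCRareEvent_lock.
// Overflow is recorded in _overflow rather than growing in place; the
// stack may then be expanded between marking attempts (see expand()).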
CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
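    // The CAS failed, meaning another thread claimed the slot at
    // 'index' first. Loop around, re-read _index and retry; the
    // isFull() check at the top of the loop re-establishes the
    // overflow condition if the stack filled up in the meantime.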
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
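  // _saved_index also doubles as the "bracket open" flag: it is -1
  // outside a note_start_of_gc()/note_end_of_gc() pair and holds the
  // stack index snapshot in between (see note_start_of_gc() above).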
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
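  // claim_next() stores NULL into _next_survivor once the last survivor
  // has been handed out, so unless the scan was aborted every root
  // region must have been claimed by the time we get here.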
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor             = sleep_factor;
    _marking_task_overhead    = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
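    // scale_parallel_threads() maps n parallel GC threads to
    // MAX2((n + 2) / 4, 1) marking threads, i.e. roughly a quarter of
    // them: for example, 8 parallel threads give (8 + 2) / 4 = 2
    // concurrent marking threads.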
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
                   (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Marker",
       _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
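    // The user set MarkStackSize explicitly, so we only validate it here.
    // The two cases below differ only in whether MarkStackSizeMax was
    // also specified on the command line, which tailors the warning.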
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread still appears to be
  // in the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow count will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that it will be suspended for a Full GC or an evacuation
 * pause. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
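      // Only worker 0 clears the global state; every task (including
      // task 0) then resets its own local structures and the workers
      // rendezvous again at the second barrier before marking resumes.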
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active
// workers for a concurrent phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.
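
  // _restart_for_overflow is set at the end of remark if the global
  // mark stack overflowed (see checkpointRootsFinal()); clearing it
  // here starts this marking round from a clean slate.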
  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots()
  // and the decisions on that MT processing are made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
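    // Deactivate the SATB queues. Passing true as expected_active makes
    // the queue set verify that every queue really was active, catching
    // any thread whose queue state fell out of sync during marking.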
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()).
      // Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.
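// Note that the check is one-sided: only bits set in the expected
// (recomputed) bitmaps but clear in the actual ones count as failures.
// Extra bits in the actual bitmaps are tolerated, presumably because
// over-reporting liveness is the conservative direction.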

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
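    // card_bitmap_index_for() biases addresses by the card number of the
    // heap bottom (_heap_bottom_card_num, set up in the constructor), so
    // index i below corresponds to the i-th card above the heap's start.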
1637 1638 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1639 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1640 1641 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1642 expected = _exp_card_bm->at(i); 1643 actual = _card_bm->at(i); 1644 1645 if (expected && !actual) { 1646 if (_verbose) { 1647 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1648 "expected: %s, actual: %s", 1649 hr->hrm_index(), i, 1650 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1651 } 1652 failures += 1; 1653 } 1654 } 1655 1656 if (failures > 0 && _verbose) { 1657 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1658 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1659 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1660 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1661 } 1662 1663 _failures += failures; 1664 1665 // We could stop iteration over the heap when we 1666 // find the first violating region by returning true. 1667 return false; 1668 } 1669 }; 1670 1671 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1672 protected: 1673 G1CollectedHeap* _g1h; 1674 ConcurrentMark* _cm; 1675 BitMap* _actual_region_bm; 1676 BitMap* _actual_card_bm; 1677 1678 uint _n_workers; 1679 1680 BitMap* _expected_region_bm; 1681 BitMap* _expected_card_bm; 1682 1683 int _failures; 1684 bool _verbose; 1685 1686 HeapRegionClaimer _hrclaimer; 1687 1688 public: 1689 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1690 BitMap* region_bm, BitMap* card_bm, 1691 BitMap* expected_region_bm, BitMap* expected_card_bm) 1692 : AbstractGangTask("G1 verify final counting"), 1693 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1694 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1695 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1696 _failures(0), _verbose(false), 1697 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1698 assert(VerifyDuringGC, "don't call this otherwise"); 1699 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1700 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1701 1702 _verbose = _cm->verbose_medium(); 1703 } 1704 1705 void work(uint worker_id) { 1706 assert(worker_id < _n_workers, "invariant"); 1707 1708 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1709 _actual_region_bm, _actual_card_bm, 1710 _expected_region_bm, 1711 _expected_card_bm, 1712 _verbose); 1713 1714 _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer); 1715 1716 Atomic::add(verify_cl.failures(), &_failures); 1717 } 1718 1719 int failures() const { return _failures; } 1720 }; 1721 1722 // Closure that finalizes the liveness counting data. 1723 // Used during the cleanup pause. 1724 // Sets the bits corresponding to the interval [NTAMS, top] 1725 // (which contains the implicitly live objects) in the 1726 // card liveness bitmap. Also sets the bit for each region, 1727 // containing live data, in the region liveness bitmap. 1728 1729 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1730 public: 1731 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1732 BitMap* region_bm, 1733 BitMap* card_bm) : 1734 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1735 1736 bool doHeapRegion(HeapRegion* hr) { 1737 1738 if (hr->is_continues_humongous()) { 1739 // We will ignore these here and process them when their 1740 // associated "starts humongous" region is processed (see 1741 // set_bit_for_heap_region()). 
Note that we cannot rely on their 1742 // associated "starts humongous" region to have their bit set to 1743 // 1 since, due to the region chunking in the parallel region 1744 // iteration, a "continues humongous" region might be visited 1745 // before its associated "starts humongous". 1746 return false; 1747 } 1748 1749 HeapWord* ntams = hr->next_top_at_mark_start(); 1750 HeapWord* top = hr->top(); 1751 1752 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1753 1754 // Mark the allocated-since-marking portion... 1755 if (ntams < top) { 1756 // This definitely means the region has live objects. 1757 set_bit_for_region(hr); 1758 1759 // Now set the bits in the card bitmap for [ntams, top) 1760 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1761 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1762 1763 // Note: if we're looking at the last region in heap - top 1764 // could be actually just beyond the end of the heap; end_idx 1765 // will then correspond to a (non-existent) card that is also 1766 // just beyond the heap. 1767 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1768 // end of object is not card aligned - increment to cover 1769 // all the cards spanned by the object 1770 end_idx += 1; 1771 } 1772 1773 assert(end_idx <= _card_bm->size(), 1774 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1775 end_idx, _card_bm->size())); 1776 assert(start_idx < _card_bm->size(), 1777 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1778 start_idx, _card_bm->size())); 1779 1780 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1781 } 1782 1783 // Set the bit for the region if it contains live data 1784 if (hr->next_marked_bytes() > 0) { 1785 set_bit_for_region(hr); 1786 } 1787 1788 return false; 1789 } 1790 }; 1791 1792 class G1ParFinalCountTask: public AbstractGangTask { 1793 protected: 1794 G1CollectedHeap* _g1h; 1795 ConcurrentMark* _cm; 1796 BitMap* _actual_region_bm; 1797 BitMap* _actual_card_bm; 1798 1799 uint _n_workers; 1800 HeapRegionClaimer _hrclaimer; 1801 1802 public: 1803 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1804 : AbstractGangTask("G1 final counting"), 1805 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1806 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1807 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1808 } 1809 1810 void work(uint worker_id) { 1811 assert(worker_id < _n_workers, "invariant"); 1812 1813 FinalCountDataUpdateClosure final_update_cl(_g1h, 1814 _actual_region_bm, 1815 _actual_card_bm); 1816 1817 _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer); 1818 } 1819 }; 1820 1821 class G1ParNoteEndTask; 1822 1823 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1824 G1CollectedHeap* _g1; 1825 size_t _max_live_bytes; 1826 uint _regions_claimed; 1827 size_t _freed_bytes; 1828 FreeRegionList* _local_cleanup_list; 1829 HeapRegionSetCount _old_regions_removed; 1830 HeapRegionSetCount _humongous_regions_removed; 1831 HRRSCleanupTask* _hrrs_cleanup_task; 1832 double _claimed_region_time; 1833 double _max_region_time; 1834 1835 public: 1836 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1837 FreeRegionList* local_cleanup_list, 1838 HRRSCleanupTask* hrrs_cleanup_task) : 1839 _g1(g1), 1840 _max_live_bytes(0), _regions_claimed(0), 1841 _freed_bytes(0), 1842 _claimed_region_time(0.0), _max_region_time(0.0), 1843 _local_cleanup_list(local_cleanup_list), 1844 
_old_regions_removed(),
    _humongous_regions_removed(),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
  const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      return false;
    }
    // Each region is claimed at most once by the HeapRegionClaimer used
    // in G1ParNoteEndTask, so it is processed here exactly once.
    _g1->reset_gc_time_stamps(hr);
    double start = os::elapsedTime();
    _regions_claimed++;
    hr->note_end_of_marking();
    _max_live_bytes += hr->max_live_bytes();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous(), "we should only see starts humongous");
        _humongous_regions_removed.increment(1u, hr->capacity());
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed.increment(1u, hr->capacity());
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    double region_time = (os::elapsedTime() - start);
    _claimed_region_time += region_time;
    if (region_time > _max_region_time) {
      _max_region_time = region_time;
    }
    return false;
  }

  size_t max_live_bytes() { return _max_live_bytes; }
  uint regions_claimed() { return _regions_claimed; }
  double claimed_region_time_sec() { return _claimed_region_time; }
  double max_region_time_sec() { return _max_region_time; }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  size_t _max_live_bytes;
  size_t _freed_bytes;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0),
    _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
      _max_live_bytes += g1_note_end.max_live_bytes();
      _freed_bytes += g1_note_end.freed_bytes();

      // If we iterated over the global cleanup list at the end of cleanup
      // to do this printing, we could not guarantee that we only generate
      // output for the newly-reclaimed regions (the list might not be
      // empty at the beginning of cleanup; we might still be working on
      // its previous contents). So we do the printing here, before we
      // append the new regions to the global cleanup list.
1931 1932 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1933 if (hr_printer->is_active()) { 1934 FreeRegionListIterator iter(&local_cleanup_list); 1935 while (iter.more_available()) { 1936 HeapRegion* hr = iter.get_next(); 1937 hr_printer->cleanup(hr); 1938 } 1939 } 1940 1941 _cleanup_list->add_ordered(&local_cleanup_list); 1942 assert(local_cleanup_list.is_empty(), "post-condition"); 1943 1944 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1945 } 1946 } 1947 size_t max_live_bytes() { return _max_live_bytes; } 1948 size_t freed_bytes() { return _freed_bytes; } 1949 }; 1950 1951 class G1ParScrubRemSetTask: public AbstractGangTask { 1952 protected: 1953 G1RemSet* _g1rs; 1954 BitMap* _region_bm; 1955 BitMap* _card_bm; 1956 HeapRegionClaimer _hrclaimer; 1957 1958 public: 1959 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1960 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1961 } 1962 1963 void work(uint worker_id) { 1964 _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer); 1965 } 1966 1967 }; 1968 1969 void ConcurrentMark::cleanup() { 1970 // world is stopped at this checkpoint 1971 assert(SafepointSynchronize::is_at_safepoint(), 1972 "world should be stopped"); 1973 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1974 1975 // If a full collection has happened, we shouldn't do this. 1976 if (has_aborted()) { 1977 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1978 return; 1979 } 1980 1981 g1h->verify_region_sets_optional(); 1982 1983 if (VerifyDuringGC) { 1984 HandleMark hm; // handle scope 1985 g1h->prepare_for_verify(); 1986 Universe::verify(VerifyOption_G1UsePrevMarking, 1987 " VerifyDuringGC:(before)"); 1988 } 1989 g1h->check_bitmaps("Cleanup Start"); 1990 1991 G1CollectorPolicy* g1p = g1h->g1_policy(); 1992 g1p->record_concurrent_mark_cleanup_start(); 1993 1994 double start = os::elapsedTime(); 1995 1996 HeapRegionRemSet::reset_for_cleanup_tasks(); 1997 1998 uint n_workers; 1999 2000 // Do counting once more with the world stopped for good measure. 2001 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 2002 2003 g1h->set_par_threads(); 2004 n_workers = g1h->n_par_threads(); 2005 assert(g1h->n_par_threads() == n_workers, 2006 "Should not have been reset"); 2007 g1h->workers()->run_task(&g1_par_count_task); 2008 // Done with the parallel phase so reset to 0. 2009 g1h->set_par_threads(0); 2010 2011 if (VerifyDuringGC) { 2012 // Verify that the counting data accumulated during marking matches 2013 // that calculated by walking the marking bitmap. 2014 2015 // Bitmaps to hold expected values 2016 BitMap expected_region_bm(_region_bm.size(), true); 2017 BitMap expected_card_bm(_card_bm.size(), true); 2018 2019 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 2020 &_region_bm, 2021 &_card_bm, 2022 &expected_region_bm, 2023 &expected_card_bm); 2024 2025 g1h->set_par_threads((int)n_workers); 2026 g1h->workers()->run_task(&g1_par_verify_task); 2027 // Done with the parallel phase so reset to 0. 
    g1h->set_par_threads(0);

    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  }

  size_t start_used_bytes = g1h->used();
  g1h->set_marking_complete();

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install the newly created mark bitmap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->set_par_threads((int)n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->set_par_threads(0);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
    g1h->set_par_threads((int)n_workers);
    g1h->workers()->run_task(&g1_par_scrub_rs_task);
    g1h->set_par_threads(0);

    double rs_scrub_end = os::elapsedTime();
    double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
    _total_rs_scrub_time += this_rs_scrub_time;
  }

  // This will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  if (G1Log::fine()) {
    g1h->g1_policy()->print_heap_transition(start_used_bytes);
  }

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(after)");
  }

  g1h->check_bitmaps("Cleanup End");

  g1h->verify_region_sets_optional();

  // We need to make this a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();

  g1h->trace_heap_after_concurrent_cycle();
}

void ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                           "cleanup list has %u entries",
                           _cleanup_list.length());
  }

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks.
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                               "appending %u entries to the secondary_free_list, "
                               "cleanup list still has %u entries",
                               tmp_free_list.length(),
                               _cleanup_list.length());
      }

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking.

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the CMTask associated with a worker thread (for serial
// reference processing the CMTask for worker 0 is used) to preserve (mark)
// and trace referent objects.
//
// Using the CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
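// A minimal sketch of that drain-interval pattern (illustrative only;
// next_reference() and process_one() are hypothetical stand-ins for the
// reference iteration and for deal_with_reference()):
//
//   int budget = G1RefProcDrainInterval;
//   while (void* ref = next_reference()) {
//     process_one(ref);          // may push entries on the local queue
//     if (--budget == 0) {       // periodically drain the queues so
//       drain_marking_stacks();  // they cannot grow without bound
//       budget = G1RefProcDrainInterval;
//     }
//   }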
2198 2199 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2200 ConcurrentMark* _cm; 2201 CMTask* _task; 2202 int _ref_counter_limit; 2203 int _ref_counter; 2204 bool _is_serial; 2205 public: 2206 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2207 _cm(cm), _task(task), _is_serial(is_serial), 2208 _ref_counter_limit(G1RefProcDrainInterval) { 2209 assert(_ref_counter_limit > 0, "sanity"); 2210 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2211 _ref_counter = _ref_counter_limit; 2212 } 2213 2214 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2215 virtual void do_oop( oop* p) { do_oop_work(p); } 2216 2217 template <class T> void do_oop_work(T* p) { 2218 if (!_cm->has_overflown()) { 2219 oop obj = oopDesc::load_decode_heap_oop(p); 2220 if (_cm->verbose_high()) { 2221 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2222 "*"PTR_FORMAT" = "PTR_FORMAT, 2223 _task->worker_id(), p2i(p), p2i((void*) obj)); 2224 } 2225 2226 _task->deal_with_reference(obj); 2227 _ref_counter--; 2228 2229 if (_ref_counter == 0) { 2230 // We have dealt with _ref_counter_limit references, pushing them 2231 // and objects reachable from them on to the local stack (and 2232 // possibly the global stack). Call CMTask::do_marking_step() to 2233 // process these entries. 2234 // 2235 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2236 // there's nothing more to do (i.e. we're done with the entries that 2237 // were pushed as a result of the CMTask::deal_with_reference() calls 2238 // above) or we overflow. 2239 // 2240 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2241 // flag while there may still be some work to do. (See the comment at 2242 // the beginning of CMTask::do_marking_step() for those conditions - 2243 // one of which is reaching the specified time target.) It is only 2244 // when CMTask::do_marking_step() returns without setting the 2245 // has_aborted() flag that the marking step has completed. 2246 do { 2247 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2248 _task->do_marking_step(mark_step_duration_ms, 2249 false /* do_termination */, 2250 _is_serial); 2251 } while (_task->has_aborted() && !_cm->has_overflown()); 2252 _ref_counter = _ref_counter_limit; 2253 } 2254 } else { 2255 if (_cm->verbose_high()) { 2256 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2257 } 2258 } 2259 } 2260 }; 2261 2262 // 'Drain' oop closure used by both serial and parallel reference processing. 2263 // Uses the CMTask associated with a given worker thread (for serial 2264 // reference processing the CMtask for worker 0 is used). Calls the 2265 // do_marking_step routine, with an unbelievably large timeout value, 2266 // to drain the marking data structures of the remaining entries 2267 // added by the 'keep alive' oop closure above. 
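// Note (added for clarity): the 1000000000.0 ms "time target" passed to
// do_marking_step() below is deliberately unreachable. It effectively
// disables the time-based abort, so a step only terminates once the
// stacks are drained, the global mark stack overflows, or the step
// aborts for one of its other documented reasons.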
class G1CMDrainMarkingStackClosure: public VoidClosure {
  ConcurrentMark* _cm;
  CMTask* _task;
  bool _is_serial;
 public:
  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
                               _task->worker_id(), BOOL_TO_STR(_is_serial));
      }

      // We call CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking.

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;
  WorkGang*        _workers;
  uint             _active_workers;

 public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
2329 virtual void execute(ProcessTask& task); 2330 virtual void execute(EnqueueTask& task); 2331 }; 2332 2333 class G1CMRefProcTaskProxy: public AbstractGangTask { 2334 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2335 ProcessTask& _proc_task; 2336 G1CollectedHeap* _g1h; 2337 ConcurrentMark* _cm; 2338 2339 public: 2340 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2341 G1CollectedHeap* g1h, 2342 ConcurrentMark* cm) : 2343 AbstractGangTask("Process reference objects in parallel"), 2344 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2345 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2346 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2347 } 2348 2349 virtual void work(uint worker_id) { 2350 ResourceMark rm; 2351 HandleMark hm; 2352 CMTask* task = _cm->task(worker_id); 2353 G1CMIsAliveClosure g1_is_alive(_g1h); 2354 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2355 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2356 2357 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2358 } 2359 }; 2360 2361 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2362 assert(_workers != NULL, "Need parallel worker threads."); 2363 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2364 2365 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2366 2367 // We need to reset the concurrency level before each 2368 // proxy task execution, so that the termination protocol 2369 // and overflow handling in CMTask::do_marking_step() knows 2370 // how many workers to wait for. 2371 _cm->set_concurrency(_active_workers); 2372 _g1h->set_par_threads(_active_workers); 2373 _workers->run_task(&proc_task_proxy); 2374 _g1h->set_par_threads(0); 2375 } 2376 2377 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2378 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2379 EnqueueTask& _enq_task; 2380 2381 public: 2382 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2383 AbstractGangTask("Enqueue reference objects in parallel"), 2384 _enq_task(enq_task) { } 2385 2386 virtual void work(uint worker_id) { 2387 _enq_task.work(worker_id); 2388 } 2389 }; 2390 2391 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2392 assert(_workers != NULL, "Need parallel worker threads."); 2393 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2394 2395 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2396 2397 // Not strictly necessary but... 2398 // 2399 // We need to reset the concurrency level before each 2400 // proxy task execution, so that the termination protocol 2401 // and overflow handling in CMTask::do_marking_step() knows 2402 // how many workers to wait for. 2403 _cm->set_concurrency(_active_workers); 2404 _g1h->set_par_threads(_active_workers); 2405 _workers->run_task(&enq_task_proxy); 2406 _g1h->set_par_threads(0); 2407 } 2408 2409 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2410 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2411 } 2412 2413 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2414 if (has_overflown()) { 2415 // Skip processing the discovered references if we have 2416 // overflown the global marking stack. Reference objects 2417 // only get discovered once so it is OK to not 2418 // de-populate the discovered reference lists. 
We could have,
// but the only benefit would be that, when marking restarts,
// fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark   hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    G1CMTraceTime t("GC ref-proc", G1Log::finer());

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy.
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          g1h->gc_timer_cm(),
                                          concurrent_gc_id());
    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
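    // (If that flag was set, we bail out of weakRefsWork() just below -
    // see the has_overflown() check after this scope - and concurrent
    // marking will be done again.)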
2499 2500 assert(_markStack.overflow() || _markStack.isEmpty(), 2501 "mark stack should be empty (unless it overflowed)"); 2502 2503 if (_markStack.overflow()) { 2504 // This should have been done already when we tried to push an 2505 // entry on to the global mark stack. But let's do it again. 2506 set_has_overflown(); 2507 } 2508 2509 assert(rp->num_q() == active_workers, "why not"); 2510 2511 rp->enqueue_discovered_references(executor); 2512 2513 rp->verify_no_references_recorded(); 2514 assert(!rp->discovery_enabled(), "Post condition"); 2515 } 2516 2517 if (has_overflown()) { 2518 // We can not trust g1_is_alive if the marking stack overflowed 2519 return; 2520 } 2521 2522 assert(_markStack.isEmpty(), "Marking should have completed"); 2523 2524 // Unload Klasses, String, Symbols, Code Cache, etc. 2525 { 2526 G1CMTraceTime trace("Unloading", G1Log::finer()); 2527 2528 if (ClassUnloadingWithConcurrentMark) { 2529 bool purged_classes; 2530 2531 { 2532 G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest()); 2533 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); 2534 } 2535 2536 { 2537 G1CMTraceTime trace("Parallel Unloading", G1Log::finest()); 2538 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2539 } 2540 } 2541 2542 if (G1StringDedup::is_enabled()) { 2543 G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest()); 2544 G1StringDedup::unlink(&g1_is_alive); 2545 } 2546 } 2547 } 2548 2549 void ConcurrentMark::swapMarkBitMaps() { 2550 CMBitMapRO* temp = _prevMarkBitMap; 2551 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2552 _nextMarkBitMap = (CMBitMap*) temp; 2553 } 2554 2555 // Closure for marking entries in SATB buffers. 2556 class CMSATBBufferClosure : public SATBBufferClosure { 2557 private: 2558 CMTask* _task; 2559 G1CollectedHeap* _g1h; 2560 2561 // This is very similar to CMTask::deal_with_reference, but with 2562 // more relaxed requirements for the argument, so this must be more 2563 // circumspect about treating the argument as an object. 2564 void do_entry(void* entry) const { 2565 _task->increment_refs_reached(); 2566 HeapRegion* hr = _g1h->heap_region_containing_raw(entry); 2567 if (entry < hr->next_top_at_mark_start()) { 2568 // Until we get here, we don't know whether entry refers to a valid 2569 // object; it could instead have been a stale reference. 
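      // Added note: the check above filters out entries at or above NTAMS.
      // Those either refer to objects allocated since marking started
      // (which are implicitly live and need no marking) or may not refer
      // to a valid object at all.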
      oop obj = static_cast<oop>(entry);
      assert(obj->is_oop(true /* ignore mark word */),
             err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
      _task->make_reference_grey(obj, hr);
    }
  }

 public:
  CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  CMSATBBufferClosure _cm_satb_cl;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

 public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
    _cm_satb_cl(task, g1h),
    _cm_cl(g1h, g1h->concurrent_mark(), task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the
        // nmethods to find roots for concurrent marking. However, the
        // oops reachable from nmethods have very complex lifecycles:
        //  * Alive if on the stack of an executing method
        //  * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader
        // (or klass_holder) of the receiver, should be kept live by the
        // SATB invariant, but other oops recorded in nmethods may behave
        // differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
      }
    }
  }
};

class CMRemarkTask: public AbstractGangTask {
 private:
  ConcurrentMark* _cm;
 public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true /* do_termination */,
                              false /* is_serial */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark   hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  G1CMTraceTime trace("Finalize Marking", G1Log::finer());

  g1h->ensure_parsability(false);

  StrongRootsScope srs;
  // This is remark, so we'll use up all active threads.
  uint active_workers = g1h->workers()->active_workers();
  if (active_workers == 0) {
    assert(active_workers > 0, "Should have been set earlier");
    active_workers = (uint) ParallelGCThreads;
    g1h->workers()->set_active_workers(active_workers);
  }
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its value originally calculated
  // in the ConcurrentMark constructor and pass values of the active
  // workers through the gang in the task.

  CMRemarkTask remarkTask(this, active_workers);
  // We will start all available threads, even if we decide that the
  // active_workers will be fewer. The extra ones will just bail out
  // immediately.
  g1h->set_par_threads(active_workers);
  g1h->workers()->run_task(&remarkTask);
  g1h->set_par_threads(0);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            err_msg("Invariant: has_overflown = %s, num buffers = %d",
                    BOOL_TO_STR(has_overflown()),
                    satb_mq_set.completed_buffers_num()));

  print_stats();
}

void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
}

void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  _nextMarkBitMap->clearRange(mr);
}

HeapRegion*
ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    // Note on how this code handles humongous regions. In the
    // normal case the finger will reach the start of a "starts
    // humongous" (SH) region. Its end will either be the end of the
    // last "continues humongous" (CH) region in the sequence, or the
    // standard end of the SH region (if the SH is the only region in
    // the sequence). That way claim_region() will skip over the CH
    // regions. However, there is a subtle race between a CM thread
    // executing this method and a mutator thread doing a humongous
    // object allocation. The two are not mutually exclusive as the CM
    // thread does not need to hold the Heap_lock when it gets
    // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming a SH or a CH
    // region. In the former case, it will either
    //   a) miss the update to the region's end, in which case it will
    //      visit every subsequent CH region, will find their bitmaps
    //      empty, and do nothing, or
    //   b) observe the update of the region's end (in which case
    //      it will skip the subsequent CH regions).
    // If it comes across a region that suddenly becomes CH, the
    // scenario will be similar to b). So, the race between
    // claim_region() and a humongous object allocation might force us
    // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);

    // Above, heap_region_containing_raw may return NULL as we always scan
    // and claim until the end of the heap. In this case, just jump to the
    // next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit = curr_region->next_top_at_mark_start();

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
                               "["PTR_FORMAT", "PTR_FORMAT"), "
                               "limit = "PTR_FORMAT,
                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
      }

      // Notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further.
      assert(_finger >= end, "the finger should have moved forward");

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] we were successful with region = "
                               PTR_FORMAT, worker_id, p2i(curr_region));
      }

      if (limit > bottom) {
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
                                 "returning it ", worker_id, p2i(curr_region));
        }
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
                                 "returning NULL", worker_id, p2i(curr_region));
        }
        // We return NULL and the caller should try calling
        // claim_region() again.
2787 return NULL; 2788 } 2789 } else { 2790 assert(_finger > finger, "the finger should have moved forward"); 2791 if (verbose_low()) { 2792 if (curr_region == NULL) { 2793 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 2794 "global finger = "PTR_FORMAT", " 2795 "our finger = "PTR_FORMAT, 2796 worker_id, p2i(_finger), p2i(finger)); 2797 } else { 2798 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2799 "global finger = "PTR_FORMAT", " 2800 "our finger = "PTR_FORMAT, 2801 worker_id, p2i(_finger), p2i(finger)); 2802 } 2803 } 2804 2805 // read it again 2806 finger = _finger; 2807 } 2808 } 2809 2810 return NULL; 2811 } 2812 2813 #ifndef PRODUCT 2814 enum VerifyNoCSetOopsPhase { 2815 VerifyNoCSetOopsStack, 2816 VerifyNoCSetOopsQueues 2817 }; 2818 2819 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2820 private: 2821 G1CollectedHeap* _g1h; 2822 VerifyNoCSetOopsPhase _phase; 2823 int _info; 2824 2825 const char* phase_str() { 2826 switch (_phase) { 2827 case VerifyNoCSetOopsStack: return "Stack"; 2828 case VerifyNoCSetOopsQueues: return "Queue"; 2829 default: ShouldNotReachHere(); 2830 } 2831 return NULL; 2832 } 2833 2834 void do_object_work(oop obj) { 2835 guarantee(!_g1h->obj_in_cs(obj), 2836 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2837 p2i((void*) obj), phase_str(), _info)); 2838 } 2839 2840 public: 2841 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2842 2843 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2844 _phase = phase; 2845 _info = info; 2846 } 2847 2848 virtual void do_oop(oop* p) { 2849 oop obj = oopDesc::load_decode_heap_oop(p); 2850 do_object_work(obj); 2851 } 2852 2853 virtual void do_oop(narrowOop* p) { 2854 // We should not come across narrow oops while scanning marking 2855 // stacks 2856 ShouldNotReachHere(); 2857 } 2858 2859 virtual void do_object(oop obj) { 2860 do_object_work(obj); 2861 } 2862 }; 2863 2864 void ConcurrentMark::verify_no_cset_oops() { 2865 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2866 if (!G1CollectedHeap::heap()->mark_in_progress()) { 2867 return; 2868 } 2869 2870 VerifyNoCSetOopsClosure cl; 2871 2872 // Verify entries on the global mark stack 2873 cl.set_phase(VerifyNoCSetOopsStack); 2874 _markStack.oops_do(&cl); 2875 2876 // Verify entries on the task queues 2877 for (uint i = 0; i < _max_worker_id; i += 1) { 2878 cl.set_phase(VerifyNoCSetOopsQueues, i); 2879 CMTaskQueue* queue = _task_queues->queue(i); 2880 queue->oops_do(&cl); 2881 } 2882 2883 // Verify the global finger 2884 HeapWord* global_finger = finger(); 2885 if (global_finger != NULL && global_finger < _heap_end) { 2886 // The global finger always points to a heap region boundary. We 2887 // use heap_region_containing_raw() to get the containing region 2888 // given that the global finger could be pointing to a free region 2889 // which subsequently becomes continues humongous. If that 2890 // happens, heap_region_containing() will return the bottom of the 2891 // corresponding starts humongous region and the check below will 2892 // not hold any more. 2893 // Since we always iterate over all regions, we might get a NULL HeapRegion 2894 // here. 
    HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
                      p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
  }

  // Verify the task fingers
  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
    CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap_end) {
      // See above note on the global finger verification.
      HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                !task_hr->in_collection_set(),
                err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
                        p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
    }
  }
}
#endif // PRODUCT

// Aggregate the counting data that was constructed concurrently
// with marking.
class AggregateCountDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;
  BitMap* _cm_card_bm;
  uint _max_worker_id;

 public:
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed.
      // Note that we cannot rely on their associated
      // "starts humongous" region to have its bit set to 1
      // since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* start = hr->bottom();
    HeapWord* limit = hr->next_top_at_mark_start();
    HeapWord* end = hr->end();

    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
                   "top: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));

    assert(hr->next_marked_bytes() == 0, "Precondition");

    if (start == limit) {
      // NTAMS of this region has not been set so nothing to do.
      return false;
    }

    // 'start' should be in the heap.
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region).
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could actually be just beyond the end of the heap; limit_idx
    // will then correspond to a (non-existent) card that is also
    // outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
    uint hrm_index = hr->hrm_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrm_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
      BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);

      while (scan_idx < limit_idx) {
        assert(task_card_bm->at(scan_idx) == true, "should be");
        _cm_card_bm->set_bit(scan_idx);
        assert(_cm_card_bm->at(scan_idx) == true, "should be");

        // BitMap::get_next_one_offset() can handle the case when
        // its left_offset parameter is greater than its right_offset
        // parameter. It does, however, have an early exit if
        // left_offset == right_offset. So let's limit the value
        // passed in for left offset here.
        BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
        scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
      }
    }

    // Update the marked bytes for this region.
    hr->add_to_marked_bytes(marked_bytes);

    // Next heap region.
    return false;
  }
};

class G1AggregateCountDataTask: public AbstractGangTask {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _cm_card_bm;
  uint _max_worker_id;
  uint _active_workers;
  HeapRegionClaimer _hrclaimer;

 public:
  G1AggregateCountDataTask(G1CollectedHeap* g1h,
                           ConcurrentMark* cm,
                           BitMap* cm_card_bm,
                           uint max_worker_id,
                           uint n_workers) :
    AbstractGangTask("Count Aggregation"),
    _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
    _max_worker_id(max_worker_id),
    _active_workers(n_workers),
    _hrclaimer(_active_workers) {
  }

  void work(uint worker_id) {
    AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);

    _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
  }
};

void ConcurrentMark::aggregate_count_data() {
  uint n_workers = _g1h->workers()->active_workers();

  G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
                                           _max_worker_id, n_workers);

  _g1h->set_par_threads(n_workers);
  _g1h->workers()->run_task(&g1_par_agg_task);
  _g1h->set_par_threads(0);
}

// Clear the per-worker arrays used to store the per-region counting data.
void ConcurrentMark::clear_all_count_data() {
  // Clear the global card bitmap - it will be filled during
  // liveness count aggregation (during remark) and the
  // final counting task.
  _card_bm.clear();

  // Clear the global region bitmap - it will be filled as part
  // of the final counting task.
3075 _region_bm.clear(); 3076 3077 uint max_regions = _g1h->max_regions(); 3078 assert(_max_worker_id > 0, "uninitialized"); 3079 3080 for (uint i = 0; i < _max_worker_id; i += 1) { 3081 BitMap* task_card_bm = count_card_bitmap_for(i); 3082 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3083 3084 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3085 assert(marked_bytes_array != NULL, "uninitialized"); 3086 3087 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3088 task_card_bm->clear(); 3089 } 3090 } 3091 3092 void ConcurrentMark::print_stats() { 3093 if (verbose_stats()) { 3094 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3095 for (size_t i = 0; i < _active_tasks; ++i) { 3096 _tasks[i]->print_stats(); 3097 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3098 } 3099 } 3100 } 3101 3102 // abandon current marking iteration due to a Full GC 3103 void ConcurrentMark::abort() { 3104 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3105 // concurrent bitmap clearing. 3106 _nextMarkBitMap->clearAll(); 3107 3108 // Note we cannot clear the previous marking bitmap here 3109 // since VerifyDuringGC verifies the objects marked during 3110 // a full GC against the previous bitmap. 3111 3112 // Clear the liveness counting data 3113 clear_all_count_data(); 3114 // Empty mark stack 3115 reset_marking_state(); 3116 for (uint i = 0; i < _max_worker_id; ++i) { 3117 _tasks[i]->clear_region_fields(); 3118 } 3119 _first_overflow_barrier_sync.abort(); 3120 _second_overflow_barrier_sync.abort(); 3121 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); 3122 if (!gc_id.is_undefined()) { 3123 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance 3124 // to detect that it was aborted. Only keep track of the first GC id that we aborted. 3125 _aborted_gc_id = gc_id; 3126 } 3127 _has_aborted = true; 3128 3129 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3130 satb_mq_set.abandon_partial_marking(); 3131 // This can be called either during or outside marking, we'll read 3132 // the expected_active value from the SATB queue set. 3133 satb_mq_set.set_active_all_threads( 3134 false, /* new active value */ 3135 satb_mq_set.is_active() /* expected_active */); 3136 3137 _g1h->trace_heap_after_concurrent_cycle(); 3138 _g1h->register_concurrent_cycle_end(); 3139 } 3140 3141 const GCId& ConcurrentMark::concurrent_gc_id() { 3142 if (has_aborted()) { 3143 return _aborted_gc_id; 3144 } 3145 return _g1h->gc_tracer_cm()->gc_id(); 3146 } 3147 3148 static void print_ms_time_info(const char* prefix, const char* name, 3149 NumberSeq& ns) { 3150 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3151 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3152 if (ns.num() > 0) { 3153 gclog_or_tty->print_cr("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 3154 prefix, ns.sd(), ns.maximum()); 3155 } 3156 } 3157 3158 void ConcurrentMark::print_summary_info() { 3159 gclog_or_tty->print_cr(" Concurrent marking:"); 3160 print_ms_time_info(" ", "init marks", _init_times); 3161 print_ms_time_info(" ", "remarks", _remark_times); 3162 { 3163 print_ms_time_info(" ", "final marks", _remark_mark_times); 3164 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3165 3166 } 3167 print_ms_time_info(" ", "cleanups", _cleanup_times); 3168 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3169 _total_counting_time, 3170 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3171 (double)_cleanup_times.num() 3172 : 0.0)); 3173 if (G1ScrubRemSets) { 3174 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3175 _total_rs_scrub_time, 3176 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3177 (double)_cleanup_times.num() 3178 : 0.0)); 3179 } 3180 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3181 (_init_times.sum() + _remark_times.sum() + 3182 _cleanup_times.sum())/1000.0); 3183 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3184 "(%8.2f s marking).", 3185 cmThread()->vtime_accum(), 3186 cmThread()->vtime_mark_accum()); 3187 } 3188 3189 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3190 _parallel_workers->print_worker_threads_on(st); 3191 } 3192 3193 void ConcurrentMark::print_on_error(outputStream* st) const { 3194 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3195 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3196 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3197 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3198 } 3199 3200 // We take a break if someone is trying to stop the world. 
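// A minimal usage sketch (illustrative only; more_work() and
// do_some_marking() are hypothetical stand-ins): concurrent phases poll
// do_yield_check() from their inner loops so that a pending safepoint is
// never kept waiting for long:
//
//   while (more_work()) {
//     do_some_marking();
//     if (cm->do_yield_check(worker_id)) {
//       // We yielded for a safepoint; any cached state (e.g. a local
//       // copy of the finger) may be stale and should be re-read.
//     }
//   }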
3201 bool ConcurrentMark::do_yield_check(uint worker_id) { 3202 if (SuspendibleThreadSet::should_yield()) { 3203 if (worker_id == 0) { 3204 _g1h->g1_policy()->record_concurrent_pause(); 3205 } 3206 SuspendibleThreadSet::yield(); 3207 return true; 3208 } else { 3209 return false; 3210 } 3211 } 3212 3213 #ifndef PRODUCT 3214 // for debugging purposes 3215 void ConcurrentMark::print_finger() { 3216 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3217 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3218 for (uint i = 0; i < _max_worker_id; ++i) { 3219 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3220 } 3221 gclog_or_tty->cr(); 3222 } 3223 #endif 3224 3225 template<bool scan> 3226 inline void CMTask::process_grey_object(oop obj) { 3227 assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray"); 3228 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3229 3230 if (_cm->verbose_high()) { 3231 gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT, 3232 _worker_id, p2i((void*) obj)); 3233 } 3234 3235 size_t obj_size = obj->size(); 3236 _words_scanned += obj_size; 3237 3238 if (scan) { 3239 obj->oop_iterate(_cm_oop_closure); 3240 } 3241 statsOnly( ++_objs_scanned ); 3242 check_limits(); 3243 } 3244 3245 template void CMTask::process_grey_object<true>(oop); 3246 template void CMTask::process_grey_object<false>(oop); 3247 3248 // Closure for iteration over bitmaps 3249 class CMBitMapClosure : public BitMapClosure { 3250 private: 3251 // the bitmap that is being iterated over 3252 CMBitMap* _nextMarkBitMap; 3253 ConcurrentMark* _cm; 3254 CMTask* _task; 3255 3256 public: 3257 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3258 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3259 3260 bool do_bit(size_t offset) { 3261 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3262 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3263 assert( addr < _cm->finger(), "invariant"); 3264 3265 statsOnly( _task->increase_objs_found_on_bitmap() ); 3266 assert(addr >= _task->finger(), "invariant"); 3267 3268 // We move that task's local finger along. 
3269 _task->move_finger_to(addr); 3270 3271 _task->scan_object(oop(addr)); 3272 // we only partially drain the local queue and global stack 3273 _task->drain_local_queue(true); 3274 _task->drain_global_stack(true); 3275 3276 // if the has_aborted flag has been raised, we need to bail out of 3277 // the iteration 3278 return !_task->has_aborted(); 3279 } 3280 }; 3281 3282 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3283 ConcurrentMark* cm, 3284 CMTask* task) 3285 : _g1h(g1h), _cm(cm), _task(task) { 3286 assert(_ref_processor == NULL, "should be initialized to NULL"); 3287 3288 if (G1UseConcMarkReferenceProcessing) { 3289 _ref_processor = g1h->ref_processor_cm(); 3290 assert(_ref_processor != NULL, "should not be NULL"); 3291 } 3292 } 3293 3294 void CMTask::setup_for_region(HeapRegion* hr) { 3295 assert(hr != NULL, 3296 "claim_region() should have filtered out NULL regions"); 3297 assert(!hr->is_continues_humongous(), 3298 "claim_region() should have filtered out continues humongous regions"); 3299 3300 if (_cm->verbose_low()) { 3301 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3302 _worker_id, p2i(hr)); 3303 } 3304 3305 _curr_region = hr; 3306 _finger = hr->bottom(); 3307 update_region_limit(); 3308 } 3309 3310 void CMTask::update_region_limit() { 3311 HeapRegion* hr = _curr_region; 3312 HeapWord* bottom = hr->bottom(); 3313 HeapWord* limit = hr->next_top_at_mark_start(); 3314 3315 if (limit == bottom) { 3316 if (_cm->verbose_low()) { 3317 gclog_or_tty->print_cr("[%u] found an empty region " 3318 "["PTR_FORMAT", "PTR_FORMAT")", 3319 _worker_id, p2i(bottom), p2i(limit)); 3320 } 3321 // The region was collected underneath our feet. 3322 // We set the finger to bottom to ensure that the bitmap 3323 // iteration that will follow this will not do anything. 3324 // (this is not a condition that holds when we set the region up, 3325 // as the region is not supposed to be empty in the first place) 3326 _finger = bottom; 3327 } else if (limit >= _region_limit) { 3328 assert(limit >= _finger, "peace of mind"); 3329 } else { 3330 assert(limit < _region_limit, "only way to get here"); 3331 // This can happen under some pretty unusual circumstances. An 3332 // evacuation pause empties the region underneath our feet (NTAMS 3333 // at bottom). We then do some allocation in the region (NTAMS 3334 // stays at bottom), followed by the region being used as a GC 3335 // alloc region (NTAMS will move to top() and the objects 3336 // originally below it will be grayed). All objects now marked in 3337 // the region are explicitly grayed, if below the global finger, 3338 // and we do not need in fact to scan anything else. So, we simply 3339 // set _finger to be limit to ensure that the bitmap iteration 3340 // doesn't do anything. 3341 _finger = limit; 3342 } 3343 3344 _region_limit = limit; 3345 } 3346 3347 void CMTask::giveup_current_region() { 3348 assert(_curr_region != NULL, "invariant"); 3349 if (_cm->verbose_low()) { 3350 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, 3351 _worker_id, p2i(_curr_region)); 3352 } 3353 clear_region_fields(); 3354 } 3355 3356 void CMTask::clear_region_fields() { 3357 // Values for these three fields that indicate that we're not 3358 // holding on to a region. 
3359 _curr_region = NULL; 3360 _finger = NULL; 3361 _region_limit = NULL; 3362 } 3363 3364 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3365 if (cm_oop_closure == NULL) { 3366 assert(_cm_oop_closure != NULL, "invariant"); 3367 } else { 3368 assert(_cm_oop_closure == NULL, "invariant"); 3369 } 3370 _cm_oop_closure = cm_oop_closure; 3371 } 3372 3373 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3374 guarantee(nextMarkBitMap != NULL, "invariant"); 3375 3376 if (_cm->verbose_low()) { 3377 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3378 } 3379 3380 _nextMarkBitMap = nextMarkBitMap; 3381 clear_region_fields(); 3382 3383 _calls = 0; 3384 _elapsed_time_ms = 0.0; 3385 _termination_time_ms = 0.0; 3386 _termination_start_time_ms = 0.0; 3387 3388 #if _MARKING_STATS_ 3389 _aborted = 0; 3390 _aborted_overflow = 0; 3391 _aborted_cm_aborted = 0; 3392 _aborted_yield = 0; 3393 _aborted_timed_out = 0; 3394 _aborted_satb = 0; 3395 _aborted_termination = 0; 3396 _steal_attempts = 0; 3397 _steals = 0; 3398 _local_pushes = 0; 3399 _local_pops = 0; 3400 _local_max_size = 0; 3401 _objs_scanned = 0; 3402 _global_pushes = 0; 3403 _global_pops = 0; 3404 _global_max_size = 0; 3405 _global_transfers_to = 0; 3406 _global_transfers_from = 0; 3407 _regions_claimed = 0; 3408 _objs_found_on_bitmap = 0; 3409 _satb_buffers_processed = 0; 3410 #endif // _MARKING_STATS_ 3411 } 3412 3413 bool CMTask::should_exit_termination() { 3414 regular_clock_call(); 3415 // This is called when we are in the termination protocol. We should 3416 // quit if, for some reason, this task wants to abort or the global 3417 // stack is not empty (this means that we can get work from it). 3418 return !_cm->mark_stack_empty() || has_aborted(); 3419 } 3420 3421 void CMTask::reached_limit() { 3422 assert(_words_scanned >= _words_scanned_limit || 3423 _refs_reached >= _refs_reached_limit , 3424 "shouldn't have been called otherwise"); 3425 regular_clock_call(); 3426 } 3427 3428 void CMTask::regular_clock_call() { 3429 if (has_aborted()) return; 3430 3431 // First, we need to recalculate the words scanned and refs reached 3432 // limits for the next clock call. 3433 recalculate_limits(); 3434 3435 // During the regular clock call we do the following 3436 3437 // (1) If an overflow has been flagged, then we abort. 3438 if (_cm->has_overflown()) { 3439 set_has_aborted(); 3440 return; 3441 } 3442 3443 // If we are not concurrent (i.e. we're doing remark) we don't need 3444 // to check anything else. The other steps are only needed during 3445 // the concurrent marking phase. 3446 if (!concurrent()) return; 3447 3448 // (2) If marking has been aborted for Full GC, then we also abort. 3449 if (_cm->has_aborted()) { 3450 set_has_aborted(); 3451 statsOnly( ++_aborted_cm_aborted ); 3452 return; 3453 } 3454 3455 double curr_time_ms = os::elapsedVTime() * 1000.0; 3456 3457 // (3) If marking stats are enabled, then we update the step history. 
3458 #if _MARKING_STATS_
3459   if (_words_scanned >= _words_scanned_limit) {
3460     ++_clock_due_to_scanning;
3461   }
3462   if (_refs_reached >= _refs_reached_limit) {
3463     ++_clock_due_to_marking;
3464   }
3465
3466   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3467   _interval_start_time_ms = curr_time_ms;
3468   _all_clock_intervals_ms.add(last_interval_ms);
3469
3470   if (_cm->verbose_medium()) {
3471     gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3472                            "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3473                            _worker_id, last_interval_ms,
3474                            _words_scanned,
3475                            (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3476                            _refs_reached,
3477                            (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3478   }
3479 #endif // _MARKING_STATS_
3480
3481   // (4) We check whether we should yield. If we have to, then we abort.
3482   if (SuspendibleThreadSet::should_yield()) {
3483     // We should yield. To do this we abort the task. The caller is
3484     // responsible for yielding.
3485     set_has_aborted();
3486     statsOnly( ++_aborted_yield );
3487     return;
3488   }
3489
3490   // (5) We check whether we've reached our time quota. If we have,
3491   // then we abort.
3492   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3493   if (elapsed_time_ms > _time_target_ms) {
3494     set_has_aborted();
3495     _has_timed_out = true;
3496     statsOnly( ++_aborted_timed_out );
3497     return;
3498   }
3499
3500   // (6) Finally, we check whether there are enough completed SATB
3501   // buffers available for processing. If there are, we abort.
3502   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3503   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3504     if (_cm->verbose_low()) {
3505       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3506                              _worker_id);
3507     }
3508     // We do need to process SATB buffers, so we'll abort and restart
3509     // the marking task to do so.
3510     set_has_aborted();
3511     statsOnly( ++_aborted_satb );
3512     return;
3513   }
3514 }
3515
3516 void CMTask::recalculate_limits() {
3517   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3518   _words_scanned_limit = _real_words_scanned_limit;
3519
3520   _real_refs_reached_limit = _refs_reached + refs_reached_period;
3521   _refs_reached_limit = _real_refs_reached_limit;
3522 }
3523
3524 void CMTask::decrease_limits() {
3525   // This is called when we believe that we're going to do an infrequent
3526   // operation which will increase the per-byte scanned cost (i.e. move
3527   // entries to/from the global stack). It basically tries to decrease the
3528   // scanning limit so that the clock is called earlier.
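  //
  // A small worked example (W stands for words_scanned_period; the 3/4
  // factor is taken from the code below): right after recalculate_limits()
  // the limit sits a full period ahead, _words_scanned_limit ==
  // _words_scanned + W. This call pulls it back by 3W/4, so the clock
  // fires after only W/4 more words instead of W.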
3529
3530   if (_cm->verbose_medium()) {
3531     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3532   }
3533
3534   _words_scanned_limit = _real_words_scanned_limit -
3535     3 * words_scanned_period / 4;
3536   _refs_reached_limit = _real_refs_reached_limit -
3537     3 * refs_reached_period / 4;
3538 }
3539
3540 void CMTask::move_entries_to_global_stack() {
3541   // local array where we'll store the entries that will be popped
3542   // from the local queue
3543   oop buffer[global_stack_transfer_size];
3544
3545   int n = 0;
3546   oop obj;
3547   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3548     buffer[n] = obj;
3549     ++n;
3550   }
3551
3552   if (n > 0) {
3553     // we popped at least one entry from the local queue
3554
3555     statsOnly( ++_global_transfers_to; _local_pops += n );
3556
3557     if (!_cm->mark_stack_push(buffer, n)) {
3558       if (_cm->verbose_low()) {
3559         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3560                                _worker_id);
3561       }
3562       set_has_aborted();
3563     } else {
3564       // the transfer was successful
3565
3566       if (_cm->verbose_medium()) {
3567         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3568                                _worker_id, n);
3569       }
3570       statsOnly( size_t tmp_size = _cm->mark_stack_size();
3571                  if (tmp_size > _global_max_size) {
3572                    _global_max_size = tmp_size;
3573                  }
3574                  _global_pushes += n );
3575     }
3576   }
3577
3578   // this operation was quite expensive, so decrease the limits
3579   decrease_limits();
3580 }
3581
3582 void CMTask::get_entries_from_global_stack() {
3583   // local array where we'll store the entries that will be popped
3584   // from the global stack.
3585   oop buffer[global_stack_transfer_size];
3586   int n;
3587   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3588   assert(n <= global_stack_transfer_size,
3589          "we should not pop more than the given limit");
3590   if (n > 0) {
3591     // yes, we did actually pop at least one entry
3592
3593     statsOnly( ++_global_transfers_from; _global_pops += n );
3594     if (_cm->verbose_medium()) {
3595       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3596                              _worker_id, n);
3597     }
3598     for (int i = 0; i < n; ++i) {
3599       bool success = _task_queue->push(buffer[i]);
3600       // We only call this when the local queue is empty or under a
3601       // given target limit. So, we do not expect this push to fail.
3602       assert(success, "invariant");
3603     }
3604
3605     statsOnly( size_t tmp_size = (size_t)_task_queue->size();
3606                if (tmp_size > _local_max_size) {
3607                  _local_max_size = tmp_size;
3608                }
3609                _local_pushes += n );
3610   }
3611
3612   // this operation was quite expensive, so decrease the limits
3613   decrease_limits();
3614 }
3615
3616 void CMTask::drain_local_queue(bool partially) {
3617   if (has_aborted()) return;
3618
3619   // Decide what the target size is, depending on whether we're going to
3620   // drain it partially (so that other tasks can steal if they run out
3621   // of things to do) or totally (at the very end).
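  // For example (illustrative: a 16k-entry task queue, and assuming
  // GCDrainStackTargetSize is at a small default such as 64), a partial
  // drain computes MIN2(16384 / 3, 64) = 64, so we stop once at most 64
  // entries remain; a total drain uses a target of 0.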
3622   size_t target_size;
3623   if (partially) {
3624     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3625   } else {
3626     target_size = 0;
3627   }
3628
3629   if (_task_queue->size() > target_size) {
3630     if (_cm->verbose_high()) {
3631       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3632                              _worker_id, target_size);
3633     }
3634
3635     oop obj;
3636     bool ret = _task_queue->pop_local(obj);
3637     while (ret) {
3638       statsOnly( ++_local_pops );
3639
3640       if (_cm->verbose_high()) {
3641         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
3642                                p2i((void*) obj));
3643       }
3644
3645       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3646       assert(!_g1h->is_on_master_free_list(
3647                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3648
3649       scan_object(obj);
3650
3651       if (_task_queue->size() <= target_size || has_aborted()) {
3652         ret = false;
3653       } else {
3654         ret = _task_queue->pop_local(obj);
3655       }
3656     }
3657
3658     if (_cm->verbose_high()) {
3659       gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3660                              _worker_id, _task_queue->size());
3661     }
3662   }
3663 }
3664
3665 void CMTask::drain_global_stack(bool partially) {
3666   if (has_aborted()) return;
3667
3668   // We have a policy to drain the local queue before we attempt to
3669   // drain the global stack.
3670   assert(partially || _task_queue->size() == 0, "invariant");
3671
3672   // Decide what the target size is, depending on whether we're going to
3673   // drain it partially (so that other tasks can steal if they run out
3674   // of things to do) or totally (at the very end). Notice that,
3675   // because we move entries from the global stack in chunks or
3676   // because another task might be doing the same, we might in fact
3677   // drop below the target. But, this is not a problem.
3678   size_t target_size;
3679   if (partially) {
3680     target_size = _cm->partial_mark_stack_size_target();
3681   } else {
3682     target_size = 0;
3683   }
3684
3685   if (_cm->mark_stack_size() > target_size) {
3686     if (_cm->verbose_low()) {
3687       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3688                              _worker_id, target_size);
3689     }
3690
3691     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3692       get_entries_from_global_stack();
3693       drain_local_queue(partially);
3694     }
3695
3696     if (_cm->verbose_low()) {
3697       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3698                              _worker_id, _cm->mark_stack_size());
3699     }
3700   }
3701 }
3702
3703 // The SATB queue set has several assumptions about whether to call the par or
3704 // non-par versions of the methods. This is why some of the code is
3705 // replicated. We should really get rid of the single-threaded version
3706 // of the code to simplify things.
3707 void CMTask::drain_satb_buffers() {
3708   if (has_aborted()) return;
3709
3710   // We set this so that the regular clock knows that we're in the
3711   // middle of draining buffers and doesn't set the abort flag when it
3712   // notices that SATB buffers are available for draining. It'd be
3713   // very counterproductive if it did that. :-)
3714   _draining_satb_buffers = true;
3715
3716   CMSATBBufferClosure satb_cl(this, _g1h);
3717   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3718
3719   // This keeps claiming and applying the closure to completed buffers
3720   // until we run out of buffers or we need to abort.
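  // Each successful apply_closure_to_completed_buffer() call claims one
  // completed buffer and applies satb_cl to its entries, graying the
  // recorded objects so they feed back into the normal transitive
  // closure; the regular_clock_call() in the loop body keeps us
  // responsive to yields and time-outs in between buffers.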
3721 while (!has_aborted() && 3722 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 3723 if (_cm->verbose_medium()) { 3724 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3725 } 3726 statsOnly( ++_satb_buffers_processed ); 3727 regular_clock_call(); 3728 } 3729 3730 _draining_satb_buffers = false; 3731 3732 assert(has_aborted() || 3733 concurrent() || 3734 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3735 3736 // again, this was a potentially expensive operation, decrease the 3737 // limits to get the regular clock call early 3738 decrease_limits(); 3739 } 3740 3741 void CMTask::print_stats() { 3742 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3743 _worker_id, _calls); 3744 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3745 _elapsed_time_ms, _termination_time_ms); 3746 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3747 _step_times_ms.num(), _step_times_ms.avg(), 3748 _step_times_ms.sd()); 3749 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3750 _step_times_ms.maximum(), _step_times_ms.sum()); 3751 3752 #if _MARKING_STATS_ 3753 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3754 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3755 _all_clock_intervals_ms.sd()); 3756 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3757 _all_clock_intervals_ms.maximum(), 3758 _all_clock_intervals_ms.sum()); 3759 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT, 3760 _clock_due_to_scanning, _clock_due_to_marking); 3761 gclog_or_tty->print_cr(" Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT, 3762 _objs_scanned, _objs_found_on_bitmap); 3763 gclog_or_tty->print_cr(" Local Queue: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3764 _local_pushes, _local_pops, _local_max_size); 3765 gclog_or_tty->print_cr(" Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3766 _global_pushes, _global_pops, _global_max_size); 3767 gclog_or_tty->print_cr(" transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT, 3768 _global_transfers_to,_global_transfers_from); 3769 gclog_or_tty->print_cr(" Regions: claimed = " SIZE_FORMAT, _regions_claimed); 3770 gclog_or_tty->print_cr(" SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed); 3771 gclog_or_tty->print_cr(" Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT, 3772 _steal_attempts, _steals); 3773 gclog_or_tty->print_cr(" Aborted: " SIZE_FORMAT ", due to", _aborted); 3774 gclog_or_tty->print_cr(" overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT, 3775 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3776 gclog_or_tty->print_cr(" time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT, 3777 _aborted_timed_out, _aborted_satb, _aborted_termination); 3778 #endif // _MARKING_STATS_ 3779 } 3780 3781 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) { 3782 return _task_queues->steal(worker_id, hash_seed, obj); 3783 } 3784 3785 /***************************************************************************** 3786 3787 The do_marking_step(time_target_ms, ...) method is the building 3788 block of the parallel marking framework. 
It can be called in parallel
3789 with other invocations of do_marking_step() on different tasks
3790 (but only one per task, obviously) and concurrently with the
3791 mutator threads, or during remark, hence it eliminates the need
3792 for two versions of the code. When called during remark, it will
3793 pick up from where the task left off during the concurrent marking
3794 phase. Interestingly, tasks are also claimable during evacuation
3795 pauses, since do_marking_step() ensures that it aborts before
3796 it needs to yield.
3797
3798 The data structures that it uses to do marking work are the
3799 following:
3800
3801 (1) Marking Bitmap. If there are gray objects that appear only
3802 on the bitmap (this happens either when dealing with an overflow
3803 or when the initial marking phase has simply marked the roots
3804 and didn't push them on the stack), then tasks claim heap
3805 regions whose bitmap they then scan to find gray objects. A
3806 global finger indicates where the end of the last claimed region
3807 is. A local finger indicates how far into the region a task has
3808 scanned. The two fingers are used to determine how to gray an
3809 object (i.e. whether simply marking it is OK, as it will be
3810 visited by a task in the future, or whether it needs to be also
3811 pushed on a stack).
3812
3813 (2) Local Queue. The local queue of the task which is accessed
3814 reasonably efficiently by the task. Other tasks can steal from
3815 it when they run out of work. Throughout the marking phase, a
3816 task attempts to keep its local queue short but not totally
3817 empty, so that entries are available for stealing by other
3818 tasks. Only when there is no more work will a task totally
3819 drain its local queue.
3820
3821 (3) Global Mark Stack. This handles local queue overflow. During
3822 marking only sets of entries are moved between it and the local
3823 queues, as access to it requires a mutex and more fine-grained
3824 interaction with it might cause contention. If it
3825 overflows, then the marking phase should restart and iterate
3826 over the bitmap to identify gray objects. Throughout the marking
3827 phase, tasks attempt to keep the global mark stack at a small
3828 length but not totally empty, so that entries are available for
3829 popping by other tasks. Only when there is no more work will tasks
3830 totally drain the global mark stack.
3831
3832 (4) SATB Buffer Queue. This is where completed SATB buffers are
3833 made available. Buffers are regularly removed from this queue
3834 and scanned for roots, so that the queue doesn't get too
3835 long. During remark, all completed buffers are processed, as
3836 well as the filled-in parts of any uncompleted buffers.
3837
3838 The do_marking_step() method tries to abort when the time target
3839 has been reached. There are a few other cases when the
3840 do_marking_step() method also aborts:
3841
3842 (1) When the marking phase has been aborted (after a Full GC).
3843
3844 (2) When a global overflow (on the global stack) has been
3845 triggered. Before the task aborts, it will actually sync up with
3846 the other tasks to ensure that all the marking data structures
3847 (local queues, stacks, fingers etc.) are re-initialized so that
3848 when do_marking_step() completes, the marking phase can
3849 immediately restart.
3850
3851 (3) When enough completed SATB buffers are available. The
3852 do_marking_step() method only tries to drain SATB buffers right
3853 at the beginning.
So, if enough buffers are available, the
3854 marking step aborts and the SATB buffers are processed at
3855 the beginning of the next invocation.
3856
3857 (4) To yield. When we have to yield, we abort and do the yield
3858 right at the end of do_marking_step(). This saves us from a lot
3859 of hassle as, by yielding, we might allow a Full GC. If this
3860 happens then objects will be compacted underneath our feet, the
3861 heap might shrink, etc. We save checking for this by just
3862 aborting and doing the yield right at the end.
3863
3864 From the above it follows that the do_marking_step() method should
3865 be called in a loop (or, otherwise, regularly) until it completes.
3866
3867 If a marking step completes without its has_aborted() flag being
3868 true, it means it has completed the current marking phase (and
3869 also all other marking tasks have done so and have all synced up).
3870
3871 A method called regular_clock_call() is invoked "regularly" (in
3872 sub-ms intervals) throughout marking. It is this clock method that
3873 checks all the abort conditions which were mentioned above and
3874 decides when the task should abort. A work-based scheme is used to
3875 trigger this clock method: when the number of object words the
3876 marking phase has scanned or the number of references the marking
3877 phase has visited reaches a given limit. Additional invocations of
3878 the clock method have been planted in a few other strategic places
3879 too. The initial reason for the clock method was to avoid calling
3880 vtime too frequently, as it is quite expensive. So, once it was in
3881 place, it was natural to piggy-back all the other conditions on it
3882 too and not constantly check them throughout the code.
3883
3884 If do_termination is true then do_marking_step will enter its
3885 termination protocol.
3886
3887 The value of is_serial must be true when do_marking_step is being
3888 called serially (i.e. by the VMThread) and do_marking_step should
3889 skip any synchronization in the termination and overflow code.
3890 Examples include the serial remark code and the serial reference
3891 processing closures.
3892
3893 The value of is_serial must be false when do_marking_step is
3894 being called by any of the worker threads in a work gang.
3895 Examples include the concurrent marking code (CMMarkingTask),
3896 the MT remark code, and the MT reference processing closures.
3897
3898 *****************************************************************************/
3899
3900 void CMTask::do_marking_step(double time_target_ms,
3901                              bool do_termination,
3902                              bool is_serial) {
3903   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
3904   assert(concurrent() == _cm->concurrent(), "they should be the same");
3905
3906   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
3907   assert(_task_queues != NULL, "invariant");
3908   assert(_task_queue != NULL, "invariant");
3909   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
3910
3911   assert(!_claimed,
3912          "only one thread should claim this task at any one time");
3913
3914   // OK, this doesn't safeguard against all possible scenarios, as it is
3915   // possible for two threads to set the _claimed flag at the same
3916   // time. But it is only for debugging purposes anyway and it will
3917   // catch most problems.
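  //
  // For reference, a typical caller drives this method in a loop along
  // these lines (a sketch only; the loop shape is modeled on the
  // concurrent marking task and the names are illustrative):
  //
  //   do {
  //     task->do_marking_step(G1ConcMarkStepDurationMillis,
  //                           true  /* do_termination */,
  //                           false /* is_serial */);
  //     // ...yield to safepoints / record pause times...
  //   } while (!cm->has_aborted() && task->has_aborted());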
3918 _claimed = true; 3919 3920 _start_time_ms = os::elapsedVTime() * 1000.0; 3921 statsOnly( _interval_start_time_ms = _start_time_ms ); 3922 3923 // If do_stealing is true then do_marking_step will attempt to 3924 // steal work from the other CMTasks. It only makes sense to 3925 // enable stealing when the termination protocol is enabled 3926 // and do_marking_step() is not being called serially. 3927 bool do_stealing = do_termination && !is_serial; 3928 3929 double diff_prediction_ms = 3930 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 3931 _time_target_ms = time_target_ms - diff_prediction_ms; 3932 3933 // set up the variables that are used in the work-based scheme to 3934 // call the regular clock method 3935 _words_scanned = 0; 3936 _refs_reached = 0; 3937 recalculate_limits(); 3938 3939 // clear all flags 3940 clear_has_aborted(); 3941 _has_timed_out = false; 3942 _draining_satb_buffers = false; 3943 3944 ++_calls; 3945 3946 if (_cm->verbose_low()) { 3947 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 3948 "target = %1.2lfms >>>>>>>>>>", 3949 _worker_id, _calls, _time_target_ms); 3950 } 3951 3952 // Set up the bitmap and oop closures. Anything that uses them is 3953 // eventually called from this method, so it is OK to allocate these 3954 // statically. 3955 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 3956 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 3957 set_cm_oop_closure(&cm_oop_closure); 3958 3959 if (_cm->has_overflown()) { 3960 // This can happen if the mark stack overflows during a GC pause 3961 // and this task, after a yield point, restarts. We have to abort 3962 // as we need to get into the overflow protocol which happens 3963 // right at the end of this task. 3964 set_has_aborted(); 3965 } 3966 3967 // First drain any available SATB buffers. After this, we will not 3968 // look at SATB buffers before the next invocation of this method. 3969 // If enough completed SATB buffers are queued up, the regular clock 3970 // will abort this task so that it restarts. 3971 drain_satb_buffers(); 3972 // ...then partially drain the local queue and the global stack 3973 drain_local_queue(true); 3974 drain_global_stack(true); 3975 3976 do { 3977 if (!has_aborted() && _curr_region != NULL) { 3978 // This means that we're already holding on to a region. 3979 assert(_finger != NULL, "if region is not NULL, then the finger " 3980 "should not be NULL either"); 3981 3982 // We might have restarted this task after an evacuation pause 3983 // which might have evacuated the region we're holding on to 3984 // underneath our feet. Let's read its limit again to make sure 3985 // that we do not iterate over a region of the heap that 3986 // contains garbage (update_region_limit() will also move 3987 // _finger to the start of the region if it is found empty). 3988 update_region_limit(); 3989 // We will start from _finger not from the start of the region, 3990 // as we might be restarting this task after aborting half-way 3991 // through scanning this region. In this case, _finger points to 3992 // the address where we last found a marked object. If this is a 3993 // fresh region, _finger points to start(). 
3994 MemRegion mr = MemRegion(_finger, _region_limit); 3995 3996 if (_cm->verbose_low()) { 3997 gclog_or_tty->print_cr("[%u] we're scanning part " 3998 "["PTR_FORMAT", "PTR_FORMAT") " 3999 "of region "HR_FORMAT, 4000 _worker_id, p2i(_finger), p2i(_region_limit), 4001 HR_FORMAT_PARAMS(_curr_region)); 4002 } 4003 4004 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 4005 "humongous regions should go around loop once only"); 4006 4007 // Some special cases: 4008 // If the memory region is empty, we can just give up the region. 4009 // If the current region is humongous then we only need to check 4010 // the bitmap for the bit associated with the start of the object, 4011 // scan the object if it's live, and give up the region. 4012 // Otherwise, let's iterate over the bitmap of the part of the region 4013 // that is left. 4014 // If the iteration is successful, give up the region. 4015 if (mr.is_empty()) { 4016 giveup_current_region(); 4017 regular_clock_call(); 4018 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 4019 if (_nextMarkBitMap->isMarked(mr.start())) { 4020 // The object is marked - apply the closure 4021 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4022 bitmap_closure.do_bit(offset); 4023 } 4024 // Even if this task aborted while scanning the humongous object 4025 // we can (and should) give up the current region. 4026 giveup_current_region(); 4027 regular_clock_call(); 4028 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4029 giveup_current_region(); 4030 regular_clock_call(); 4031 } else { 4032 assert(has_aborted(), "currently the only way to do so"); 4033 // The only way to abort the bitmap iteration is to return 4034 // false from the do_bit() method. However, inside the 4035 // do_bit() method we move the _finger to point to the 4036 // object currently being looked at. So, if we bail out, we 4037 // have definitely set _finger to something non-null. 4038 assert(_finger != NULL, "invariant"); 4039 4040 // Region iteration was actually aborted. So now _finger 4041 // points to the address of the object we last scanned. If we 4042 // leave it there, when we restart this task, we will rescan 4043 // the object. It is easy to avoid this. We move the finger by 4044 // enough to point to the next possible object header (the 4045 // bitmap knows by how much we need to move it as it knows its 4046 // granularity). 4047 assert(_finger < _region_limit, "invariant"); 4048 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4049 // Check if bitmap iteration was aborted while scanning the last object 4050 if (new_finger >= _region_limit) { 4051 giveup_current_region(); 4052 } else { 4053 move_finger_to(new_finger); 4054 } 4055 } 4056 } 4057 // At this point we have either completed iterating over the 4058 // region we were holding on to, or we have aborted. 4059 4060 // We then partially drain the local queue and the global stack. 4061 // (Do we really need this?) 4062 drain_local_queue(true); 4063 drain_global_stack(true); 4064 4065 // Read the note on the claim_region() method on why it might 4066 // return NULL with potentially more regions available for 4067 // claiming and why we have to check out_of_regions() to determine 4068 // whether we're done or not. 4069 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4070 // We are going to try to claim a new region. We should have 4071 // given up on the previous one. 
4072     // Separated the asserts so that we know which one fires.
4073     assert(_curr_region == NULL, "invariant");
4074     assert(_finger == NULL, "invariant");
4075     assert(_region_limit == NULL, "invariant");
4076     if (_cm->verbose_low()) {
4077       gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
4078     }
4079     HeapRegion* claimed_region = _cm->claim_region(_worker_id);
4080     if (claimed_region != NULL) {
4081       // Yes, we managed to claim one
4082       statsOnly( ++_regions_claimed );
4083
4084       if (_cm->verbose_low()) {
4085         gclog_or_tty->print_cr("[%u] we successfully claimed "
4086                                "region "PTR_FORMAT,
4087                                _worker_id, p2i(claimed_region));
4088       }
4089
4090       setup_for_region(claimed_region);
4091       assert(_curr_region == claimed_region, "invariant");
4092     }
4093     // It is important to call the regular clock here. It might take
4094     // a while to claim a region if, for example, we hit a large
4095     // block of empty regions. So we need to call the regular clock
4096     // method once round the loop to make sure it's called
4097     // frequently enough.
4098     regular_clock_call();
4099   }
4100
4101   if (!has_aborted() && _curr_region == NULL) {
4102     assert(_cm->out_of_regions(),
4103            "at this point we should be out of regions");
4104   }
4105   } while ( _curr_region != NULL && !has_aborted());
4106
4107   if (!has_aborted()) {
4108     // We cannot check whether the global stack is empty, since other
4109     // tasks might be pushing objects to it concurrently.
4110     assert(_cm->out_of_regions(),
4111            "at this point we should be out of regions");
4112
4113     if (_cm->verbose_low()) {
4114       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
4115     }
4116
4117     // Try to reduce the number of available SATB buffers so that
4118     // remark has less work to do.
4119     drain_satb_buffers();
4120   }
4121
4122   // Since we've done everything else, we can now totally drain the
4123   // local queue and global stack.
4124   drain_local_queue(false);
4125   drain_global_stack(false);
4126
4127   // Attempt at work stealing from other tasks' queues.
4128   if (do_stealing && !has_aborted()) {
4129     // We have not aborted. This means that we have finished all that
4130     // we could. Let's try to do some stealing...
4131
4132     // We cannot check whether the global stack is empty, since other
4133     // tasks might be pushing objects to it concurrently.
4134     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4135            "only way to reach here");
4136
4137     if (_cm->verbose_low()) {
4138       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
4139     }
4140
4141     while (!has_aborted()) {
4142       oop obj;
4143       statsOnly( ++_steal_attempts );
4144
4145       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
4146         if (_cm->verbose_medium()) {
4147           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
4148                                  _worker_id, p2i((void*) obj));
4149         }
4150
4151         statsOnly( ++_steals );
4152
4153         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4154                "any stolen object should be marked");
4155         scan_object(obj);
4156
4157         // And since we're towards the end, let's totally drain the
4158         // local queue and global stack.
4159         drain_local_queue(false);
4160         drain_global_stack(false);
4161       } else {
4162         break;
4163       }
4164     }
4165   }
4166
4167   // If we are about to wrap up and go into termination, check if we
4168   // should raise the overflow flag.
4169   if (do_termination && !has_aborted()) {
4170     if (_cm->force_overflow()->should_force()) {
4171       _cm->set_has_overflown();
4172       regular_clock_call();
4173     }
4174   }
4175
4176   // We still haven't aborted.
Now, let's try to get into the 4177 // termination protocol. 4178 if (do_termination && !has_aborted()) { 4179 // We cannot check whether the global stack is empty, since other 4180 // tasks might be concurrently pushing objects on it. 4181 // Separated the asserts so that we know which one fires. 4182 assert(_cm->out_of_regions(), "only way to reach here"); 4183 assert(_task_queue->size() == 0, "only way to reach here"); 4184 4185 if (_cm->verbose_low()) { 4186 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); 4187 } 4188 4189 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 4190 4191 // The CMTask class also extends the TerminatorTerminator class, 4192 // hence its should_exit_termination() method will also decide 4193 // whether to exit the termination protocol or not. 4194 bool finished = (is_serial || 4195 _cm->terminator()->offer_termination(this)); 4196 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 4197 _termination_time_ms += 4198 termination_end_time_ms - _termination_start_time_ms; 4199 4200 if (finished) { 4201 // We're all done. 4202 4203 if (_worker_id == 0) { 4204 // let's allow task 0 to do this 4205 if (concurrent()) { 4206 assert(_cm->concurrent_marking_in_progress(), "invariant"); 4207 // we need to set this to false before the next 4208 // safepoint. This way we ensure that the marking phase 4209 // doesn't observe any more heap expansions. 4210 _cm->clear_concurrent_marking_in_progress(); 4211 } 4212 } 4213 4214 // We can now guarantee that the global stack is empty, since 4215 // all other tasks have finished. We separated the guarantees so 4216 // that, if a condition is false, we can immediately find out 4217 // which one. 4218 guarantee(_cm->out_of_regions(), "only way to reach here"); 4219 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 4220 guarantee(_task_queue->size() == 0, "only way to reach here"); 4221 guarantee(!_cm->has_overflown(), "only way to reach here"); 4222 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 4223 4224 if (_cm->verbose_low()) { 4225 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); 4226 } 4227 } else { 4228 // Apparently there's more work to do. Let's abort this task. It 4229 // will restart it and we can hopefully find more things to do. 4230 4231 if (_cm->verbose_low()) { 4232 gclog_or_tty->print_cr("[%u] apparently there is more work to do", 4233 _worker_id); 4234 } 4235 4236 set_has_aborted(); 4237 statsOnly( ++_aborted_termination ); 4238 } 4239 } 4240 4241 // Mainly for debugging purposes to make sure that a pointer to the 4242 // closure which was statically allocated in this frame doesn't 4243 // escape it by accident. 4244 set_cm_oop_closure(NULL); 4245 double end_time_ms = os::elapsedVTime() * 1000.0; 4246 double elapsed_time_ms = end_time_ms - _start_time_ms; 4247 // Update the step history. 4248 _step_times_ms.add(elapsed_time_ms); 4249 4250 if (has_aborted()) { 4251 // The task was aborted for some reason. 4252 4253 statsOnly( ++_aborted ); 4254 4255 if (_has_timed_out) { 4256 double diff_ms = elapsed_time_ms - _time_target_ms; 4257 // Keep statistics of how well we did with respect to hitting 4258 // our target only if we actually timed out (if we aborted for 4259 // other reasons, then the results might get skewed). 4260 _marking_step_diffs_ms.add(diff_ms); 4261 } 4262 4263 if (_cm->has_overflown()) { 4264 // This is the interesting one. We aborted because a global 4265 // overflow was raised. 
This means we have to restart the 4266 // marking phase and start iterating over regions. However, in 4267 // order to do this we have to make sure that all tasks stop 4268 // what they are doing and re-initialize in a safe manner. We 4269 // will achieve this with the use of two barrier sync points. 4270 4271 if (_cm->verbose_low()) { 4272 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4273 } 4274 4275 if (!is_serial) { 4276 // We only need to enter the sync barrier if being called 4277 // from a parallel context 4278 _cm->enter_first_sync_barrier(_worker_id); 4279 4280 // When we exit this sync barrier we know that all tasks have 4281 // stopped doing marking work. So, it's now safe to 4282 // re-initialize our data structures. At the end of this method, 4283 // task 0 will clear the global data structures. 4284 } 4285 4286 statsOnly( ++_aborted_overflow ); 4287 4288 // We clear the local state of this task... 4289 clear_region_fields(); 4290 4291 if (!is_serial) { 4292 // ...and enter the second barrier. 4293 _cm->enter_second_sync_barrier(_worker_id); 4294 } 4295 // At this point, if we're during the concurrent phase of 4296 // marking, everything has been re-initialized and we're 4297 // ready to restart. 4298 } 4299 4300 if (_cm->verbose_low()) { 4301 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4302 "elapsed = %1.2lfms <<<<<<<<<<", 4303 _worker_id, _time_target_ms, elapsed_time_ms); 4304 if (_cm->has_aborted()) { 4305 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4306 _worker_id); 4307 } 4308 } 4309 } else { 4310 if (_cm->verbose_low()) { 4311 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4312 "elapsed = %1.2lfms <<<<<<<<<<", 4313 _worker_id, _time_target_ms, elapsed_time_ms); 4314 } 4315 } 4316 4317 _claimed = false; 4318 } 4319 4320 CMTask::CMTask(uint worker_id, 4321 ConcurrentMark* cm, 4322 size_t* marked_bytes, 4323 BitMap* card_bm, 4324 CMTaskQueue* task_queue, 4325 CMTaskQueueSet* task_queues) 4326 : _g1h(G1CollectedHeap::heap()), 4327 _worker_id(worker_id), _cm(cm), 4328 _claimed(false), 4329 _nextMarkBitMap(NULL), _hash_seed(17), 4330 _task_queue(task_queue), 4331 _task_queues(task_queues), 4332 _cm_oop_closure(NULL), 4333 _marked_bytes_array(marked_bytes), 4334 _card_bm(card_bm) { 4335 guarantee(task_queue != NULL, "invariant"); 4336 guarantee(task_queues != NULL, "invariant"); 4337 4338 statsOnly( _clock_due_to_scanning = 0; 4339 _clock_due_to_marking = 0 ); 4340 4341 _marking_step_diffs_ms.add(0.5); 4342 } 4343 4344 // These are formatting macros that are used below to ensure 4345 // consistent formatting. The *_H_* versions are used to format the 4346 // header for a particular value and they should be kept consistent 4347 // with the corresponding macro. Also note that most of the macros add 4348 // the necessary white space (as a prefix) which makes them a bit 4349 // easier to compose. 4350 4351 // All the output lines are prefixed with this string to be able to 4352 // identify them easily in a large log file. 
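//
// For example, with the formats below a per-region line comes out
// roughly as (values made up for illustration):
//
//   ###  OLD 0x00000000f0000000-0x00000000f0100000   1048576    912384    875520           42.0      4216        0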
4353 #define G1PPRL_LINE_PREFIX "###"
4354
4355 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
4356 #ifdef _LP64
4357 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4358 #else // _LP64
4359 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4360 #endif // _LP64
4361
4362 // For per-region info
4363 #define G1PPRL_TYPE_FORMAT " %-4s"
4364 #define G1PPRL_TYPE_H_FORMAT " %4s"
4365 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
4366 #define G1PPRL_BYTE_H_FORMAT " %9s"
4367 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4368 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4369
4370 // For summary info
4371 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
4372 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
4373 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
4374 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
4375
4376 G1PrintRegionLivenessInfoClosure::
4377 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4378   : _out(out),
4379     _total_used_bytes(0), _total_capacity_bytes(0),
4380     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4381     _hum_used_bytes(0), _hum_capacity_bytes(0),
4382     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4383     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4384   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4385   MemRegion g1_reserved = g1h->g1_reserved();
4386   double now = os::elapsedTime();
4387
4388   // Print the header of the output.
4389   _out->cr();
4390   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4391   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4392                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4393                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4394                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4395                  HeapRegion::GrainBytes);
4396   _out->print_cr(G1PPRL_LINE_PREFIX);
4397   _out->print_cr(G1PPRL_LINE_PREFIX
4398                  G1PPRL_TYPE_H_FORMAT
4399                  G1PPRL_ADDR_BASE_H_FORMAT
4400                  G1PPRL_BYTE_H_FORMAT
4401                  G1PPRL_BYTE_H_FORMAT
4402                  G1PPRL_BYTE_H_FORMAT
4403                  G1PPRL_DOUBLE_H_FORMAT
4404                  G1PPRL_BYTE_H_FORMAT
4405                  G1PPRL_BYTE_H_FORMAT,
4406                  "type", "address-range",
4407                  "used", "prev-live", "next-live", "gc-eff",
4408                  "remset", "code-roots");
4409   _out->print_cr(G1PPRL_LINE_PREFIX
4410                  G1PPRL_TYPE_H_FORMAT
4411                  G1PPRL_ADDR_BASE_H_FORMAT
4412                  G1PPRL_BYTE_H_FORMAT
4413                  G1PPRL_BYTE_H_FORMAT
4414                  G1PPRL_BYTE_H_FORMAT
4415                  G1PPRL_DOUBLE_H_FORMAT
4416                  G1PPRL_BYTE_H_FORMAT
4417                  G1PPRL_BYTE_H_FORMAT,
4418                  "", "",
4419                  "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4420                  "(bytes)", "(bytes)");
4421 }
4422
4423 // It takes as a parameter a reference to one of the _hum_* fields. It
4424 // deduces the corresponding value for a region in a humongous region
4425 // series (either the region size, or what's left if the _hum_* field
4426 // is < the region size), and updates the _hum_* field accordingly.
4427 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4428   size_t bytes = 0;
4429   // The > 0 check is to deal with the prev and next live bytes which
4430   // could be 0.
4431   if (*hum_bytes > 0) {
4432     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4433     *hum_bytes -= bytes;
4434   }
4435   return bytes;
4436 }
4437
4438 // It deduces the values for a region in a humongous region series
4439 // from the _hum_* fields and updates those accordingly. It assumes
4440 // that the _hum_* fields have already been set up from the "starts
4441 // humongous" region and that we visit the regions in address order.
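//
// A worked example (illustrative): a 2.5 MB humongous object with 1 MB
// regions spans three regions. The "starts humongous" region seeds
// _hum_used_bytes with the full 2.5 MB; each region in the series then
// takes MIN2(GrainBytes, what's left), i.e. 1 MB, 1 MB and 0.5 MB,
// draining the field to zero by the last region.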
4442 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4443 size_t* capacity_bytes, 4444 size_t* prev_live_bytes, 4445 size_t* next_live_bytes) { 4446 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4447 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4448 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4449 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4450 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4451 } 4452 4453 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4454 const char* type = r->get_type_str(); 4455 HeapWord* bottom = r->bottom(); 4456 HeapWord* end = r->end(); 4457 size_t capacity_bytes = r->capacity(); 4458 size_t used_bytes = r->used(); 4459 size_t prev_live_bytes = r->live_bytes(); 4460 size_t next_live_bytes = r->next_live_bytes(); 4461 double gc_eff = r->gc_efficiency(); 4462 size_t remset_bytes = r->rem_set()->mem_size(); 4463 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4464 4465 if (r->is_starts_humongous()) { 4466 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4467 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4468 "they should have been zeroed after the last time we used them"); 4469 // Set up the _hum_* fields. 4470 _hum_capacity_bytes = capacity_bytes; 4471 _hum_used_bytes = used_bytes; 4472 _hum_prev_live_bytes = prev_live_bytes; 4473 _hum_next_live_bytes = next_live_bytes; 4474 get_hum_bytes(&used_bytes, &capacity_bytes, 4475 &prev_live_bytes, &next_live_bytes); 4476 end = bottom + HeapRegion::GrainWords; 4477 } else if (r->is_continues_humongous()) { 4478 get_hum_bytes(&used_bytes, &capacity_bytes, 4479 &prev_live_bytes, &next_live_bytes); 4480 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4481 } 4482 4483 _total_used_bytes += used_bytes; 4484 _total_capacity_bytes += capacity_bytes; 4485 _total_prev_live_bytes += prev_live_bytes; 4486 _total_next_live_bytes += next_live_bytes; 4487 _total_remset_bytes += remset_bytes; 4488 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4489 4490 // Print a line for this particular region. 4491 _out->print_cr(G1PPRL_LINE_PREFIX 4492 G1PPRL_TYPE_FORMAT 4493 G1PPRL_ADDR_BASE_FORMAT 4494 G1PPRL_BYTE_FORMAT 4495 G1PPRL_BYTE_FORMAT 4496 G1PPRL_BYTE_FORMAT 4497 G1PPRL_DOUBLE_FORMAT 4498 G1PPRL_BYTE_FORMAT 4499 G1PPRL_BYTE_FORMAT, 4500 type, p2i(bottom), p2i(end), 4501 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4502 remset_bytes, strong_code_roots_bytes); 4503 4504 return false; 4505 } 4506 4507 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4508 // add static memory usages to remembered set sizes 4509 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4510 // Print the footer of the output. 
4511 _out->print_cr(G1PPRL_LINE_PREFIX); 4512 _out->print_cr(G1PPRL_LINE_PREFIX 4513 " SUMMARY" 4514 G1PPRL_SUM_MB_FORMAT("capacity") 4515 G1PPRL_SUM_MB_PERC_FORMAT("used") 4516 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4517 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4518 G1PPRL_SUM_MB_FORMAT("remset") 4519 G1PPRL_SUM_MB_FORMAT("code-roots"), 4520 bytes_to_mb(_total_capacity_bytes), 4521 bytes_to_mb(_total_used_bytes), 4522 perc(_total_used_bytes, _total_capacity_bytes), 4523 bytes_to_mb(_total_prev_live_bytes), 4524 perc(_total_prev_live_bytes, _total_capacity_bytes), 4525 bytes_to_mb(_total_next_live_bytes), 4526 perc(_total_next_live_bytes, _total_capacity_bytes), 4527 bytes_to_mb(_total_remset_bytes), 4528 bytes_to_mb(_total_strong_code_roots_bytes)); 4529 _out->cr(); 4530 }
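// Example (sketch): this closure is meant to be driven over the whole
// heap in address order, e.g.:
//
//   G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//
// Address order matters: the humongous accounting above assumes the
// "starts humongous" region is visited before its "continues humongous"
// tail.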