/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/strongRootsScope.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/taskqueue.inline.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
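  // Each bit in the map covers (1 << _shifter) heap words, so the
  // alignment below rounds addr up to the first address that a single
  // bit can represent (with a _shifter of 0 it is a no-op).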
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;  // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    if (_suspendible) {
      SuspendibleThreadSet::join();
    }
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
    if (_suspendible) {
      SuspendibleThreadSet::leave();
    }
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

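// The global mark stack is a fixed-capacity array of oops backed by its
// own virtual space (see allocate() below). Concurrent pushes are
// serialized either with a CAS on _index (par_push(), par_adjoin_arr())
// or with the ParGCRareEvent_lock (par_push_arr(), par_pop_arr()). On
// overflow the stack only sets _overflow; expansion is deferred to a
// safepoint via set_should_expand() and expand().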
CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
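    // (The CAS failed: another thread advanced _index under us, so loop
    // around, re-read _index and retry with the new value.)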
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
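  // If the scan was aborted part way through, claim_next() may not have
  // handed out the last survivor, so _next_survivor is only guaranteed
  // to be NULL after a clean finish.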
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor          = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
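    // scale_parallel_threads() above returns MAX2((n + 2) / 4, 1U), i.e.
    // roughly a quarter of the parallel GC threads; for example,
    // ParallelGCThreads == 8 yields 2 concurrent marking threads.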
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Marker",
                                           _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
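    // Only values supplied on the command line are checked here; the two
    // cases below differ on whether MarkStackSizeMax was also set
    // explicitly or is still at its default.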
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty(); // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC, or for an
 * evacuation pause to occur. This is actually safe, since entering
 * the sync barrier is one of the last things do_marking_step() does,
 * and it doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots()
  // and the decisions on that MT processing are made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
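    // This deactivation mirrors the initial-mark pause, which activated
    // the queues with the opposite transition (see
    // checkpointRootsInitialPost()).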
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
1637 1638 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1639 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1640 1641 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1642 expected = _exp_card_bm->at(i); 1643 actual = _card_bm->at(i); 1644 1645 if (expected && !actual) { 1646 if (_verbose) { 1647 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1648 "expected: %s, actual: %s", 1649 hr->hrm_index(), i, 1650 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1651 } 1652 failures += 1; 1653 } 1654 } 1655 1656 if (failures > 0 && _verbose) { 1657 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1658 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1659 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1660 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1661 } 1662 1663 _failures += failures; 1664 1665 // We could stop iteration over the heap when we 1666 // find the first violating region by returning true. 1667 return false; 1668 } 1669 }; 1670 1671 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1672 protected: 1673 G1CollectedHeap* _g1h; 1674 ConcurrentMark* _cm; 1675 BitMap* _actual_region_bm; 1676 BitMap* _actual_card_bm; 1677 1678 uint _n_workers; 1679 1680 BitMap* _expected_region_bm; 1681 BitMap* _expected_card_bm; 1682 1683 int _failures; 1684 bool _verbose; 1685 1686 HeapRegionClaimer _hrclaimer; 1687 1688 public: 1689 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1690 BitMap* region_bm, BitMap* card_bm, 1691 BitMap* expected_region_bm, BitMap* expected_card_bm) 1692 : AbstractGangTask("G1 verify final counting"), 1693 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1694 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1695 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1696 _failures(0), _verbose(false), 1697 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1698 assert(VerifyDuringGC, "don't call this otherwise"); 1699 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1700 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1701 1702 _verbose = _cm->verbose_medium(); 1703 } 1704 1705 void work(uint worker_id) { 1706 assert(worker_id < _n_workers, "invariant"); 1707 1708 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1709 _actual_region_bm, _actual_card_bm, 1710 _expected_region_bm, 1711 _expected_card_bm, 1712 _verbose); 1713 1714 _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer); 1715 1716 Atomic::add(verify_cl.failures(), &_failures); 1717 } 1718 1719 int failures() const { return _failures; } 1720 }; 1721 1722 // Closure that finalizes the liveness counting data. 1723 // Used during the cleanup pause. 1724 // Sets the bits corresponding to the interval [NTAMS, top] 1725 // (which contains the implicitly live objects) in the 1726 // card liveness bitmap. Also sets the bit for each region, 1727 // containing live data, in the region liveness bitmap. 1728 1729 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1730 public: 1731 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1732 BitMap* region_bm, 1733 BitMap* card_bm) : 1734 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1735 1736 bool doHeapRegion(HeapRegion* hr) { 1737 1738 if (hr->is_continues_humongous()) { 1739 // We will ignore these here and process them when their 1740 // associated "starts humongous" region is processed (see 1741 // set_bit_for_heap_region()). 
Note that we cannot rely on their 1742 // associated "starts humongous" region to have their bit set to 1743 // 1 since, due to the region chunking in the parallel region 1744 // iteration, a "continues humongous" region might be visited 1745 // before its associated "starts humongous". 1746 return false; 1747 } 1748 1749 HeapWord* ntams = hr->next_top_at_mark_start(); 1750 HeapWord* top = hr->top(); 1751 1752 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1753 1754 // Mark the allocated-since-marking portion... 1755 if (ntams < top) { 1756 // This definitely means the region has live objects. 1757 set_bit_for_region(hr); 1758 1759 // Now set the bits in the card bitmap for [ntams, top) 1760 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1761 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1762 1763 // Note: if we're looking at the last region in heap - top 1764 // could be actually just beyond the end of the heap; end_idx 1765 // will then correspond to a (non-existent) card that is also 1766 // just beyond the heap. 1767 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1768 // end of object is not card aligned - increment to cover 1769 // all the cards spanned by the object 1770 end_idx += 1; 1771 } 1772 1773 assert(end_idx <= _card_bm->size(), 1774 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1775 end_idx, _card_bm->size())); 1776 assert(start_idx < _card_bm->size(), 1777 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1778 start_idx, _card_bm->size())); 1779 1780 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1781 } 1782 1783 // Set the bit for the region if it contains live data 1784 if (hr->next_marked_bytes() > 0) { 1785 set_bit_for_region(hr); 1786 } 1787 1788 return false; 1789 } 1790 }; 1791 1792 class G1ParFinalCountTask: public AbstractGangTask { 1793 protected: 1794 G1CollectedHeap* _g1h; 1795 ConcurrentMark* _cm; 1796 BitMap* _actual_region_bm; 1797 BitMap* _actual_card_bm; 1798 1799 uint _n_workers; 1800 HeapRegionClaimer _hrclaimer; 1801 1802 public: 1803 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1804 : AbstractGangTask("G1 final counting"), 1805 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1806 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1807 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1808 } 1809 1810 void work(uint worker_id) { 1811 assert(worker_id < _n_workers, "invariant"); 1812 1813 FinalCountDataUpdateClosure final_update_cl(_g1h, 1814 _actual_region_bm, 1815 _actual_card_bm); 1816 1817 _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer); 1818 } 1819 }; 1820 1821 class G1ParNoteEndTask; 1822 1823 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1824 G1CollectedHeap* _g1; 1825 size_t _max_live_bytes; 1826 uint _regions_claimed; 1827 size_t _freed_bytes; 1828 FreeRegionList* _local_cleanup_list; 1829 HeapRegionSetCount _old_regions_removed; 1830 HeapRegionSetCount _humongous_regions_removed; 1831 HRRSCleanupTask* _hrrs_cleanup_task; 1832 double _claimed_region_time; 1833 double _max_region_time; 1834 1835 public: 1836 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1837 FreeRegionList* local_cleanup_list, 1838 HRRSCleanupTask* hrrs_cleanup_task) : 1839 _g1(g1), 1840 _max_live_bytes(0), _regions_claimed(0), 1841 _freed_bytes(0), 1842 _claimed_region_time(0.0), _max_region_time(0.0), 1843 _local_cleanup_list(local_cleanup_list), 1844 
_old_regions_removed(), 1845 _humongous_regions_removed(), 1846 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1847 1848 size_t freed_bytes() { return _freed_bytes; } 1849 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } 1850 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } 1851 1852 bool doHeapRegion(HeapRegion *hr) { 1853 if (hr->is_continues_humongous()) { 1854 return false; 1855 } 1856 // We use a claim value of zero here because all regions 1857 // were claimed with value 1 in the FinalCount task. 1858 _g1->reset_gc_time_stamps(hr); 1859 double start = os::elapsedTime(); 1860 _regions_claimed++; 1861 hr->note_end_of_marking(); 1862 _max_live_bytes += hr->max_live_bytes(); 1863 1864 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1865 _freed_bytes += hr->used(); 1866 hr->set_containing_set(NULL); 1867 if (hr->is_humongous()) { 1868 assert(hr->is_starts_humongous(), "we should only see starts humongous"); 1869 _humongous_regions_removed.increment(1u, hr->capacity()); 1870 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1871 } else { 1872 _old_regions_removed.increment(1u, hr->capacity()); 1873 _g1->free_region(hr, _local_cleanup_list, true); 1874 } 1875 } else { 1876 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1877 } 1878 1879 double region_time = (os::elapsedTime() - start); 1880 _claimed_region_time += region_time; 1881 if (region_time > _max_region_time) { 1882 _max_region_time = region_time; 1883 } 1884 return false; 1885 } 1886 1887 size_t max_live_bytes() { return _max_live_bytes; } 1888 uint regions_claimed() { return _regions_claimed; } 1889 double claimed_region_time_sec() { return _claimed_region_time; } 1890 double max_region_time_sec() { return _max_region_time; } 1891 }; 1892 1893 class G1ParNoteEndTask: public AbstractGangTask { 1894 friend class G1NoteEndOfConcMarkClosure; 1895 1896 protected: 1897 G1CollectedHeap* _g1h; 1898 size_t _max_live_bytes; 1899 size_t _freed_bytes; 1900 FreeRegionList* _cleanup_list; 1901 HeapRegionClaimer _hrclaimer; 1902 1903 public: 1904 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1905 AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { 1906 } 1907 1908 void work(uint worker_id) { 1909 FreeRegionList local_cleanup_list("Local Cleanup List"); 1910 HRRSCleanupTask hrrs_cleanup_task; 1911 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1912 &hrrs_cleanup_task); 1913 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); 1914 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1915 1916 // Now update the lists 1917 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1918 { 1919 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1920 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1921 _max_live_bytes += g1_note_end.max_live_bytes(); 1922 _freed_bytes += g1_note_end.freed_bytes(); 1923 1924 // If we iterate over the global cleanup list at the end of 1925 // cleanup to do this printing we will not guarantee to only 1926 // generate output for the newly-reclaimed regions (the list 1927 // might not be empty at the beginning of cleanup; we might 1928 // still be working on its previous contents). So we do the 1929 // printing here, before we append the new regions to the global 1930 // cleanup list. 
1931 1932 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1933 if (hr_printer->is_active()) { 1934 FreeRegionListIterator iter(&local_cleanup_list); 1935 while (iter.more_available()) { 1936 HeapRegion* hr = iter.get_next(); 1937 hr_printer->cleanup(hr); 1938 } 1939 } 1940 1941 _cleanup_list->add_ordered(&local_cleanup_list); 1942 assert(local_cleanup_list.is_empty(), "post-condition"); 1943 1944 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1945 } 1946 } 1947 size_t max_live_bytes() { return _max_live_bytes; } 1948 size_t freed_bytes() { return _freed_bytes; } 1949 }; 1950 1951 class G1ParScrubRemSetTask: public AbstractGangTask { 1952 protected: 1953 G1RemSet* _g1rs; 1954 BitMap* _region_bm; 1955 BitMap* _card_bm; 1956 HeapRegionClaimer _hrclaimer; 1957 1958 public: 1959 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1960 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1961 } 1962 1963 void work(uint worker_id) { 1964 _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer); 1965 } 1966 1967 }; 1968 1969 void ConcurrentMark::cleanup() { 1970 // world is stopped at this checkpoint 1971 assert(SafepointSynchronize::is_at_safepoint(), 1972 "world should be stopped"); 1973 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1974 1975 // If a full collection has happened, we shouldn't do this. 1976 if (has_aborted()) { 1977 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1978 return; 1979 } 1980 1981 g1h->verify_region_sets_optional(); 1982 1983 if (VerifyDuringGC) { 1984 HandleMark hm; // handle scope 1985 g1h->prepare_for_verify(); 1986 Universe::verify(VerifyOption_G1UsePrevMarking, 1987 " VerifyDuringGC:(before)"); 1988 } 1989 g1h->check_bitmaps("Cleanup Start"); 1990 1991 G1CollectorPolicy* g1p = g1h->g1_policy(); 1992 g1p->record_concurrent_mark_cleanup_start(); 1993 1994 double start = os::elapsedTime(); 1995 1996 HeapRegionRemSet::reset_for_cleanup_tasks(); 1997 1998 uint n_workers; 1999 2000 // Do counting once more with the world stopped for good measure. 2001 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 2002 2003 g1h->set_par_threads(); 2004 n_workers = g1h->n_par_threads(); 2005 assert(g1h->n_par_threads() == n_workers, 2006 "Should not have been reset"); 2007 g1h->workers()->run_task(&g1_par_count_task); 2008 // Done with the parallel phase so reset to 0. 2009 g1h->set_par_threads(0); 2010 2011 if (VerifyDuringGC) { 2012 // Verify that the counting data accumulated during marking matches 2013 // that calculated by walking the marking bitmap. 2014 2015 // Bitmaps to hold expected values 2016 BitMap expected_region_bm(_region_bm.size(), true); 2017 BitMap expected_card_bm(_card_bm.size(), true); 2018 2019 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 2020 &_region_bm, 2021 &_card_bm, 2022 &expected_region_bm, 2023 &expected_card_bm); 2024 2025 g1h->set_par_threads((int)n_workers); 2026 g1h->workers()->run_task(&g1_par_verify_task); 2027 // Done with the parallel phase so reset to 0. 
    g1h->set_par_threads(0);

    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  }

  size_t start_used_bytes = g1h->used();
  g1h->set_marking_complete();

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitmap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->set_par_threads((int)n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->set_par_threads(0);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Do the remembered set scrubbing before the
  // record_concurrent_mark_cleanup_end() call below, since it affects
  // the metric by which we sort the heap regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
    g1h->set_par_threads((int)n_workers);
    g1h->workers()->run_task(&g1_par_scrub_rs_task);
    g1h->set_par_threads(0);

    double rs_scrub_end = os::elapsedTime();
    double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
    _total_rs_scrub_time += this_rs_scrub_time;
  }

  // This will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  if (G1Log::fine()) {
    g1h->g1_policy()->print_heap_transition(start_used_bytes);
  }

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(after)");
  }

  g1h->check_bitmaps("Cleanup End");

  g1h->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();

  g1h->trace_heap_after_concurrent_cycle();
}

void ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                           "cleanup list has %u entries",
                           _cleanup_list.length());
  }

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks.
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                               "appending %u entries to the secondary_free_list, "
                               "cleanup list still has %u entries",
                               tmp_free_list.length(),
                               _cleanup_list.length());
      }

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking.

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the CMTask associated with a worker thread (for serial
// reference processing the CMTask for worker 0 is used) to preserve
// (mark) and trace referent objects.
//
// Using the CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
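//
// Roughly: the closure counts down from G1RefProcDrainInterval as it
// handles referents; each time the counter reaches zero it calls
// CMTask::do_marking_step() to drain the entries pushed so far before
// moving on to the next batch of references.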
2198 2199 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2200 ConcurrentMark* _cm; 2201 CMTask* _task; 2202 int _ref_counter_limit; 2203 int _ref_counter; 2204 bool _is_serial; 2205 public: 2206 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2207 _cm(cm), _task(task), _is_serial(is_serial), 2208 _ref_counter_limit(G1RefProcDrainInterval) { 2209 assert(_ref_counter_limit > 0, "sanity"); 2210 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2211 _ref_counter = _ref_counter_limit; 2212 } 2213 2214 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2215 virtual void do_oop( oop* p) { do_oop_work(p); } 2216 2217 template <class T> void do_oop_work(T* p) { 2218 if (!_cm->has_overflown()) { 2219 oop obj = oopDesc::load_decode_heap_oop(p); 2220 if (_cm->verbose_high()) { 2221 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2222 "*"PTR_FORMAT" = "PTR_FORMAT, 2223 _task->worker_id(), p2i(p), p2i((void*) obj)); 2224 } 2225 2226 _task->deal_with_reference(obj); 2227 _ref_counter--; 2228 2229 if (_ref_counter == 0) { 2230 // We have dealt with _ref_counter_limit references, pushing them 2231 // and objects reachable from them on to the local stack (and 2232 // possibly the global stack). Call CMTask::do_marking_step() to 2233 // process these entries. 2234 // 2235 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2236 // there's nothing more to do (i.e. we're done with the entries that 2237 // were pushed as a result of the CMTask::deal_with_reference() calls 2238 // above) or we overflow. 2239 // 2240 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2241 // flag while there may still be some work to do. (See the comment at 2242 // the beginning of CMTask::do_marking_step() for those conditions - 2243 // one of which is reaching the specified time target.) It is only 2244 // when CMTask::do_marking_step() returns without setting the 2245 // has_aborted() flag that the marking step has completed. 2246 do { 2247 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2248 _task->do_marking_step(mark_step_duration_ms, 2249 false /* do_termination */, 2250 _is_serial); 2251 } while (_task->has_aborted() && !_cm->has_overflown()); 2252 _ref_counter = _ref_counter_limit; 2253 } 2254 } else { 2255 if (_cm->verbose_high()) { 2256 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2257 } 2258 } 2259 } 2260 }; 2261 2262 // 'Drain' oop closure used by both serial and parallel reference processing. 2263 // Uses the CMTask associated with a given worker thread (for serial 2264 // reference processing the CMtask for worker 0 is used). Calls the 2265 // do_marking_step routine, with an unbelievably large timeout value, 2266 // to drain the marking data structures of the remaining entries 2267 // added by the 'keep alive' oop closure above. 
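// (The very large time target effectively disables the time-based
// abort inside do_marking_step(), so the drain is bounded by the work
// remaining rather than by time.)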
class G1CMDrainMarkingStackClosure: public VoidClosure {
  ConcurrentMark* _cm;
  CMTask*         _task;
  bool            _is_serial;
 public:
  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
                               _task->worker_id(), BOOL_TO_STR(_is_serial));
      }

      // We call CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking.

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;
  WorkGang*        _workers;
  int              _active_workers;

 public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          ConcurrentMark* cm,
                          WorkGang* workers,
                          int n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
2329 virtual void execute(ProcessTask& task); 2330 virtual void execute(EnqueueTask& task); 2331 }; 2332 2333 class G1CMRefProcTaskProxy: public AbstractGangTask { 2334 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2335 ProcessTask& _proc_task; 2336 G1CollectedHeap* _g1h; 2337 ConcurrentMark* _cm; 2338 2339 public: 2340 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2341 G1CollectedHeap* g1h, 2342 ConcurrentMark* cm) : 2343 AbstractGangTask("Process reference objects in parallel"), 2344 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2345 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2346 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2347 } 2348 2349 virtual void work(uint worker_id) { 2350 ResourceMark rm; 2351 HandleMark hm; 2352 CMTask* task = _cm->task(worker_id); 2353 G1CMIsAliveClosure g1_is_alive(_g1h); 2354 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2355 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2356 2357 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2358 } 2359 }; 2360 2361 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2362 assert(_workers != NULL, "Need parallel worker threads."); 2363 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2364 2365 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2366 2367 // We need to reset the concurrency level before each 2368 // proxy task execution, so that the termination protocol 2369 // and overflow handling in CMTask::do_marking_step() knows 2370 // how many workers to wait for. 2371 _cm->set_concurrency(_active_workers); 2372 _g1h->set_par_threads(_active_workers); 2373 _workers->run_task(&proc_task_proxy); 2374 _g1h->set_par_threads(0); 2375 } 2376 2377 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2378 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2379 EnqueueTask& _enq_task; 2380 2381 public: 2382 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2383 AbstractGangTask("Enqueue reference objects in parallel"), 2384 _enq_task(enq_task) { } 2385 2386 virtual void work(uint worker_id) { 2387 _enq_task.work(worker_id); 2388 } 2389 }; 2390 2391 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2392 assert(_workers != NULL, "Need parallel worker threads."); 2393 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2394 2395 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2396 2397 // Not strictly necessary but... 2398 // 2399 // We need to reset the concurrency level before each 2400 // proxy task execution, so that the termination protocol 2401 // and overflow handling in CMTask::do_marking_step() knows 2402 // how many workers to wait for. 2403 _cm->set_concurrency(_active_workers); 2404 _g1h->set_par_threads(_active_workers); 2405 _workers->run_task(&enq_task_proxy); 2406 _g1h->set_par_threads(0); 2407 } 2408 2409 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2410 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2411 } 2412 2413 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2414 if (has_overflown()) { 2415 // Skip processing the discovered references if we have 2416 // overflown the global marking stack. Reference objects 2417 // only get discovered once so it is OK to not 2418 // de-populate the discovered reference lists. 
We could have,
// but the only benefit would be that, when marking restarts,
// fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark   hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    G1CMTraceTime t("GC ref-proc", G1Log::finer());

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy.
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          g1h->gc_timer_cm(),
                                          concurrent_gc_id());
    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
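    // If the flag was set we bail out further down, before doing any
    // class unloading, since g1_is_alive cannot be trusted and marking
    // will be restarted.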
    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed.
    return;
  }

  assert(_markStack.isEmpty(), "Marking should have completed");

  // Unload Klasses, String, Symbols, Code Cache, etc.
  {
    G1CMTraceTime trace("Unloading", G1Log::finer());

    if (ClassUnloadingWithConcurrentMark) {
      bool purged_classes;

      {
        G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
      }

      {
        G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
      }
    }

    if (G1StringDedup::is_enabled()) {
      G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
      G1StringDedup::unlink(&g1_is_alive);
    }
  }
}

void ConcurrentMark::swapMarkBitMaps() {
  CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap  = (CMBitMap*)  temp;
}

// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
 private:
  CMTask* _task;

 public:
  void do_object(oop obj) {
    _task->deal_with_reference(obj);
  }

  CMObjectClosure(CMTask* task) : _task(task) { }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  CMObjectClosure _cm_obj;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

 public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the
        // nmethods to find roots for concurrent marking; however, the
        // oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader
        // (or klass_holder) of the receiver, should be live by the SATB
        // invariant, but other oops recorded in nmethods may behave
        // differently.
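        // For that reason we treat the oops in the nmethods on this
        // thread's stack as strong roots here and mark through them.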
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
      }
    }
  }
};

class CMRemarkTask: public AbstractGangTask {
 private:
  ConcurrentMark* _cm;
 public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true /* do_termination */,
                              false /* is_serial */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  CMRemarkTask(ConcurrentMark* cm, int active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark   hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  G1CMTraceTime trace("Finalize Marking", G1Log::finer());

  g1h->ensure_parsability(false);

  StrongRootsScope srs;
  // This is remark, so we'll use up all active threads.
  uint active_workers = g1h->workers()->active_workers();
  if (active_workers == 0) {
    assert(active_workers > 0, "Should have been set earlier");
    active_workers = (uint) ParallelGCThreads;
    g1h->workers()->set_active_workers(active_workers);
  }
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  CMRemarkTask remarkTask(this, active_workers);
  // We will start all available threads, even if we decide that the
  // active_workers will be fewer. The extra ones will just bail out
  // immediately.
  g1h->set_par_threads(active_workers);
  g1h->workers()->run_task(&remarkTask);
  g1h->set_par_threads(0);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            err_msg("Invariant: has_overflown = %s, num buffers = %d",
                    BOOL_TO_STR(has_overflown()),
                    satb_mq_set.completed_buffers_num()));

  print_stats();
}

void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
}

void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  _nextMarkBitMap->clearRange(mr);
}

HeapRegion*
ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    // Note on how this code handles humongous regions. In the
    // normal case the finger will reach the start of a "starts
    // humongous" (SH) region. Its end will either be the end of the
    // last "continues humongous" (CH) region in the sequence, or the
    // standard end of the SH region (if the SH is the only region in
    // the sequence). That way claim_region() will skip over the CH
    // regions. However, there is a subtle race between a CM thread
    // executing this method and a mutator thread doing a humongous
    // object allocation. The two are not mutually exclusive as the CM
    // thread does not need to hold the Heap_lock when it gets
    // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming a SH or a CH
    // region. In the former case, it will either
    //   a) Miss the update to the region's end, in which case it will
    //      visit every subsequent CH region, will find their bitmaps
    //      empty, and do nothing, or
    //   b) Will observe the update of the region's end (in which case
    //      it will skip the subsequent CH regions).
    // If it comes across a region that suddenly becomes CH, the
    // scenario will be similar to b). So, the race between
    // claim_region() and a humongous object allocation might force us
    // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);

    // heap_region_containing_raw() above may return NULL as we always
    // scan and claim until the end of the heap. In this case, just jump
    // to the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
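    // The CAS below claims [finger, end): if _finger still holds the
    // value we read above, it is advanced to 'end' and the claim
    // succeeds; otherwise another worker moved it first and we retry
    // with the refreshed value.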
2731 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); 2732 if (res == finger && curr_region != NULL) { 2733 // we succeeded 2734 HeapWord* bottom = curr_region->bottom(); 2735 HeapWord* limit = curr_region->next_top_at_mark_start(); 2736 2737 if (verbose_low()) { 2738 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " 2739 "["PTR_FORMAT", "PTR_FORMAT"), " 2740 "limit = "PTR_FORMAT, 2741 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit)); 2742 } 2743 2744 // notice that _finger == end cannot be guaranteed here since, 2745 // someone else might have moved the finger even further 2746 assert(_finger >= end, "the finger should have moved forward"); 2747 2748 if (verbose_low()) { 2749 gclog_or_tty->print_cr("[%u] we were successful with region = " 2750 PTR_FORMAT, worker_id, p2i(curr_region)); 2751 } 2752 2753 if (limit > bottom) { 2754 if (verbose_low()) { 2755 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, " 2756 "returning it ", worker_id, p2i(curr_region)); 2757 } 2758 return curr_region; 2759 } else { 2760 assert(limit == bottom, 2761 "the region limit should be at bottom"); 2762 if (verbose_low()) { 2763 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, " 2764 "returning NULL", worker_id, p2i(curr_region)); 2765 } 2766 // we return NULL and the caller should try calling 2767 // claim_region() again. 2768 return NULL; 2769 } 2770 } else { 2771 assert(_finger > finger, "the finger should have moved forward"); 2772 if (verbose_low()) { 2773 if (curr_region == NULL) { 2774 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 2775 "global finger = "PTR_FORMAT", " 2776 "our finger = "PTR_FORMAT, 2777 worker_id, p2i(_finger), p2i(finger)); 2778 } else { 2779 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2780 "global finger = "PTR_FORMAT", " 2781 "our finger = "PTR_FORMAT, 2782 worker_id, p2i(_finger), p2i(finger)); 2783 } 2784 } 2785 2786 // read it again 2787 finger = _finger; 2788 } 2789 } 2790 2791 return NULL; 2792 } 2793 2794 #ifndef PRODUCT 2795 enum VerifyNoCSetOopsPhase { 2796 VerifyNoCSetOopsStack, 2797 VerifyNoCSetOopsQueues, 2798 VerifyNoCSetOopsSATBCompleted, 2799 VerifyNoCSetOopsSATBThread 2800 }; 2801 2802 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2803 private: 2804 G1CollectedHeap* _g1h; 2805 VerifyNoCSetOopsPhase _phase; 2806 int _info; 2807 2808 const char* phase_str() { 2809 switch (_phase) { 2810 case VerifyNoCSetOopsStack: return "Stack"; 2811 case VerifyNoCSetOopsQueues: return "Queue"; 2812 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 2813 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 2814 default: ShouldNotReachHere(); 2815 } 2816 return NULL; 2817 } 2818 2819 void do_object_work(oop obj) { 2820 guarantee(!_g1h->obj_in_cs(obj), 2821 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2822 p2i((void*) obj), phase_str(), _info)); 2823 } 2824 2825 public: 2826 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2827 2828 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2829 _phase = phase; 2830 _info = info; 2831 } 2832 2833 virtual void do_oop(oop* p) { 2834 oop obj = oopDesc::load_decode_heap_oop(p); 2835 do_object_work(obj); 2836 } 2837 2838 virtual void do_oop(narrowOop* p) { 2839 // We should not come across narrow oops while scanning marking 2840 // stacks and SATB buffers. 
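    // (Entries on the global mark stack, the task queues and the SATB
    // buffers are full-width oops, never compressed ones, so reaching
    // this closure with a narrowOop would indicate a caller bug.)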
2841 ShouldNotReachHere(); 2842 } 2843 2844 virtual void do_object(oop obj) { 2845 do_object_work(obj); 2846 } 2847 }; 2848 2849 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 2850 bool verify_enqueued_buffers, 2851 bool verify_thread_buffers, 2852 bool verify_fingers) { 2853 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2854 if (!G1CollectedHeap::heap()->mark_in_progress()) { 2855 return; 2856 } 2857 2858 VerifyNoCSetOopsClosure cl; 2859 2860 if (verify_stacks) { 2861 // Verify entries on the global mark stack 2862 cl.set_phase(VerifyNoCSetOopsStack); 2863 _markStack.oops_do(&cl); 2864 2865 // Verify entries on the task queues 2866 for (uint i = 0; i < _max_worker_id; i += 1) { 2867 cl.set_phase(VerifyNoCSetOopsQueues, i); 2868 CMTaskQueue* queue = _task_queues->queue(i); 2869 queue->oops_do(&cl); 2870 } 2871 } 2872 2873 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 2874 2875 // Verify entries on the enqueued SATB buffers 2876 if (verify_enqueued_buffers) { 2877 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 2878 satb_qs.iterate_completed_buffers_read_only(&cl); 2879 } 2880 2881 // Verify entries on the per-thread SATB buffers 2882 if (verify_thread_buffers) { 2883 cl.set_phase(VerifyNoCSetOopsSATBThread); 2884 satb_qs.iterate_thread_buffers_read_only(&cl); 2885 } 2886 2887 if (verify_fingers) { 2888 // Verify the global finger 2889 HeapWord* global_finger = finger(); 2890 if (global_finger != NULL && global_finger < _heap_end) { 2891 // The global finger always points to a heap region boundary. We 2892 // use heap_region_containing_raw() to get the containing region 2893 // given that the global finger could be pointing to a free region 2894 // which subsequently becomes continues humongous. If that 2895 // happens, heap_region_containing() will return the bottom of the 2896 // corresponding starts humongous region and the check below will 2897 // not hold any more. 2898 // Since we always iterate over all regions, we might get a NULL HeapRegion 2899 // here. 2900 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 2901 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 2902 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 2903 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 2904 } 2905 2906 // Verify the task fingers 2907 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2908 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 2909 CMTask* task = _tasks[i]; 2910 HeapWord* task_finger = task->finger(); 2911 if (task_finger != NULL && task_finger < _heap_end) { 2912 // See above note on the global finger verification. 2913 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 2914 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 2915 !task_hr->in_collection_set(), 2916 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 2917 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 2918 } 2919 } 2920 } 2921 } 2922 #endif // PRODUCT 2923 2924 // Aggregate the counting data that was constructed concurrently 2925 // with marking. 
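// During marking each worker accumulated its own "stripe" of the
// counting data: a per-region marked-bytes array and a private card
// bitmap. The closure below sums the per-worker marked-bytes entries
// for each region and ORs the per-worker card bitmaps into the global
// card bitmap.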
class AggregateCountDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;
  BitMap* _cm_card_bm;
  uint _max_worker_id;

 public:
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed.
      // Note that we cannot rely on their associated
      // "starts humongous" region to have their bit set to 1
      // since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* start = hr->bottom();
    HeapWord* limit = hr->next_top_at_mark_start();
    HeapWord* end = hr->end();

    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
                   "top: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));

    assert(hr->next_marked_bytes() == 0, "Precondition");

    if (start == limit) {
      // NTAMS of this region has not been set so nothing to do.
      return false;
    }

    // 'start' should be in the heap.
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region).
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could actually be just beyond the end of the heap; limit_idx
    // will then correspond to a (non-existent) card that is also
    // outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
    uint hrm_index = hr->hrm_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrm_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
3006 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 3007 3008 while (scan_idx < limit_idx) { 3009 assert(task_card_bm->at(scan_idx) == true, "should be"); 3010 _cm_card_bm->set_bit(scan_idx); 3011 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 3012 3013 // BitMap::get_next_one_offset() can handle the case when 3014 // its left_offset parameter is greater than its right_offset 3015 // parameter. It does, however, have an early exit if 3016 // left_offset == right_offset. So let's limit the value 3017 // passed in for left offset here. 3018 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 3019 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 3020 } 3021 } 3022 3023 // Update the marked bytes for this region. 3024 hr->add_to_marked_bytes(marked_bytes); 3025 3026 // Next heap region 3027 return false; 3028 } 3029 }; 3030 3031 class G1AggregateCountDataTask: public AbstractGangTask { 3032 protected: 3033 G1CollectedHeap* _g1h; 3034 ConcurrentMark* _cm; 3035 BitMap* _cm_card_bm; 3036 uint _max_worker_id; 3037 int _active_workers; 3038 HeapRegionClaimer _hrclaimer; 3039 3040 public: 3041 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3042 ConcurrentMark* cm, 3043 BitMap* cm_card_bm, 3044 uint max_worker_id, 3045 int n_workers) : 3046 AbstractGangTask("Count Aggregation"), 3047 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3048 _max_worker_id(max_worker_id), 3049 _active_workers(n_workers), 3050 _hrclaimer(_active_workers) { 3051 } 3052 3053 void work(uint worker_id) { 3054 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3055 3056 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 3057 } 3058 }; 3059 3060 3061 void ConcurrentMark::aggregate_count_data() { 3062 int n_workers = _g1h->workers()->active_workers(); 3063 3064 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3065 _max_worker_id, n_workers); 3066 3067 _g1h->set_par_threads(n_workers); 3068 _g1h->workers()->run_task(&g1_par_agg_task); 3069 _g1h->set_par_threads(0); 3070 } 3071 3072 // Clear the per-worker arrays used to store the per-region counting data 3073 void ConcurrentMark::clear_all_count_data() { 3074 // Clear the global card bitmap - it will be filled during 3075 // liveness count aggregation (during remark) and the 3076 // final counting task. 3077 _card_bm.clear(); 3078 3079 // Clear the global region bitmap - it will be filled as part 3080 // of the final counting task. 
3081 _region_bm.clear(); 3082 3083 uint max_regions = _g1h->max_regions(); 3084 assert(_max_worker_id > 0, "uninitialized"); 3085 3086 for (uint i = 0; i < _max_worker_id; i += 1) { 3087 BitMap* task_card_bm = count_card_bitmap_for(i); 3088 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3089 3090 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3091 assert(marked_bytes_array != NULL, "uninitialized"); 3092 3093 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3094 task_card_bm->clear(); 3095 } 3096 } 3097 3098 void ConcurrentMark::print_stats() { 3099 if (verbose_stats()) { 3100 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3101 for (size_t i = 0; i < _active_tasks; ++i) { 3102 _tasks[i]->print_stats(); 3103 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3104 } 3105 } 3106 } 3107 3108 // abandon current marking iteration due to a Full GC 3109 void ConcurrentMark::abort() { 3110 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3111 // concurrent bitmap clearing. 3112 _nextMarkBitMap->clearAll(); 3113 3114 // Note we cannot clear the previous marking bitmap here 3115 // since VerifyDuringGC verifies the objects marked during 3116 // a full GC against the previous bitmap. 3117 3118 // Clear the liveness counting data 3119 clear_all_count_data(); 3120 // Empty mark stack 3121 reset_marking_state(); 3122 for (uint i = 0; i < _max_worker_id; ++i) { 3123 _tasks[i]->clear_region_fields(); 3124 } 3125 _first_overflow_barrier_sync.abort(); 3126 _second_overflow_barrier_sync.abort(); 3127 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); 3128 if (!gc_id.is_undefined()) { 3129 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance 3130 // to detect that it was aborted. Only keep track of the first GC id that we aborted. 3131 _aborted_gc_id = gc_id; 3132 } 3133 _has_aborted = true; 3134 3135 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3136 satb_mq_set.abandon_partial_marking(); 3137 // This can be called either during or outside marking, we'll read 3138 // the expected_active value from the SATB queue set. 3139 satb_mq_set.set_active_all_threads( 3140 false, /* new active value */ 3141 satb_mq_set.is_active() /* expected_active */); 3142 3143 _g1h->trace_heap_after_concurrent_cycle(); 3144 _g1h->register_concurrent_cycle_end(); 3145 } 3146 3147 const GCId& ConcurrentMark::concurrent_gc_id() { 3148 if (has_aborted()) { 3149 return _aborted_gc_id; 3150 } 3151 return _g1h->gc_tracer_cm()->gc_id(); 3152 } 3153 3154 static void print_ms_time_info(const char* prefix, const char* name, 3155 NumberSeq& ns) { 3156 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3157 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3158 if (ns.num() > 0) { 3159 gclog_or_tty->print_cr("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 3160 prefix, ns.sd(), ns.maximum()); 3161 } 3162 } 3163 3164 void ConcurrentMark::print_summary_info() { 3165 gclog_or_tty->print_cr(" Concurrent marking:"); 3166 print_ms_time_info(" ", "init marks", _init_times); 3167 print_ms_time_info(" ", "remarks", _remark_times); 3168 { 3169 print_ms_time_info(" ", "final marks", _remark_mark_times); 3170 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3171 3172 } 3173 print_ms_time_info(" ", "cleanups", _cleanup_times); 3174 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3175 _total_counting_time, 3176 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3177 (double)_cleanup_times.num() 3178 : 0.0)); 3179 if (G1ScrubRemSets) { 3180 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3181 _total_rs_scrub_time, 3182 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3183 (double)_cleanup_times.num() 3184 : 0.0)); 3185 } 3186 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3187 (_init_times.sum() + _remark_times.sum() + 3188 _cleanup_times.sum())/1000.0); 3189 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3190 "(%8.2f s marking).", 3191 cmThread()->vtime_accum(), 3192 cmThread()->vtime_mark_accum()); 3193 } 3194 3195 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3196 _parallel_workers->print_worker_threads_on(st); 3197 } 3198 3199 void ConcurrentMark::print_on_error(outputStream* st) const { 3200 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3201 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3202 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3203 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3204 } 3205 3206 // We take a break if someone is trying to stop the world. 
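// Returns true iff we actually yielded. Worker 0 additionally records
// the concurrent pause with the policy before yielding.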
3207 bool ConcurrentMark::do_yield_check(uint worker_id) { 3208 if (SuspendibleThreadSet::should_yield()) { 3209 if (worker_id == 0) { 3210 _g1h->g1_policy()->record_concurrent_pause(); 3211 } 3212 SuspendibleThreadSet::yield(); 3213 return true; 3214 } else { 3215 return false; 3216 } 3217 } 3218 3219 #ifndef PRODUCT 3220 // for debugging purposes 3221 void ConcurrentMark::print_finger() { 3222 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3223 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3224 for (uint i = 0; i < _max_worker_id; ++i) { 3225 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3226 } 3227 gclog_or_tty->cr(); 3228 } 3229 #endif 3230 3231 template<bool scan> 3232 inline void CMTask::process_grey_object(oop obj) { 3233 assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray"); 3234 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3235 3236 if (_cm->verbose_high()) { 3237 gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT, 3238 _worker_id, p2i((void*) obj)); 3239 } 3240 3241 size_t obj_size = obj->size(); 3242 _words_scanned += obj_size; 3243 3244 if (scan) { 3245 obj->oop_iterate(_cm_oop_closure); 3246 } 3247 statsOnly( ++_objs_scanned ); 3248 check_limits(); 3249 } 3250 3251 template void CMTask::process_grey_object<true>(oop); 3252 template void CMTask::process_grey_object<false>(oop); 3253 3254 // Closure for iteration over bitmaps 3255 class CMBitMapClosure : public BitMapClosure { 3256 private: 3257 // the bitmap that is being iterated over 3258 CMBitMap* _nextMarkBitMap; 3259 ConcurrentMark* _cm; 3260 CMTask* _task; 3261 3262 public: 3263 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3264 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3265 3266 bool do_bit(size_t offset) { 3267 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3268 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3269 assert( addr < _cm->finger(), "invariant"); 3270 3271 statsOnly( _task->increase_objs_found_on_bitmap() ); 3272 assert(addr >= _task->finger(), "invariant"); 3273 3274 // We move that task's local finger along. 
    _task->move_finger_to(addr);

    _task->scan_object(oop(addr));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);

    // if the has_aborted flag has been raised, we need to bail out of
    // the iteration
    return !_task->has_aborted();
  }
};

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : _g1h(g1h), _cm(cm), _task(task) {
  assert(_ref_processor == NULL, "should be initialized to NULL");

  if (G1UseConcMarkReferenceProcessing) {
    _ref_processor = g1h->ref_processor_cm();
    assert(_ref_processor != NULL, "should not be NULL");
  }
}

void CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  assert(!hr->is_continues_humongous(),
         "claim_region() should have filtered out continues humongous regions");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
                           _worker_id, p2i(hr));
  }

  _curr_region = hr;
  _finger = hr->bottom();
  update_region_limit();
}

void CMTask::update_region_limit() {
  HeapRegion* hr = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit = hr->next_top_at_mark_start();

  if (limit == bottom) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] found an empty region "
                             "["PTR_FORMAT", "PTR_FORMAT")",
                             _worker_id, p2i(bottom), p2i(limit));
    }
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}

void CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
                           _worker_id, p2i(_curr_region));
  }
  clear_region_fields();
}

void CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
3365 _curr_region = NULL; 3366 _finger = NULL; 3367 _region_limit = NULL; 3368 } 3369 3370 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3371 if (cm_oop_closure == NULL) { 3372 assert(_cm_oop_closure != NULL, "invariant"); 3373 } else { 3374 assert(_cm_oop_closure == NULL, "invariant"); 3375 } 3376 _cm_oop_closure = cm_oop_closure; 3377 } 3378 3379 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3380 guarantee(nextMarkBitMap != NULL, "invariant"); 3381 3382 if (_cm->verbose_low()) { 3383 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3384 } 3385 3386 _nextMarkBitMap = nextMarkBitMap; 3387 clear_region_fields(); 3388 3389 _calls = 0; 3390 _elapsed_time_ms = 0.0; 3391 _termination_time_ms = 0.0; 3392 _termination_start_time_ms = 0.0; 3393 3394 #if _MARKING_STATS_ 3395 _aborted = 0; 3396 _aborted_overflow = 0; 3397 _aborted_cm_aborted = 0; 3398 _aborted_yield = 0; 3399 _aborted_timed_out = 0; 3400 _aborted_satb = 0; 3401 _aborted_termination = 0; 3402 _steal_attempts = 0; 3403 _steals = 0; 3404 _local_pushes = 0; 3405 _local_pops = 0; 3406 _local_max_size = 0; 3407 _objs_scanned = 0; 3408 _global_pushes = 0; 3409 _global_pops = 0; 3410 _global_max_size = 0; 3411 _global_transfers_to = 0; 3412 _global_transfers_from = 0; 3413 _regions_claimed = 0; 3414 _objs_found_on_bitmap = 0; 3415 _satb_buffers_processed = 0; 3416 #endif // _MARKING_STATS_ 3417 } 3418 3419 bool CMTask::should_exit_termination() { 3420 regular_clock_call(); 3421 // This is called when we are in the termination protocol. We should 3422 // quit if, for some reason, this task wants to abort or the global 3423 // stack is not empty (this means that we can get work from it). 3424 return !_cm->mark_stack_empty() || has_aborted(); 3425 } 3426 3427 void CMTask::reached_limit() { 3428 assert(_words_scanned >= _words_scanned_limit || 3429 _refs_reached >= _refs_reached_limit , 3430 "shouldn't have been called otherwise"); 3431 regular_clock_call(); 3432 } 3433 3434 void CMTask::regular_clock_call() { 3435 if (has_aborted()) return; 3436 3437 // First, we need to recalculate the words scanned and refs reached 3438 // limits for the next clock call. 3439 recalculate_limits(); 3440 3441 // During the regular clock call we do the following 3442 3443 // (1) If an overflow has been flagged, then we abort. 3444 if (_cm->has_overflown()) { 3445 set_has_aborted(); 3446 return; 3447 } 3448 3449 // If we are not concurrent (i.e. we're doing remark) we don't need 3450 // to check anything else. The other steps are only needed during 3451 // the concurrent marking phase. 3452 if (!concurrent()) return; 3453 3454 // (2) If marking has been aborted for Full GC, then we also abort. 3455 if (_cm->has_aborted()) { 3456 set_has_aborted(); 3457 statsOnly( ++_aborted_cm_aborted ); 3458 return; 3459 } 3460 3461 double curr_time_ms = os::elapsedVTime() * 1000.0; 3462 3463 // (3) If marking stats are enabled, then we update the step history. 
#if _MARKING_STATS_
  if (_words_scanned >= _words_scanned_limit) {
    ++_clock_due_to_scanning;
  }
  if (_refs_reached >= _refs_reached_limit) {
    ++_clock_due_to_marking;
  }

  double last_interval_ms = curr_time_ms - _interval_start_time_ms;
  _interval_start_time_ms = curr_time_ms;
  _all_clock_intervals_ms.add(last_interval_ms);

  if (_cm->verbose_medium()) {
    gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
                           "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
                           _worker_id, last_interval_ms,
                           _words_scanned,
                           (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
                           _refs_reached,
                           (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
  }
#endif // _MARKING_STATS_

  // (4) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    statsOnly( ++_aborted_yield );
    return;
  }

  // (5) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    statsOnly( ++_aborted_timed_out );
    return;
  }

  // (6) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
                             _worker_id);
    }
    // We do need to process SATB buffers, so we'll abort and restart
    // the marking task to do so.
    set_has_aborted();
    statsOnly( ++_aborted_satb );
    return;
  }
}

void CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit = _real_words_scanned_limit;

  _real_refs_reached_limit = _refs_reached + refs_reached_period;
  _refs_reached_limit = _real_refs_reached_limit;
}

void CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per-byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.
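  //
  // A hedged, illustrative example (the period value is an assumption,
  // not the actual constant): if words_scanned_period were 4096 words,
  // the adjusted limit would sit 3 * 4096 / 4 == 3072 words below the
  // real limit, leaving only a quarter of the usual scanning budget
  // before regular_clock_call() fires again.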

  if (_cm->verbose_medium()) {
    gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
  }

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}

void CMTask::move_entries_to_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the local queue
  oop buffer[global_stack_transfer_size];

  int n = 0;
  oop obj;
  while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
    buffer[n] = obj;
    ++n;
  }

  if (n > 0) {
    // we popped at least one entry from the local queue

    statsOnly( ++_global_transfers_to; _local_pops += n );

    if (!_cm->mark_stack_push(buffer, n)) {
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
                               _worker_id);
      }
      set_has_aborted();
    } else {
      // the transfer was successful

      if (_cm->verbose_medium()) {
        gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
                               _worker_id, n);
      }
      statsOnly( size_t tmp_size = _cm->mark_stack_size();
                 if (tmp_size > _global_max_size) {
                   _global_max_size = tmp_size;
                 }
                 _global_pushes += n );
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void CMTask::get_entries_from_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the global stack.
  oop buffer[global_stack_transfer_size];
  int n;
  _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  assert(n <= global_stack_transfer_size,
         "we should not pop more than the given limit");
  if (n > 0) {
    // yes, we did actually pop at least one entry

    statsOnly( ++_global_transfers_from; _global_pops += n );
    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
                             _worker_id, n);
    }
    for (int i = 0; i < n; ++i) {
      bool success = _task_queue->push(buffer[i]);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }

    statsOnly( size_t tmp_size = (size_t)_task_queue->size();
               if (tmp_size > _local_max_size) {
                 _local_max_size = tmp_size;
               }
               _local_pushes += n );
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) return;

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
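  //
  // For instance (illustrative numbers, not taken from this file): with
  // max_elems() == 16384 and GCDrainStackTargetSize == 64, a partial
  // drain stops once the queue is back down to MIN2(16384 / 3, 64) == 64
  // entries, while a total drain (target_size == 0) empties it.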
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
                             _worker_id, target_size);
    }

    oop obj;
    bool ret = _task_queue->pop_local(obj);
    while (ret) {
      statsOnly( ++_local_pops );

      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
                               p2i((void*) obj));
      }

      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
      assert(!_g1h->is_on_master_free_list(
                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");

      scan_object(obj);

      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(obj);
      }
    }

    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
                             _worker_id, _task_queue->size());
    }
  }
}

void CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end). Notice that,
  // because we move entries from the global stack in chunks, or
  // because another task might be doing the same, we might in fact
  // drop below the target. But this is not a problem.
  size_t target_size;
  if (partially) {
    target_size = _cm->partial_mark_stack_size_target();
  } else {
    target_size = 0;
  }

  if (_cm->mark_stack_size() > target_size) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
                             _worker_id, target_size);
    }

    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      get_entries_from_global_stack();
      drain_local_queue(partially);
    }

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
                             _worker_id, _cm->mark_stack_size());
    }
  }
}

// The SATB queue has several assumptions about whether to call the par or
// non-par versions of the methods. This is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
void CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  CMObjectClosure oc(this);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  // This loop keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
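  // Note that regular_clock_call() runs once per processed buffer, so a
  // long backlog of SATB buffers cannot starve the yield and time-quota
  // checks while we drain.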
3727 while (!has_aborted() && 3728 satb_mq_set.apply_closure_to_completed_buffer(&oc)) { 3729 if (_cm->verbose_medium()) { 3730 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3731 } 3732 statsOnly( ++_satb_buffers_processed ); 3733 regular_clock_call(); 3734 } 3735 3736 _draining_satb_buffers = false; 3737 3738 assert(has_aborted() || 3739 concurrent() || 3740 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3741 3742 // again, this was a potentially expensive operation, decrease the 3743 // limits to get the regular clock call early 3744 decrease_limits(); 3745 } 3746 3747 void CMTask::print_stats() { 3748 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3749 _worker_id, _calls); 3750 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3751 _elapsed_time_ms, _termination_time_ms); 3752 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3753 _step_times_ms.num(), _step_times_ms.avg(), 3754 _step_times_ms.sd()); 3755 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3756 _step_times_ms.maximum(), _step_times_ms.sum()); 3757 3758 #if _MARKING_STATS_ 3759 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3760 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3761 _all_clock_intervals_ms.sd()); 3762 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3763 _all_clock_intervals_ms.maximum(), 3764 _all_clock_intervals_ms.sum()); 3765 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT, 3766 _clock_due_to_scanning, _clock_due_to_marking); 3767 gclog_or_tty->print_cr(" Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT, 3768 _objs_scanned, _objs_found_on_bitmap); 3769 gclog_or_tty->print_cr(" Local Queue: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3770 _local_pushes, _local_pops, _local_max_size); 3771 gclog_or_tty->print_cr(" Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3772 _global_pushes, _global_pops, _global_max_size); 3773 gclog_or_tty->print_cr(" transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT, 3774 _global_transfers_to,_global_transfers_from); 3775 gclog_or_tty->print_cr(" Regions: claimed = " SIZE_FORMAT, _regions_claimed); 3776 gclog_or_tty->print_cr(" SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed); 3777 gclog_or_tty->print_cr(" Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT, 3778 _steal_attempts, _steals); 3779 gclog_or_tty->print_cr(" Aborted: " SIZE_FORMAT ", due to", _aborted); 3780 gclog_or_tty->print_cr(" overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT, 3781 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3782 gclog_or_tty->print_cr(" time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT, 3783 _aborted_timed_out, _aborted_satb, _aborted_termination); 3784 #endif // _MARKING_STATS_ 3785 } 3786 3787 /***************************************************************************** 3788 3789 The do_marking_step(time_target_ms, ...) method is the building 3790 block of the parallel marking framework. It can be called in parallel 3791 with other invocations of do_marking_step() on different tasks 3792 (but only one per task, obviously) and concurrently with the 3793 mutator threads, or during remark, hence it eliminates the need 3794 for two versions of the code. 
When called during remark, it will
   pick up from where the task left off during the concurrent marking
   phase. Interestingly, tasks are also claimable during evacuation
   pauses, since do_marking_step() ensures that it aborts before
   it needs to yield.

   The data structures that it uses to do marking work are the
   following:

     (1) Marking Bitmap. If there are gray objects that appear only
     on the bitmap (this happens either when dealing with an overflow
     or when the initial marking phase has simply marked the roots
     and didn't push them on the stack), then tasks claim heap
     regions whose bitmap they then scan to find gray objects. A
     global finger indicates where the end of the last claimed region
     is. A local finger indicates how far into the region a task has
     scanned. The two fingers are used to determine how to gray an
     object (i.e. whether simply marking it is OK, as it will be
     visited by a task in the future, or whether it also needs to
     be pushed on a stack).

     (2) Local Queue. The local queue of the task, which is accessed
     reasonably efficiently by the task. Other tasks can steal from
     it when they run out of work. Throughout the marking phase, a
     task attempts to keep its local queue short but not totally
     empty, so that entries are available for stealing by other
     tasks. Only when there is no more work will a task totally
     drain its local queue.

     (3) Global Mark Stack. This handles local queue overflow. During
     marking only sets of entries are moved between it and the local
     queues, as access to it requires a mutex, and finer-grained
     interaction with it might cause contention. If it
     overflows, then the marking phase should restart and iterate
     over the bitmap to identify gray objects. Throughout the marking
     phase, tasks attempt to keep the global mark stack at a small
     length but not totally empty, so that entries are available for
     popping by other tasks. Only when there is no more work will
     tasks totally drain the global mark stack.

     (4) SATB Buffer Queue. This is where completed SATB buffers are
     made available. Buffers are regularly removed from this queue
     and scanned for roots, so that the queue doesn't get too
     long. During remark, all completed buffers are processed, as
     well as the filled-in parts of any uncompleted buffers.

   The do_marking_step() method tries to abort when the time target
   has been reached. There are a few other cases when the
   do_marking_step() method also aborts:

     (1) When the marking phase has been aborted (after a Full GC).

     (2) When a global overflow (on the global stack) has been
     triggered. Before the task aborts, it will actually sync up with
     the other tasks to ensure that all the marking data structures
     (local queues, stacks, fingers etc.) are re-initialized so that
     when do_marking_step() completes, the marking phase can
     immediately restart.

     (3) When enough completed SATB buffers are available. The
     do_marking_step() method only tries to drain SATB buffers right
     at the beginning. So, if enough buffers are available, the
     marking step aborts and the SATB buffers are processed at
     the beginning of the next invocation.

     (4) To yield. When we have to yield, we abort and yield
     right at the end of do_marking_step().
This saves us from a lot
   of hassle as, by yielding, we might allow a Full GC. If this
   happens then objects will be compacted underneath our feet, the
   heap might shrink, etc. We avoid having to check for this by just
   aborting and doing the yield right at the end.

   From the above it follows that the do_marking_step() method should
   be called in a loop (or, otherwise, regularly) until it completes.

   If a marking step completes without its has_aborted() flag being
   true, it means it has completed the current marking phase (and
   also all other marking tasks have done so and have all synced up).

   A method called regular_clock_call() is invoked "regularly" (in
   sub-ms intervals) throughout marking. It is this clock method that
   checks all the abort conditions which were mentioned above and
   decides when the task should abort. A work-based scheme is used to
   trigger this clock method: when the number of object words the
   marking phase has scanned or the number of references the marking
   phase has visited reaches a given limit. Additional invocations of
   the clock method have been planted in a few other strategic places
   too. The initial reason for the clock method was to avoid calling
   vtime too regularly, as it is quite expensive. So, once it was in
   place, it was natural to piggy-back all the other conditions on it
   too and not constantly check them throughout the code.

   If do_termination is true then do_marking_step will enter its
   termination protocol.

   The value of is_serial must be true when do_marking_step is being
   called serially (i.e. by the VMThread) and do_marking_step should
   skip any synchronization in the termination and overflow code.
   Examples include the serial remark code and the serial reference
   processing closures.

   The value of is_serial must be false when do_marking_step is
   being called by any of the worker threads in a work gang.
   Examples include the concurrent marking code (CMMarkingTask),
   the MT remark code, and the MT reference processing closures.

 *****************************************************************************/

void CMTask::do_marking_step(double time_target_ms,
                             bool do_termination,
                             bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it is
  // possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;
  statsOnly( _interval_start_time_ms = _start_time_ms );

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
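  //
  // As a quick reference, these combinations follow directly from the
  // expression below:
  //
  //   do_termination | is_serial | do_stealing
  //   ---------------+-----------+------------
  //        true      |   false   |    true     (concurrent tasks, MT remark)
  //        true      |   true    |    false    (serial remark)
  //        false     |    any    |    false    (no termination protocol)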
3929 bool do_stealing = do_termination && !is_serial; 3930 3931 double diff_prediction_ms = 3932 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 3933 _time_target_ms = time_target_ms - diff_prediction_ms; 3934 3935 // set up the variables that are used in the work-based scheme to 3936 // call the regular clock method 3937 _words_scanned = 0; 3938 _refs_reached = 0; 3939 recalculate_limits(); 3940 3941 // clear all flags 3942 clear_has_aborted(); 3943 _has_timed_out = false; 3944 _draining_satb_buffers = false; 3945 3946 ++_calls; 3947 3948 if (_cm->verbose_low()) { 3949 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 3950 "target = %1.2lfms >>>>>>>>>>", 3951 _worker_id, _calls, _time_target_ms); 3952 } 3953 3954 // Set up the bitmap and oop closures. Anything that uses them is 3955 // eventually called from this method, so it is OK to allocate these 3956 // statically. 3957 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 3958 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 3959 set_cm_oop_closure(&cm_oop_closure); 3960 3961 if (_cm->has_overflown()) { 3962 // This can happen if the mark stack overflows during a GC pause 3963 // and this task, after a yield point, restarts. We have to abort 3964 // as we need to get into the overflow protocol which happens 3965 // right at the end of this task. 3966 set_has_aborted(); 3967 } 3968 3969 // First drain any available SATB buffers. After this, we will not 3970 // look at SATB buffers before the next invocation of this method. 3971 // If enough completed SATB buffers are queued up, the regular clock 3972 // will abort this task so that it restarts. 3973 drain_satb_buffers(); 3974 // ...then partially drain the local queue and the global stack 3975 drain_local_queue(true); 3976 drain_global_stack(true); 3977 3978 do { 3979 if (!has_aborted() && _curr_region != NULL) { 3980 // This means that we're already holding on to a region. 3981 assert(_finger != NULL, "if region is not NULL, then the finger " 3982 "should not be NULL either"); 3983 3984 // We might have restarted this task after an evacuation pause 3985 // which might have evacuated the region we're holding on to 3986 // underneath our feet. Let's read its limit again to make sure 3987 // that we do not iterate over a region of the heap that 3988 // contains garbage (update_region_limit() will also move 3989 // _finger to the start of the region if it is found empty). 3990 update_region_limit(); 3991 // We will start from _finger not from the start of the region, 3992 // as we might be restarting this task after aborting half-way 3993 // through scanning this region. In this case, _finger points to 3994 // the address where we last found a marked object. If this is a 3995 // fresh region, _finger points to start(). 3996 MemRegion mr = MemRegion(_finger, _region_limit); 3997 3998 if (_cm->verbose_low()) { 3999 gclog_or_tty->print_cr("[%u] we're scanning part " 4000 "["PTR_FORMAT", "PTR_FORMAT") " 4001 "of region "HR_FORMAT, 4002 _worker_id, p2i(_finger), p2i(_region_limit), 4003 HR_FORMAT_PARAMS(_curr_region)); 4004 } 4005 4006 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 4007 "humongous regions should go around loop once only"); 4008 4009 // Some special cases: 4010 // If the memory region is empty, we can just give up the region. 
4011 // If the current region is humongous then we only need to check 4012 // the bitmap for the bit associated with the start of the object, 4013 // scan the object if it's live, and give up the region. 4014 // Otherwise, let's iterate over the bitmap of the part of the region 4015 // that is left. 4016 // If the iteration is successful, give up the region. 4017 if (mr.is_empty()) { 4018 giveup_current_region(); 4019 regular_clock_call(); 4020 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 4021 if (_nextMarkBitMap->isMarked(mr.start())) { 4022 // The object is marked - apply the closure 4023 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4024 bitmap_closure.do_bit(offset); 4025 } 4026 // Even if this task aborted while scanning the humongous object 4027 // we can (and should) give up the current region. 4028 giveup_current_region(); 4029 regular_clock_call(); 4030 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4031 giveup_current_region(); 4032 regular_clock_call(); 4033 } else { 4034 assert(has_aborted(), "currently the only way to do so"); 4035 // The only way to abort the bitmap iteration is to return 4036 // false from the do_bit() method. However, inside the 4037 // do_bit() method we move the _finger to point to the 4038 // object currently being looked at. So, if we bail out, we 4039 // have definitely set _finger to something non-null. 4040 assert(_finger != NULL, "invariant"); 4041 4042 // Region iteration was actually aborted. So now _finger 4043 // points to the address of the object we last scanned. If we 4044 // leave it there, when we restart this task, we will rescan 4045 // the object. It is easy to avoid this. We move the finger by 4046 // enough to point to the next possible object header (the 4047 // bitmap knows by how much we need to move it as it knows its 4048 // granularity). 4049 assert(_finger < _region_limit, "invariant"); 4050 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4051 // Check if bitmap iteration was aborted while scanning the last object 4052 if (new_finger >= _region_limit) { 4053 giveup_current_region(); 4054 } else { 4055 move_finger_to(new_finger); 4056 } 4057 } 4058 } 4059 // At this point we have either completed iterating over the 4060 // region we were holding on to, or we have aborted. 4061 4062 // We then partially drain the local queue and the global stack. 4063 // (Do we really need this?) 4064 drain_local_queue(true); 4065 drain_global_stack(true); 4066 4067 // Read the note on the claim_region() method on why it might 4068 // return NULL with potentially more regions available for 4069 // claiming and why we have to check out_of_regions() to determine 4070 // whether we're done or not. 4071 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4072 // We are going to try to claim a new region. We should have 4073 // given up on the previous one. 4074 // Separated the asserts so that we know which one fires. 
      assert(_curr_region == NULL, "invariant");
      assert(_finger == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
      }
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        statsOnly( ++_regions_claimed );

        if (_cm->verbose_low()) {
          gclog_or_tty->print_cr("[%u] we successfully claimed "
                                 "region "PTR_FORMAT,
                                 _worker_id, p2i(claimed_region));
        }

        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
    }

    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
    }

    while (!has_aborted()) {
      oop obj;
      statsOnly( ++_steal_attempts );

      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        if (_cm->verbose_medium()) {
          gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
                                 _worker_id, p2i((void*) obj));
        }

        statsOnly( ++_steals );

        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // If we are about to wrap up and go into termination, check if we
  // should raise the overflow flag.
  if (do_termination && !has_aborted()) {
    if (_cm->force_overflow()->should_force()) {
      _cm->set_has_overflown();
      regular_clock_call();
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
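  // Reminder: while a task is parked in offer_termination() below, it is
  // polled via should_exit_termination() (defined earlier), which lets it
  // leave the protocol when it has aborted or when the global mark stack
  // has become non-empty again.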
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
    }

    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
      }
    } else {
      // Apparently there's more work to do. Let's abort this task.
      // The caller will restart it and we can hopefully find more
      // things to do.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] apparently there is more work to do",
                               _worker_id);
      }

      set_has_aborted();
      statsOnly( ++_aborted_termination );
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was stack-allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.

    statsOnly( ++_aborted );

    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions.
However, in 4269 // order to do this we have to make sure that all tasks stop 4270 // what they are doing and re-initialize in a safe manner. We 4271 // will achieve this with the use of two barrier sync points. 4272 4273 if (_cm->verbose_low()) { 4274 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4275 } 4276 4277 if (!is_serial) { 4278 // We only need to enter the sync barrier if being called 4279 // from a parallel context 4280 _cm->enter_first_sync_barrier(_worker_id); 4281 4282 // When we exit this sync barrier we know that all tasks have 4283 // stopped doing marking work. So, it's now safe to 4284 // re-initialize our data structures. At the end of this method, 4285 // task 0 will clear the global data structures. 4286 } 4287 4288 statsOnly( ++_aborted_overflow ); 4289 4290 // We clear the local state of this task... 4291 clear_region_fields(); 4292 4293 if (!is_serial) { 4294 // ...and enter the second barrier. 4295 _cm->enter_second_sync_barrier(_worker_id); 4296 } 4297 // At this point, if we're during the concurrent phase of 4298 // marking, everything has been re-initialized and we're 4299 // ready to restart. 4300 } 4301 4302 if (_cm->verbose_low()) { 4303 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4304 "elapsed = %1.2lfms <<<<<<<<<<", 4305 _worker_id, _time_target_ms, elapsed_time_ms); 4306 if (_cm->has_aborted()) { 4307 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4308 _worker_id); 4309 } 4310 } 4311 } else { 4312 if (_cm->verbose_low()) { 4313 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4314 "elapsed = %1.2lfms <<<<<<<<<<", 4315 _worker_id, _time_target_ms, elapsed_time_ms); 4316 } 4317 } 4318 4319 _claimed = false; 4320 } 4321 4322 CMTask::CMTask(uint worker_id, 4323 ConcurrentMark* cm, 4324 size_t* marked_bytes, 4325 BitMap* card_bm, 4326 CMTaskQueue* task_queue, 4327 CMTaskQueueSet* task_queues) 4328 : _g1h(G1CollectedHeap::heap()), 4329 _worker_id(worker_id), _cm(cm), 4330 _claimed(false), 4331 _nextMarkBitMap(NULL), _hash_seed(17), 4332 _task_queue(task_queue), 4333 _task_queues(task_queues), 4334 _cm_oop_closure(NULL), 4335 _marked_bytes_array(marked_bytes), 4336 _card_bm(card_bm) { 4337 guarantee(task_queue != NULL, "invariant"); 4338 guarantee(task_queues != NULL, "invariant"); 4339 4340 statsOnly( _clock_due_to_scanning = 0; 4341 _clock_due_to_marking = 0 ); 4342 4343 _marking_step_diffs_ms.add(0.5); 4344 } 4345 4346 // These are formatting macros that are used below to ensure 4347 // consistent formatting. The *_H_* versions are used to format the 4348 // header for a particular value and they should be kept consistent 4349 // with the corresponding macro. Also note that most of the macros add 4350 // the necessary white space (as a prefix) which makes them a bit 4351 // easier to compose. 4352 4353 // All the output lines are prefixed with this string to be able to 4354 // identify them easily in a large log file. 
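//
// For orientation: every line printed by this closure starts with the
// "###" prefix defined below; per-region lines then carry the columns
// named in the header that the constructor prints (type, address-range,
// used, prev-live, next-live, gc-eff, remset, code-roots).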
#define G1PPRL_LINE_PREFIX "###"

#define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT " %-4s"
#define G1PPRL_TYPE_H_FORMAT " %4s"
#define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT " %9s"
#define G1PPRL_DOUBLE_FORMAT " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
  : _out(out),
    _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _hum_used_bytes(0), _hum_capacity_bytes(0),
    _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  _out->cr();
  _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
                 G1PPRL_SUM_ADDR_FORMAT("reserved")
                 G1PPRL_SUM_BYTE_FORMAT("region-size"),
                 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                 HeapRegion::GrainBytes);
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "type", "address-range",
                 "used", "prev-live", "next-live", "gc-eff",
                 "remset", "code-roots");
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "", "",
                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                 "(bytes)", "(bytes)");
}

// It takes as a parameter a reference to one of the _hum_* fields, deduces
// the corresponding value for a region in a humongous region
// series (either the region size, or what's left if the _hum_* field
// is < the region size), and updates the _hum_* field accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  size_t bytes = 0;
  // The > 0 check is to deal with the prev and next live bytes which
  // could be 0.
  if (*hum_bytes > 0) {
    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
    *hum_bytes -= bytes;
  }
  return bytes;
}

// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and we visit the regions in address order.
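//
// A worked example with illustrative numbers (not taken from this file):
// for a humongous series of three regions with GrainBytes == 1M and
// _hum_used_bytes == 2.5M, successive calls hand out
// MIN2(GrainBytes, remaining) each time, so the three regions report
// 1M, 1M and 0.5M of used space and _hum_used_bytes ends up at zero.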
4444 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4445 size_t* capacity_bytes, 4446 size_t* prev_live_bytes, 4447 size_t* next_live_bytes) { 4448 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4449 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4450 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4451 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4452 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4453 } 4454 4455 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4456 const char* type = r->get_type_str(); 4457 HeapWord* bottom = r->bottom(); 4458 HeapWord* end = r->end(); 4459 size_t capacity_bytes = r->capacity(); 4460 size_t used_bytes = r->used(); 4461 size_t prev_live_bytes = r->live_bytes(); 4462 size_t next_live_bytes = r->next_live_bytes(); 4463 double gc_eff = r->gc_efficiency(); 4464 size_t remset_bytes = r->rem_set()->mem_size(); 4465 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4466 4467 if (r->is_starts_humongous()) { 4468 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4469 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4470 "they should have been zeroed after the last time we used them"); 4471 // Set up the _hum_* fields. 4472 _hum_capacity_bytes = capacity_bytes; 4473 _hum_used_bytes = used_bytes; 4474 _hum_prev_live_bytes = prev_live_bytes; 4475 _hum_next_live_bytes = next_live_bytes; 4476 get_hum_bytes(&used_bytes, &capacity_bytes, 4477 &prev_live_bytes, &next_live_bytes); 4478 end = bottom + HeapRegion::GrainWords; 4479 } else if (r->is_continues_humongous()) { 4480 get_hum_bytes(&used_bytes, &capacity_bytes, 4481 &prev_live_bytes, &next_live_bytes); 4482 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4483 } 4484 4485 _total_used_bytes += used_bytes; 4486 _total_capacity_bytes += capacity_bytes; 4487 _total_prev_live_bytes += prev_live_bytes; 4488 _total_next_live_bytes += next_live_bytes; 4489 _total_remset_bytes += remset_bytes; 4490 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4491 4492 // Print a line for this particular region. 4493 _out->print_cr(G1PPRL_LINE_PREFIX 4494 G1PPRL_TYPE_FORMAT 4495 G1PPRL_ADDR_BASE_FORMAT 4496 G1PPRL_BYTE_FORMAT 4497 G1PPRL_BYTE_FORMAT 4498 G1PPRL_BYTE_FORMAT 4499 G1PPRL_DOUBLE_FORMAT 4500 G1PPRL_BYTE_FORMAT 4501 G1PPRL_BYTE_FORMAT, 4502 type, p2i(bottom), p2i(end), 4503 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4504 remset_bytes, strong_code_roots_bytes); 4505 4506 return false; 4507 } 4508 4509 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4510 // add static memory usages to remembered set sizes 4511 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4512 // Print the footer of the output. 
4513 _out->print_cr(G1PPRL_LINE_PREFIX); 4514 _out->print_cr(G1PPRL_LINE_PREFIX 4515 " SUMMARY" 4516 G1PPRL_SUM_MB_FORMAT("capacity") 4517 G1PPRL_SUM_MB_PERC_FORMAT("used") 4518 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4519 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4520 G1PPRL_SUM_MB_FORMAT("remset") 4521 G1PPRL_SUM_MB_FORMAT("code-roots"), 4522 bytes_to_mb(_total_capacity_bytes), 4523 bytes_to_mb(_total_used_bytes), 4524 perc(_total_used_bytes, _total_capacity_bytes), 4525 bytes_to_mb(_total_prev_live_bytes), 4526 perc(_total_prev_live_bytes, _total_capacity_bytes), 4527 bytes_to_mb(_total_next_live_bytes), 4528 perc(_total_next_live_bytes, _total_capacity_bytes), 4529 bytes_to_mb(_total_remset_bytes), 4530 bytes_to_mb(_total_strong_code_roots_bytes)); 4531 _out->cr(); 4532 }