/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
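  // (Each bitmap bit covers (1 << _shifter) heap words, so candidate object
  // starts are aligned to HeapWordSize << _shifter bytes; the rounding below
  // follows from that granularity.)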
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return heap_size / mark_distance();
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
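  // (When _may_yield is set, the closure runs concurrently with mutators and
  // must periodically check whether the marking cycle has been aborted; see
  // the yield check in doHeapRegion() below.)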
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

void CMBitMap::clearAll() {
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  G1CollectedHeap::heap()->heap_region_iterate(&cl);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // MemRegion::intersection() returns the clipped region; assign it back so
  // the clipping actually takes effect.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
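  // (The assert above confirms that the entire reservation was committed, so
  // _base below points at storage for all "capacity" oop slots.)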
ConcurrentMark stack?"); 243 _base = (oop*) _virtual_space.low(); 244 setEmpty(); 245 _capacity = (jint) capacity; 246 _saved_index = -1; 247 _should_expand = false; 248 NOT_PRODUCT(_max_depth = 0); 249 return true; 250 } 251 252 void CMMarkStack::expand() { 253 // Called, during remark, if we've overflown the marking stack during marking. 254 assert(isEmpty(), "stack should been emptied while handling overflow"); 255 assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted"); 256 // Clear expansion flag 257 _should_expand = false; 258 if (_capacity == (jint) MarkStackSizeMax) { 259 if (PrintGCDetails && Verbose) { 260 gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit"); 261 } 262 return; 263 } 264 // Double capacity if possible 265 jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax); 266 // Do not give up existing stack until we have managed to 267 // get the double capacity that we desired. 268 ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity * 269 sizeof(oop))); 270 if (rs.is_reserved()) { 271 // Release the backing store associated with old stack 272 _virtual_space.release(); 273 // Reinitialize virtual space for new stack 274 if (!_virtual_space.initialize(rs, rs.size())) { 275 fatal("Not enough swap for expanded marking stack capacity"); 276 } 277 _base = (oop*)(_virtual_space.low()); 278 _index = 0; 279 _capacity = new_capacity; 280 } else { 281 if (PrintGCDetails && Verbose) { 282 // Failed to double capacity, continue; 283 gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from " 284 SIZE_FORMAT"K to " SIZE_FORMAT"K", 285 _capacity / K, new_capacity / K); 286 } 287 } 288 } 289 290 void CMMarkStack::set_should_expand() { 291 // If we're resetting the marking state because of an 292 // marking stack overflow, record that we should, if 293 // possible, expand the stack. 294 _should_expand = _cm->has_overflown(); 295 } 296 297 CMMarkStack::~CMMarkStack() { 298 if (_base != NULL) { 299 _base = NULL; 300 _virtual_space.release(); 301 } 302 } 303 304 void CMMarkStack::par_push(oop ptr) { 305 while (true) { 306 if (isFull()) { 307 _overflow = true; 308 return; 309 } 310 // Otherwise... 311 jint index = _index; 312 jint next_index = index+1; 313 jint res = Atomic::cmpxchg(next_index, &_index, index); 314 if (res == index) { 315 _base[index] = ptr; 316 // Note that we don't maintain this atomically. We could, but it 317 // doesn't seem necessary. 318 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); 319 return; 320 } 321 // Otherwise, we need to try again. 322 } 323 } 324 325 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) { 326 while (true) { 327 if (isFull()) { 328 _overflow = true; 329 return; 330 } 331 // Otherwise... 332 jint index = _index; 333 jint next_index = index + n; 334 if (next_index > _capacity) { 335 _overflow = true; 336 return; 337 } 338 jint res = Atomic::cmpxchg(next_index, &_index, index); 339 if (res == index) { 340 for (int i = 0; i < n; i++) { 341 int ind = index + i; 342 assert(ind < _capacity, "By overflow test above."); 343 _base[ind] = ptr_arr[i]; 344 } 345 NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index)); 346 return; 347 } 348 // Otherwise, we need to try again. 
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false)
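// (The body below clamps the verbosity level, initializes the two marking
// bitmaps and the worker infrastructure, and sets _completed_initialization
// only if every step succeeds; the early returns on warnings leave it false.)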
{
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor          = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
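    // (scale_parallel_threads() above computes MAX2((n + 2) / 4, 1U); for
    // example, ParallelGCThreads = 8 yields (8 + 2) / 4 = 2 marking threads.)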
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
                                           _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
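    // (Only values supplied on the command line are validated here; the
    // ergonomic default computed in the branch above was already checked.)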
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  g1h->heap_region_iterate(&cl);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow count will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC or an evacuation
 * pause. This is actually safe, since entering the sync barrier is
 * one of the last things do_marking_step() does, and it doesn't
 * manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots()
  // and the decisions on that MT processing are made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
1613 1614 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1615 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1616 1617 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1618 expected = _exp_card_bm->at(i); 1619 actual = _card_bm->at(i); 1620 1621 if (expected && !actual) { 1622 if (_verbose) { 1623 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1624 "expected: %s, actual: %s", 1625 hr->hrm_index(), i, 1626 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1627 } 1628 failures += 1; 1629 } 1630 } 1631 1632 if (failures > 0 && _verbose) { 1633 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1634 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1635 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1636 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1637 } 1638 1639 _failures += failures; 1640 1641 // We could stop iteration over the heap when we 1642 // find the first violating region by returning true. 1643 return false; 1644 } 1645 }; 1646 1647 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1648 protected: 1649 G1CollectedHeap* _g1h; 1650 ConcurrentMark* _cm; 1651 BitMap* _actual_region_bm; 1652 BitMap* _actual_card_bm; 1653 1654 uint _n_workers; 1655 1656 BitMap* _expected_region_bm; 1657 BitMap* _expected_card_bm; 1658 1659 int _failures; 1660 bool _verbose; 1661 1662 HeapRegionClaimer _hrclaimer; 1663 1664 public: 1665 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1666 BitMap* region_bm, BitMap* card_bm, 1667 BitMap* expected_region_bm, BitMap* expected_card_bm) 1668 : AbstractGangTask("G1 verify final counting"), 1669 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1670 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1671 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1672 _failures(0), _verbose(false), 1673 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1674 assert(VerifyDuringGC, "don't call this otherwise"); 1675 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1676 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1677 1678 _verbose = _cm->verbose_medium(); 1679 } 1680 1681 void work(uint worker_id) { 1682 assert(worker_id < _n_workers, "invariant"); 1683 1684 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1685 _actual_region_bm, _actual_card_bm, 1686 _expected_region_bm, 1687 _expected_card_bm, 1688 _verbose); 1689 1690 _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer); 1691 1692 Atomic::add(verify_cl.failures(), &_failures); 1693 } 1694 1695 int failures() const { return _failures; } 1696 }; 1697 1698 // Closure that finalizes the liveness counting data. 1699 // Used during the cleanup pause. 1700 // Sets the bits corresponding to the interval [NTAMS, top] 1701 // (which contains the implicitly live objects) in the 1702 // card liveness bitmap. Also sets the bit for each region, 1703 // containing live data, in the region liveness bitmap. 1704 1705 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1706 public: 1707 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1708 BitMap* region_bm, 1709 BitMap* card_bm) : 1710 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1711 1712 bool doHeapRegion(HeapRegion* hr) { 1713 1714 if (hr->is_continues_humongous()) { 1715 // We will ignore these here and process them when their 1716 // associated "starts humongous" region is processed (see 1717 // set_bit_for_heap_region()). 
Note that we cannot rely on their 1718 // associated "starts humongous" region to have their bit set to 1719 // 1 since, due to the region chunking in the parallel region 1720 // iteration, a "continues humongous" region might be visited 1721 // before its associated "starts humongous". 1722 return false; 1723 } 1724 1725 HeapWord* ntams = hr->next_top_at_mark_start(); 1726 HeapWord* top = hr->top(); 1727 1728 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1729 1730 // Mark the allocated-since-marking portion... 1731 if (ntams < top) { 1732 // This definitely means the region has live objects. 1733 set_bit_for_region(hr); 1734 1735 // Now set the bits in the card bitmap for [ntams, top) 1736 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1737 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1738 1739 // Note: if we're looking at the last region in heap - top 1740 // could be actually just beyond the end of the heap; end_idx 1741 // will then correspond to a (non-existent) card that is also 1742 // just beyond the heap. 1743 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1744 // end of object is not card aligned - increment to cover 1745 // all the cards spanned by the object 1746 end_idx += 1; 1747 } 1748 1749 assert(end_idx <= _card_bm->size(), 1750 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1751 end_idx, _card_bm->size())); 1752 assert(start_idx < _card_bm->size(), 1753 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1754 start_idx, _card_bm->size())); 1755 1756 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1757 } 1758 1759 // Set the bit for the region if it contains live data 1760 if (hr->next_marked_bytes() > 0) { 1761 set_bit_for_region(hr); 1762 } 1763 1764 return false; 1765 } 1766 }; 1767 1768 class G1ParFinalCountTask: public AbstractGangTask { 1769 protected: 1770 G1CollectedHeap* _g1h; 1771 ConcurrentMark* _cm; 1772 BitMap* _actual_region_bm; 1773 BitMap* _actual_card_bm; 1774 1775 uint _n_workers; 1776 HeapRegionClaimer _hrclaimer; 1777 1778 public: 1779 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1780 : AbstractGangTask("G1 final counting"), 1781 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1782 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1783 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1784 } 1785 1786 void work(uint worker_id) { 1787 assert(worker_id < _n_workers, "invariant"); 1788 1789 FinalCountDataUpdateClosure final_update_cl(_g1h, 1790 _actual_region_bm, 1791 _actual_card_bm); 1792 1793 _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer); 1794 } 1795 }; 1796 1797 class G1ParNoteEndTask; 1798 1799 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1800 G1CollectedHeap* _g1; 1801 size_t _max_live_bytes; 1802 uint _regions_claimed; 1803 size_t _freed_bytes; 1804 FreeRegionList* _local_cleanup_list; 1805 HeapRegionSetCount _old_regions_removed; 1806 HeapRegionSetCount _humongous_regions_removed; 1807 HRRSCleanupTask* _hrrs_cleanup_task; 1808 double _claimed_region_time; 1809 double _max_region_time; 1810 1811 public: 1812 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1813 FreeRegionList* local_cleanup_list, 1814 HRRSCleanupTask* hrrs_cleanup_task) : 1815 _g1(g1), 1816 _max_live_bytes(0), _regions_claimed(0), 1817 _freed_bytes(0), 1818 _claimed_region_time(0.0), _max_region_time(0.0), 1819 _local_cleanup_list(local_cleanup_list), 1820 
_old_regions_removed(), 1821 _humongous_regions_removed(), 1822 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1823 1824 size_t freed_bytes() { return _freed_bytes; } 1825 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } 1826 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } 1827 1828 bool doHeapRegion(HeapRegion *hr) { 1829 if (hr->is_continues_humongous()) { 1830 return false; 1831 } 1832 // We use a claim value of zero here because all regions 1833 // were claimed with value 1 in the FinalCount task. 1834 _g1->reset_gc_time_stamps(hr); 1835 double start = os::elapsedTime(); 1836 _regions_claimed++; 1837 hr->note_end_of_marking(); 1838 _max_live_bytes += hr->max_live_bytes(); 1839 1840 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1841 _freed_bytes += hr->used(); 1842 hr->set_containing_set(NULL); 1843 if (hr->is_humongous()) { 1844 assert(hr->is_starts_humongous(), "we should only see starts humongous"); 1845 _humongous_regions_removed.increment(1u, hr->capacity()); 1846 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1847 } else { 1848 _old_regions_removed.increment(1u, hr->capacity()); 1849 _g1->free_region(hr, _local_cleanup_list, true); 1850 } 1851 } else { 1852 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1853 } 1854 1855 double region_time = (os::elapsedTime() - start); 1856 _claimed_region_time += region_time; 1857 if (region_time > _max_region_time) { 1858 _max_region_time = region_time; 1859 } 1860 return false; 1861 } 1862 1863 size_t max_live_bytes() { return _max_live_bytes; } 1864 uint regions_claimed() { return _regions_claimed; } 1865 double claimed_region_time_sec() { return _claimed_region_time; } 1866 double max_region_time_sec() { return _max_region_time; } 1867 }; 1868 1869 class G1ParNoteEndTask: public AbstractGangTask { 1870 friend class G1NoteEndOfConcMarkClosure; 1871 1872 protected: 1873 G1CollectedHeap* _g1h; 1874 size_t _max_live_bytes; 1875 size_t _freed_bytes; 1876 FreeRegionList* _cleanup_list; 1877 HeapRegionClaimer _hrclaimer; 1878 1879 public: 1880 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1881 AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { 1882 } 1883 1884 void work(uint worker_id) { 1885 double start = os::elapsedTime(); 1886 FreeRegionList local_cleanup_list("Local Cleanup List"); 1887 HRRSCleanupTask hrrs_cleanup_task; 1888 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1889 &hrrs_cleanup_task); 1890 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); 1891 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1892 1893 // Now update the lists 1894 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1895 { 1896 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1897 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1898 _max_live_bytes += g1_note_end.max_live_bytes(); 1899 _freed_bytes += g1_note_end.freed_bytes(); 1900 1901 // If we iterate over the global cleanup list at the end of 1902 // cleanup to do this printing we will not guarantee to only 1903 // generate output for the newly-reclaimed regions (the list 1904 // might not be empty at the beginning of cleanup; we might 1905 // still be working on its previous contents). 
So we do the 1906 // printing here, before we append the new regions to the global 1907 // cleanup list. 1908 1909 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1910 if (hr_printer->is_active()) { 1911 FreeRegionListIterator iter(&local_cleanup_list); 1912 while (iter.more_available()) { 1913 HeapRegion* hr = iter.get_next(); 1914 hr_printer->cleanup(hr); 1915 } 1916 } 1917 1918 _cleanup_list->add_ordered(&local_cleanup_list); 1919 assert(local_cleanup_list.is_empty(), "post-condition"); 1920 1921 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1922 } 1923 } 1924 size_t max_live_bytes() { return _max_live_bytes; } 1925 size_t freed_bytes() { return _freed_bytes; } 1926 }; 1927 1928 class G1ParScrubRemSetTask: public AbstractGangTask { 1929 protected: 1930 G1RemSet* _g1rs; 1931 BitMap* _region_bm; 1932 BitMap* _card_bm; 1933 HeapRegionClaimer _hrclaimer; 1934 1935 public: 1936 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1937 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1938 } 1939 1940 void work(uint worker_id) { 1941 _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer); 1942 } 1943 1944 }; 1945 1946 void ConcurrentMark::cleanup() { 1947 // world is stopped at this checkpoint 1948 assert(SafepointSynchronize::is_at_safepoint(), 1949 "world should be stopped"); 1950 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1951 1952 // If a full collection has happened, we shouldn't do this. 1953 if (has_aborted()) { 1954 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1955 return; 1956 } 1957 1958 g1h->verify_region_sets_optional(); 1959 1960 if (VerifyDuringGC) { 1961 HandleMark hm; // handle scope 1962 Universe::heap()->prepare_for_verify(); 1963 Universe::verify(VerifyOption_G1UsePrevMarking, 1964 " VerifyDuringGC:(before)"); 1965 } 1966 g1h->check_bitmaps("Cleanup Start"); 1967 1968 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 1969 g1p->record_concurrent_mark_cleanup_start(); 1970 1971 double start = os::elapsedTime(); 1972 1973 HeapRegionRemSet::reset_for_cleanup_tasks(); 1974 1975 uint n_workers; 1976 1977 // Do counting once more with the world stopped for good measure. 1978 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 1979 1980 g1h->set_par_threads(); 1981 n_workers = g1h->n_par_threads(); 1982 assert(g1h->n_par_threads() == n_workers, 1983 "Should not have been reset"); 1984 g1h->workers()->run_task(&g1_par_count_task); 1985 // Done with the parallel phase so reset to 0. 1986 g1h->set_par_threads(0); 1987 1988 if (VerifyDuringGC) { 1989 // Verify that the counting data accumulated during marking matches 1990 // that calculated by walking the marking bitmap. 1991 1992 // Bitmaps to hold expected values 1993 BitMap expected_region_bm(_region_bm.size(), true); 1994 BitMap expected_card_bm(_card_bm.size(), true); 1995 1996 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 1997 &_region_bm, 1998 &_card_bm, 1999 &expected_region_bm, 2000 &expected_card_bm); 2001 2002 g1h->set_par_threads((int)n_workers); 2003 g1h->workers()->run_task(&g1_par_verify_task); 2004 // Done with the parallel phase so reset to 0. 
    g1h->set_par_threads(0);

    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  }

  size_t start_used_bytes = g1h->used();
  g1h->set_marking_complete();

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->set_par_threads((int)n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->set_par_threads(0);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the rem sets before the record_concurrent_mark_cleanup_end()
  // call below, since scrubbing affects the metric by which we sort the
  // heap regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
    g1h->set_par_threads((int)n_workers);
    g1h->workers()->run_task(&g1_par_scrub_rs_task);
    g1h->set_par_threads(0);

    double rs_scrub_end = os::elapsedTime();
    double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
    _total_rs_scrub_time += this_rs_scrub_time;
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  if (G1Log::fine()) {
    g1h->print_size_transition(gclog_or_tty,
                               start_used_bytes,
                               g1h->used(),
                               g1h->capacity());
  }

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(after)");
  }

  g1h->check_bitmaps("Cleanup End");

  g1h->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();

  g1h->trace_heap_after_concurrent_cycle();
}

void ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                           "cleanup list has %u entries",
                           _cleanup_list.length());
  }

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                               "appending %u entries to the secondary_free_list, "
                               "cleanup list still has %u entries",
                               tmp_free_list.length(),
                               _cleanup_list.length());
      }

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }

      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the CMTask associated with a worker thread (for serial
// reference processing the CMTask for worker 0 is used) to preserve
// (mark) and trace referent objects.
//
// Using the CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
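//
// A simplified sketch of how this closure and the drain closure further
// down are wired into reference processing (see weakRefsWork() below for
// the real call; illustrative only, serial case shown):
//
//   G1CMIsAliveClosure is_alive(g1h);  // liveness predicate
//   G1CMKeepAliveAndDrainClosure keep_alive(cm, cm->task(0), true /* is_serial */);
//   G1CMDrainMarkingStackClosure drain(cm, cm->task(0), true /* is_serial */);
//   rp->process_discovered_references(&is_alive, &keep_alive, &drain,
//                                     NULL /* executor, i.e. serial */, ...);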
2176 2177 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2178 ConcurrentMark* _cm; 2179 CMTask* _task; 2180 int _ref_counter_limit; 2181 int _ref_counter; 2182 bool _is_serial; 2183 public: 2184 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2185 _cm(cm), _task(task), _is_serial(is_serial), 2186 _ref_counter_limit(G1RefProcDrainInterval) { 2187 assert(_ref_counter_limit > 0, "sanity"); 2188 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2189 _ref_counter = _ref_counter_limit; 2190 } 2191 2192 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2193 virtual void do_oop( oop* p) { do_oop_work(p); } 2194 2195 template <class T> void do_oop_work(T* p) { 2196 if (!_cm->has_overflown()) { 2197 oop obj = oopDesc::load_decode_heap_oop(p); 2198 if (_cm->verbose_high()) { 2199 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2200 "*"PTR_FORMAT" = "PTR_FORMAT, 2201 _task->worker_id(), p2i(p), p2i((void*) obj)); 2202 } 2203 2204 _task->deal_with_reference(obj); 2205 _ref_counter--; 2206 2207 if (_ref_counter == 0) { 2208 // We have dealt with _ref_counter_limit references, pushing them 2209 // and objects reachable from them on to the local stack (and 2210 // possibly the global stack). Call CMTask::do_marking_step() to 2211 // process these entries. 2212 // 2213 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2214 // there's nothing more to do (i.e. we're done with the entries that 2215 // were pushed as a result of the CMTask::deal_with_reference() calls 2216 // above) or we overflow. 2217 // 2218 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2219 // flag while there may still be some work to do. (See the comment at 2220 // the beginning of CMTask::do_marking_step() for those conditions - 2221 // one of which is reaching the specified time target.) It is only 2222 // when CMTask::do_marking_step() returns without setting the 2223 // has_aborted() flag that the marking step has completed. 2224 do { 2225 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2226 _task->do_marking_step(mark_step_duration_ms, 2227 false /* do_termination */, 2228 _is_serial); 2229 } while (_task->has_aborted() && !_cm->has_overflown()); 2230 _ref_counter = _ref_counter_limit; 2231 } 2232 } else { 2233 if (_cm->verbose_high()) { 2234 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2235 } 2236 } 2237 } 2238 }; 2239 2240 // 'Drain' oop closure used by both serial and parallel reference processing. 2241 // Uses the CMTask associated with a given worker thread (for serial 2242 // reference processing the CMtask for worker 0 is used). Calls the 2243 // do_marking_step routine, with an unbelievably large timeout value, 2244 // to drain the marking data structures of the remaining entries 2245 // added by the 'keep alive' oop closure above. 
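// (The "unbelievably large" timeout is simply a time target that can never
//  be reached: with the time-based abort effectively disabled,
//  do_marking_step() only returns once the stacks are drained, on overflow,
//  or on one of the other abort conditions it checks internally.)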
class G1CMDrainMarkingStackClosure: public VoidClosure {
  ConcurrentMark* _cm;
  CMTask* _task;
  bool _is_serial;
 public:
  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
                               _task->worker_id(), BOOL_TO_STR(_is_serial));
      }

      // We call CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;
  WorkGang*        _workers;
  int              _active_workers;

 public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          ConcurrentMark* cm,
                          WorkGang* workers,
                          int n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
2307 virtual void execute(ProcessTask& task); 2308 virtual void execute(EnqueueTask& task); 2309 }; 2310 2311 class G1CMRefProcTaskProxy: public AbstractGangTask { 2312 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2313 ProcessTask& _proc_task; 2314 G1CollectedHeap* _g1h; 2315 ConcurrentMark* _cm; 2316 2317 public: 2318 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2319 G1CollectedHeap* g1h, 2320 ConcurrentMark* cm) : 2321 AbstractGangTask("Process reference objects in parallel"), 2322 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2323 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2324 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2325 } 2326 2327 virtual void work(uint worker_id) { 2328 ResourceMark rm; 2329 HandleMark hm; 2330 CMTask* task = _cm->task(worker_id); 2331 G1CMIsAliveClosure g1_is_alive(_g1h); 2332 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2333 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2334 2335 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2336 } 2337 }; 2338 2339 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2340 assert(_workers != NULL, "Need parallel worker threads."); 2341 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2342 2343 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2344 2345 // We need to reset the concurrency level before each 2346 // proxy task execution, so that the termination protocol 2347 // and overflow handling in CMTask::do_marking_step() knows 2348 // how many workers to wait for. 2349 _cm->set_concurrency(_active_workers); 2350 _g1h->set_par_threads(_active_workers); 2351 _workers->run_task(&proc_task_proxy); 2352 _g1h->set_par_threads(0); 2353 } 2354 2355 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2356 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2357 EnqueueTask& _enq_task; 2358 2359 public: 2360 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2361 AbstractGangTask("Enqueue reference objects in parallel"), 2362 _enq_task(enq_task) { } 2363 2364 virtual void work(uint worker_id) { 2365 _enq_task.work(worker_id); 2366 } 2367 }; 2368 2369 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2370 assert(_workers != NULL, "Need parallel worker threads."); 2371 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2372 2373 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2374 2375 // Not strictly necessary but... 2376 // 2377 // We need to reset the concurrency level before each 2378 // proxy task execution, so that the termination protocol 2379 // and overflow handling in CMTask::do_marking_step() knows 2380 // how many workers to wait for. 2381 _cm->set_concurrency(_active_workers); 2382 _g1h->set_par_threads(_active_workers); 2383 _workers->run_task(&enq_task_proxy); 2384 _g1h->set_par_threads(0); 2385 } 2386 2387 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2388 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2389 } 2390 2391 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2392 if (has_overflown()) { 2393 // Skip processing the discovered references if we have 2394 // overflown the global marking stack. Reference objects 2395 // only get discovered once so it is OK to not 2396 // de-populate the discovered reference lists. 
We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark   hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    G1CMTraceTime t("GC ref-proc", G1Log::finer());

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          g1h->gc_timer_cm(),
                                          concurrent_gc_id());
    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We can not trust g1_is_alive if the marking stack overflowed
    return;
  }

  assert(_markStack.isEmpty(), "Marking should have completed");

  // Unload Klasses, String, Symbols, Code Cache, etc.
  {
    G1CMTraceTime trace("Unloading", G1Log::finer());

    if (ClassUnloadingWithConcurrentMark) {
      bool purged_classes;

      {
        G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
        purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
      }

      {
        G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
      }
    }

    if (G1StringDedup::is_enabled()) {
      G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
      G1StringDedup::unlink(&g1_is_alive);
    }
  }
}

void ConcurrentMark::swapMarkBitMaps() {
  CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap  = (CMBitMap*)  temp;
}

class CMObjectClosure;

// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
 private:
  CMTask* _task;

 public:
  void do_object(oop obj) {
    _task->deal_with_reference(obj);
  }

  CMObjectClosure(CMTask* task) : _task(task) { }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  CMObjectClosure _cm_obj;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

 public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking
        // however oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
        // live by the SATB invariant but other oops recorded in nmethods may behave differently.
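        // (Illustrative note: the claim_oops_do() check above ensures each
        //  thread is processed by exactly one remark worker for the current
        //  strong-roots parity, so the nmethod walk and SATB drain below
        //  each happen once per thread.)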
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
      }
    }
  }
};

class CMRemarkTask: public AbstractGangTask {
 private:
  ConcurrentMark* _cm;
 public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true /* do_termination */,
                              false /* is_serial */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  CMRemarkTask(ConcurrentMark* cm, int active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark   hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  G1CMTraceTime trace("Finalize Marking", G1Log::finer());

  g1h->ensure_parsability(false);

  G1CollectedHeap::StrongRootsScope srs(g1h);
  // this is remark, so we'll use up all active threads
  uint active_workers = g1h->workers()->active_workers();
  if (active_workers == 0) {
    assert(active_workers > 0, "Should have been set earlier");
    active_workers = (uint) ParallelGCThreads;
    g1h->workers()->set_active_workers(active_workers);
  }
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  CMRemarkTask remarkTask(this, active_workers);
  // We will start all available threads, even if we decide that the
  // active_workers will be fewer. The extra ones will just bail out
  // immediately.
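  // (The bail-out is the "worker_id < _cm->active_tasks()" check in
  //  CMRemarkTask::work() above; surplus workers return before doing
  //  any marking work.)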
2645 g1h->set_par_threads(active_workers); 2646 g1h->workers()->run_task(&remarkTask); 2647 g1h->set_par_threads(0); 2648 2649 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2650 guarantee(has_overflown() || 2651 satb_mq_set.completed_buffers_num() == 0, 2652 err_msg("Invariant: has_overflown = %s, num buffers = %d", 2653 BOOL_TO_STR(has_overflown()), 2654 satb_mq_set.completed_buffers_num())); 2655 2656 print_stats(); 2657 } 2658 2659 #ifndef PRODUCT 2660 2661 class PrintReachableOopClosure: public OopClosure { 2662 private: 2663 G1CollectedHeap* _g1h; 2664 outputStream* _out; 2665 VerifyOption _vo; 2666 bool _all; 2667 2668 public: 2669 PrintReachableOopClosure(outputStream* out, 2670 VerifyOption vo, 2671 bool all) : 2672 _g1h(G1CollectedHeap::heap()), 2673 _out(out), _vo(vo), _all(all) { } 2674 2675 void do_oop(narrowOop* p) { do_oop_work(p); } 2676 void do_oop( oop* p) { do_oop_work(p); } 2677 2678 template <class T> void do_oop_work(T* p) { 2679 oop obj = oopDesc::load_decode_heap_oop(p); 2680 const char* str = NULL; 2681 const char* str2 = ""; 2682 2683 if (obj == NULL) { 2684 str = ""; 2685 } else if (!_g1h->is_in_g1_reserved(obj)) { 2686 str = " O"; 2687 } else { 2688 HeapRegion* hr = _g1h->heap_region_containing(obj); 2689 bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo); 2690 bool marked = _g1h->is_marked(obj, _vo); 2691 2692 if (over_tams) { 2693 str = " >"; 2694 if (marked) { 2695 str2 = " AND MARKED"; 2696 } 2697 } else if (marked) { 2698 str = " M"; 2699 } else { 2700 str = " NOT"; 2701 } 2702 } 2703 2704 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", 2705 p2i(p), p2i((void*) obj), str, str2); 2706 } 2707 }; 2708 2709 class PrintReachableObjectClosure : public ObjectClosure { 2710 private: 2711 G1CollectedHeap* _g1h; 2712 outputStream* _out; 2713 VerifyOption _vo; 2714 bool _all; 2715 HeapRegion* _hr; 2716 2717 public: 2718 PrintReachableObjectClosure(outputStream* out, 2719 VerifyOption vo, 2720 bool all, 2721 HeapRegion* hr) : 2722 _g1h(G1CollectedHeap::heap()), 2723 _out(out), _vo(vo), _all(all), _hr(hr) { } 2724 2725 void do_object(oop o) { 2726 bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo); 2727 bool marked = _g1h->is_marked(o, _vo); 2728 bool print_it = _all || over_tams || marked; 2729 2730 if (print_it) { 2731 _out->print_cr(" "PTR_FORMAT"%s", 2732 p2i((void *)o), (over_tams) ? " >" : (marked) ? 
" M" : ""); 2733 PrintReachableOopClosure oopCl(_out, _vo, _all); 2734 o->oop_iterate_no_header(&oopCl); 2735 } 2736 } 2737 }; 2738 2739 class PrintReachableRegionClosure : public HeapRegionClosure { 2740 private: 2741 G1CollectedHeap* _g1h; 2742 outputStream* _out; 2743 VerifyOption _vo; 2744 bool _all; 2745 2746 public: 2747 bool doHeapRegion(HeapRegion* hr) { 2748 HeapWord* b = hr->bottom(); 2749 HeapWord* e = hr->end(); 2750 HeapWord* t = hr->top(); 2751 HeapWord* p = _g1h->top_at_mark_start(hr, _vo); 2752 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2753 "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p)); 2754 _out->cr(); 2755 2756 HeapWord* from = b; 2757 HeapWord* to = t; 2758 2759 if (to > from) { 2760 _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to)); 2761 _out->cr(); 2762 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2763 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2764 _out->cr(); 2765 } 2766 2767 return false; 2768 } 2769 2770 PrintReachableRegionClosure(outputStream* out, 2771 VerifyOption vo, 2772 bool all) : 2773 _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { } 2774 }; 2775 2776 void ConcurrentMark::print_reachable(const char* str, 2777 VerifyOption vo, 2778 bool all) { 2779 gclog_or_tty->cr(); 2780 gclog_or_tty->print_cr("== Doing heap dump... "); 2781 2782 if (G1PrintReachableBaseFile == NULL) { 2783 gclog_or_tty->print_cr(" #### error: no base file defined"); 2784 return; 2785 } 2786 2787 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2788 (JVM_MAXPATHLEN - 1)) { 2789 gclog_or_tty->print_cr(" #### error: file name too long"); 2790 return; 2791 } 2792 2793 char file_name[JVM_MAXPATHLEN]; 2794 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2795 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2796 2797 fileStream fout(file_name); 2798 if (!fout.is_open()) { 2799 gclog_or_tty->print_cr(" #### error: could not open file"); 2800 return; 2801 } 2802 2803 outputStream* out = &fout; 2804 out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo)); 2805 out->cr(); 2806 2807 out->print_cr("--- ITERATING OVER REGIONS"); 2808 out->cr(); 2809 PrintReachableRegionClosure rcl(out, vo, all); 2810 _g1h->heap_region_iterate(&rcl); 2811 out->cr(); 2812 2813 gclog_or_tty->print_cr(" done"); 2814 gclog_or_tty->flush(); 2815 } 2816 2817 #endif // PRODUCT 2818 2819 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2820 // Note we are overriding the read-only view of the prev map here, via 2821 // the cast. 2822 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2823 } 2824 2825 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2826 _nextMarkBitMap->clearRange(mr); 2827 } 2828 2829 HeapRegion* 2830 ConcurrentMark::claim_region(uint worker_id) { 2831 // "checkpoint" the finger 2832 HeapWord* finger = _finger; 2833 2834 // _heap_end will not change underneath our feet; it only changes at 2835 // yield points. 2836 while (finger < _heap_end) { 2837 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2838 2839 // Note on how this code handles humongous regions. In the 2840 // normal case the finger will reach the start of a "starts 2841 // humongous" (SH) region. Its end will either be the end of the 2842 // last "continues humongous" (CH) region in the sequence, or the 2843 // standard end of the SH region (if the SH is the only region in 2844 // the sequence). That way claim_region() will skip over the CH 2845 // regions. 
However, there is a subtle race between a CM thread
    // executing this method and a mutator thread doing a humongous
    // object allocation. The two are not mutually exclusive as the CM
    // thread does not need to hold the Heap_lock when it gets
    // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming a SH or a CH
    // region. In the former case, it will either
    //   a) Miss the update to the region's end, in which case it will
    //      visit every subsequent CH region, will find their bitmaps
    //      empty, and do nothing, or
    //   b) Will observe the update of the region's end (in which case
    //      it will skip the subsequent CH regions).
    // If it comes across a region that suddenly becomes CH, the
    // scenario will be similar to b). So, the race between
    // claim_region() and a humongous object allocation might force us
    // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);

    // The above heap_region_containing_raw() may return NULL as we always
    // scan and claim until the end of the heap. In this case, just jump to
    // the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit  = curr_region->next_top_at_mark_start();

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
                               "["PTR_FORMAT", "PTR_FORMAT"), "
                               "limit = "PTR_FORMAT,
                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
      }

      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
      assert(_finger >= end, "the finger should have moved forward");

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] we were successful with region = "
                               PTR_FORMAT, worker_id, p2i(curr_region));
      }

      if (limit > bottom) {
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
                                 "returning it ", worker_id, p2i(curr_region));
        }
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
                                 "returning NULL", worker_id, p2i(curr_region));
        }
        // we return NULL and the caller should try calling
        // claim_region() again.
2906 return NULL; 2907 } 2908 } else { 2909 assert(_finger > finger, "the finger should have moved forward"); 2910 if (verbose_low()) { 2911 if (curr_region == NULL) { 2912 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 2913 "global finger = "PTR_FORMAT", " 2914 "our finger = "PTR_FORMAT, 2915 worker_id, p2i(_finger), p2i(finger)); 2916 } else { 2917 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2918 "global finger = "PTR_FORMAT", " 2919 "our finger = "PTR_FORMAT, 2920 worker_id, p2i(_finger), p2i(finger)); 2921 } 2922 } 2923 2924 // read it again 2925 finger = _finger; 2926 } 2927 } 2928 2929 return NULL; 2930 } 2931 2932 #ifndef PRODUCT 2933 enum VerifyNoCSetOopsPhase { 2934 VerifyNoCSetOopsStack, 2935 VerifyNoCSetOopsQueues, 2936 VerifyNoCSetOopsSATBCompleted, 2937 VerifyNoCSetOopsSATBThread 2938 }; 2939 2940 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2941 private: 2942 G1CollectedHeap* _g1h; 2943 VerifyNoCSetOopsPhase _phase; 2944 int _info; 2945 2946 const char* phase_str() { 2947 switch (_phase) { 2948 case VerifyNoCSetOopsStack: return "Stack"; 2949 case VerifyNoCSetOopsQueues: return "Queue"; 2950 case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers"; 2951 case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers"; 2952 default: ShouldNotReachHere(); 2953 } 2954 return NULL; 2955 } 2956 2957 void do_object_work(oop obj) { 2958 guarantee(!_g1h->obj_in_cs(obj), 2959 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2960 p2i((void*) obj), phase_str(), _info)); 2961 } 2962 2963 public: 2964 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2965 2966 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2967 _phase = phase; 2968 _info = info; 2969 } 2970 2971 virtual void do_oop(oop* p) { 2972 oop obj = oopDesc::load_decode_heap_oop(p); 2973 do_object_work(obj); 2974 } 2975 2976 virtual void do_oop(narrowOop* p) { 2977 // We should not come across narrow oops while scanning marking 2978 // stacks and SATB buffers. 
2979 ShouldNotReachHere(); 2980 } 2981 2982 virtual void do_object(oop obj) { 2983 do_object_work(obj); 2984 } 2985 }; 2986 2987 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks, 2988 bool verify_enqueued_buffers, 2989 bool verify_thread_buffers, 2990 bool verify_fingers) { 2991 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2992 if (!G1CollectedHeap::heap()->mark_in_progress()) { 2993 return; 2994 } 2995 2996 VerifyNoCSetOopsClosure cl; 2997 2998 if (verify_stacks) { 2999 // Verify entries on the global mark stack 3000 cl.set_phase(VerifyNoCSetOopsStack); 3001 _markStack.oops_do(&cl); 3002 3003 // Verify entries on the task queues 3004 for (uint i = 0; i < _max_worker_id; i += 1) { 3005 cl.set_phase(VerifyNoCSetOopsQueues, i); 3006 CMTaskQueue* queue = _task_queues->queue(i); 3007 queue->oops_do(&cl); 3008 } 3009 } 3010 3011 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 3012 3013 // Verify entries on the enqueued SATB buffers 3014 if (verify_enqueued_buffers) { 3015 cl.set_phase(VerifyNoCSetOopsSATBCompleted); 3016 satb_qs.iterate_completed_buffers_read_only(&cl); 3017 } 3018 3019 // Verify entries on the per-thread SATB buffers 3020 if (verify_thread_buffers) { 3021 cl.set_phase(VerifyNoCSetOopsSATBThread); 3022 satb_qs.iterate_thread_buffers_read_only(&cl); 3023 } 3024 3025 if (verify_fingers) { 3026 // Verify the global finger 3027 HeapWord* global_finger = finger(); 3028 if (global_finger != NULL && global_finger < _heap_end) { 3029 // The global finger always points to a heap region boundary. We 3030 // use heap_region_containing_raw() to get the containing region 3031 // given that the global finger could be pointing to a free region 3032 // which subsequently becomes continues humongous. If that 3033 // happens, heap_region_containing() will return the bottom of the 3034 // corresponding starts humongous region and the check below will 3035 // not hold any more. 3036 // Since we always iterate over all regions, we might get a NULL HeapRegion 3037 // here. 3038 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 3039 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 3040 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 3041 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 3042 } 3043 3044 // Verify the task fingers 3045 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 3046 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 3047 CMTask* task = _tasks[i]; 3048 HeapWord* task_finger = task->finger(); 3049 if (task_finger != NULL && task_finger < _heap_end) { 3050 // See above note on the global finger verification. 3051 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 3052 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 3053 !task_hr->in_collection_set(), 3054 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 3055 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 3056 } 3057 } 3058 } 3059 } 3060 #endif // PRODUCT 3061 3062 // Aggregate the counting data that was constructed concurrently 3063 // with marking. 
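// Conceptually, for each region R with index r:
//
//   R.marked_bytes   = sum over workers w of count_marked_bytes_array_for(w)[r]
//   global card bm  |= union over workers w of count_card_bitmap_for(w),
//                      restricted to the cards of R's [bottom, ntams) range
//
// (pseudo-notation only; the real loop is in doHeapRegion() below)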
class AggregateCountDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;
  BitMap* _cm_card_bm;
  uint _max_worker_id;

 public:
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed.
      // Note that we cannot rely on their associated
      // "starts humongous" region to have its bit set to 1
      // since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* start = hr->bottom();
    HeapWord* limit = hr->next_top_at_mark_start();
    HeapWord* end = hr->end();

    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
                   "top: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));

    assert(hr->next_marked_bytes() == 0, "Precondition");

    if (start == limit) {
      // NTAMS of this region has not been set so nothing to do.
      return false;
    }

    // 'start' should be in the heap.
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx   = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could be actually just beyond the end of the heap;
    // limit_idx will then correspond to a (non-existent) card
    // that is also outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
    uint hrm_index = hr->hrm_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrm_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
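      // (Illustrative note: get_next_one_offset(start, limit) returns the
      //  index of the first set bit in [start, limit), or 'limit' if there
      //  is none, so the while loop below visits exactly the set bits of
      //  this worker's stripe - e.g. bits {5, 9} with limit 12 are visited
      //  as 5, then 9, before the loop exits at 12.)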
3144 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 3145 3146 while (scan_idx < limit_idx) { 3147 assert(task_card_bm->at(scan_idx) == true, "should be"); 3148 _cm_card_bm->set_bit(scan_idx); 3149 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 3150 3151 // BitMap::get_next_one_offset() can handle the case when 3152 // its left_offset parameter is greater than its right_offset 3153 // parameter. It does, however, have an early exit if 3154 // left_offset == right_offset. So let's limit the value 3155 // passed in for left offset here. 3156 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 3157 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 3158 } 3159 } 3160 3161 // Update the marked bytes for this region. 3162 hr->add_to_marked_bytes(marked_bytes); 3163 3164 // Next heap region 3165 return false; 3166 } 3167 }; 3168 3169 class G1AggregateCountDataTask: public AbstractGangTask { 3170 protected: 3171 G1CollectedHeap* _g1h; 3172 ConcurrentMark* _cm; 3173 BitMap* _cm_card_bm; 3174 uint _max_worker_id; 3175 int _active_workers; 3176 HeapRegionClaimer _hrclaimer; 3177 3178 public: 3179 G1AggregateCountDataTask(G1CollectedHeap* g1h, 3180 ConcurrentMark* cm, 3181 BitMap* cm_card_bm, 3182 uint max_worker_id, 3183 int n_workers) : 3184 AbstractGangTask("Count Aggregation"), 3185 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 3186 _max_worker_id(max_worker_id), 3187 _active_workers(n_workers), 3188 _hrclaimer(_active_workers) { 3189 } 3190 3191 void work(uint worker_id) { 3192 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 3193 3194 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 3195 } 3196 }; 3197 3198 3199 void ConcurrentMark::aggregate_count_data() { 3200 int n_workers = _g1h->workers()->active_workers(); 3201 3202 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 3203 _max_worker_id, n_workers); 3204 3205 _g1h->set_par_threads(n_workers); 3206 _g1h->workers()->run_task(&g1_par_agg_task); 3207 _g1h->set_par_threads(0); 3208 _g1h->allocation_context_stats().update_at_remark(); 3209 } 3210 3211 // Clear the per-worker arrays used to store the per-region counting data 3212 void ConcurrentMark::clear_all_count_data() { 3213 // Clear the global card bitmap - it will be filled during 3214 // liveness count aggregation (during remark) and the 3215 // final counting task. 3216 _card_bm.clear(); 3217 3218 // Clear the global region bitmap - it will be filled as part 3219 // of the final counting task. 
3220 _region_bm.clear(); 3221 3222 uint max_regions = _g1h->max_regions(); 3223 assert(_max_worker_id > 0, "uninitialized"); 3224 3225 for (uint i = 0; i < _max_worker_id; i += 1) { 3226 BitMap* task_card_bm = count_card_bitmap_for(i); 3227 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3228 3229 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3230 assert(marked_bytes_array != NULL, "uninitialized"); 3231 3232 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3233 task_card_bm->clear(); 3234 } 3235 } 3236 3237 void ConcurrentMark::print_stats() { 3238 if (verbose_stats()) { 3239 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3240 for (size_t i = 0; i < _active_tasks; ++i) { 3241 _tasks[i]->print_stats(); 3242 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3243 } 3244 } 3245 } 3246 3247 // Abandon the current marking iteration due to a Full GC. 3248 void ConcurrentMark::abort() { 3249 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3250 // concurrent bitmap clearing. 3251 _nextMarkBitMap->clearAll(); 3252 3253 // Note we cannot clear the previous marking bitmap here 3254 // since VerifyDuringGC verifies the objects marked during 3255 // a full GC against the previous bitmap. 3256 3257 // Clear the liveness counting data 3258 clear_all_count_data(); 3259 // Empty mark stack 3260 reset_marking_state(); 3261 for (uint i = 0; i < _max_worker_id; ++i) { 3262 _tasks[i]->clear_region_fields(); 3263 } 3264 _first_overflow_barrier_sync.abort(); 3265 _second_overflow_barrier_sync.abort(); 3266 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); 3267 if (!gc_id.is_undefined()) { 3268 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance 3269 // to detect that it was aborted. Only keep track of the first GC id that we aborted. 3270 _aborted_gc_id = gc_id; 3271 } 3272 _has_aborted = true; 3273 3274 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3275 satb_mq_set.abandon_partial_marking(); 3276 // This can be called either during or outside marking; we'll read 3277 // the expected_active value from the SATB queue set. 3278 satb_mq_set.set_active_all_threads( 3279 false, /* new active value */ 3280 satb_mq_set.is_active() /* expected_active */); 3281 3282 _g1h->trace_heap_after_concurrent_cycle(); 3283 _g1h->register_concurrent_cycle_end(); 3284 } 3285 3286 const GCId& ConcurrentMark::concurrent_gc_id() { 3287 if (has_aborted()) { 3288 return _aborted_gc_id; 3289 } 3290 return _g1h->gc_tracer_cm()->gc_id(); 3291 } 3292 3293 static void print_ms_time_info(const char* prefix, const char* name, 3294 NumberSeq& ns) { 3295 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3296 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3297 if (ns.num() > 0) { 3298 gclog_or_tty->print_cr("%s [std.
dev = %8.2f ms, max = %8.2f ms]", 3299 prefix, ns.sd(), ns.maximum()); 3300 } 3301 } 3302 3303 void ConcurrentMark::print_summary_info() { 3304 gclog_or_tty->print_cr(" Concurrent marking:"); 3305 print_ms_time_info(" ", "init marks", _init_times); 3306 print_ms_time_info(" ", "remarks", _remark_times); 3307 { 3308 print_ms_time_info(" ", "final marks", _remark_mark_times); 3309 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3310 3311 } 3312 print_ms_time_info(" ", "cleanups", _cleanup_times); 3313 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3314 _total_counting_time, 3315 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3316 (double)_cleanup_times.num() 3317 : 0.0)); 3318 if (G1ScrubRemSets) { 3319 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3320 _total_rs_scrub_time, 3321 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3322 (double)_cleanup_times.num() 3323 : 0.0)); 3324 } 3325 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3326 (_init_times.sum() + _remark_times.sum() + 3327 _cleanup_times.sum())/1000.0); 3328 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3329 "(%8.2f s marking).", 3330 cmThread()->vtime_accum(), 3331 cmThread()->vtime_mark_accum()); 3332 } 3333 3334 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3335 _parallel_workers->print_worker_threads_on(st); 3336 } 3337 3338 void ConcurrentMark::print_on_error(outputStream* st) const { 3339 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3340 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3341 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3342 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3343 } 3344 3345 // We take a break if someone is trying to stop the world. 
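// Returns true if we actually yielded, false otherwise.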
3346 bool ConcurrentMark::do_yield_check(uint worker_id) { 3347 if (SuspendibleThreadSet::should_yield()) { 3348 if (worker_id == 0) { 3349 _g1h->g1_policy()->record_concurrent_pause(); 3350 } 3351 SuspendibleThreadSet::yield(); 3352 return true; 3353 } else { 3354 return false; 3355 } 3356 } 3357 3358 #ifndef PRODUCT 3359 // for debugging purposes 3360 void ConcurrentMark::print_finger() { 3361 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3362 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3363 for (uint i = 0; i < _max_worker_id; ++i) { 3364 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3365 } 3366 gclog_or_tty->cr(); 3367 } 3368 #endif 3369 3370 void CMTask::scan_object(oop obj) { 3371 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3372 3373 if (_cm->verbose_high()) { 3374 gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, 3375 _worker_id, p2i((void*) obj)); 3376 } 3377 3378 size_t obj_size = obj->size(); 3379 _words_scanned += obj_size; 3380 3381 obj->oop_iterate(_cm_oop_closure); 3382 statsOnly( ++_objs_scanned ); 3383 check_limits(); 3384 } 3385 3386 // Closure for iteration over bitmaps 3387 class CMBitMapClosure : public BitMapClosure { 3388 private: 3389 // the bitmap that is being iterated over 3390 CMBitMap* _nextMarkBitMap; 3391 ConcurrentMark* _cm; 3392 CMTask* _task; 3393 3394 public: 3395 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3396 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3397 3398 bool do_bit(size_t offset) { 3399 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3400 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3401 assert( addr < _cm->finger(), "invariant"); 3402 3403 statsOnly( _task->increase_objs_found_on_bitmap() ); 3404 assert(addr >= _task->finger(), "invariant"); 3405 3406 // We move that task's local finger along. 
3407 _task->move_finger_to(addr); 3408 3409 _task->scan_object(oop(addr)); 3410 // we only partially drain the local queue and global stack 3411 _task->drain_local_queue(true); 3412 _task->drain_global_stack(true); 3413 3414 // if the has_aborted flag has been raised, we need to bail out of 3415 // the iteration 3416 return !_task->has_aborted(); 3417 } 3418 }; 3419 3420 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3421 ConcurrentMark* cm, 3422 CMTask* task) 3423 : _g1h(g1h), _cm(cm), _task(task) { 3424 assert(_ref_processor == NULL, "should be initialized to NULL"); 3425 3426 if (G1UseConcMarkReferenceProcessing) { 3427 _ref_processor = g1h->ref_processor_cm(); 3428 assert(_ref_processor != NULL, "should not be NULL"); 3429 } 3430 } 3431 3432 void CMTask::setup_for_region(HeapRegion* hr) { 3433 assert(hr != NULL, 3434 "claim_region() should have filtered out NULL regions"); 3435 assert(!hr->is_continues_humongous(), 3436 "claim_region() should have filtered out continues humongous regions"); 3437 3438 if (_cm->verbose_low()) { 3439 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3440 _worker_id, p2i(hr)); 3441 } 3442 3443 _curr_region = hr; 3444 _finger = hr->bottom(); 3445 update_region_limit(); 3446 } 3447 3448 void CMTask::update_region_limit() { 3449 HeapRegion* hr = _curr_region; 3450 HeapWord* bottom = hr->bottom(); 3451 HeapWord* limit = hr->next_top_at_mark_start(); 3452 3453 if (limit == bottom) { 3454 if (_cm->verbose_low()) { 3455 gclog_or_tty->print_cr("[%u] found an empty region " 3456 "["PTR_FORMAT", "PTR_FORMAT")", 3457 _worker_id, p2i(bottom), p2i(limit)); 3458 } 3459 // The region was collected underneath our feet. 3460 // We set the finger to bottom to ensure that the bitmap 3461 // iteration that will follow this will not do anything. 3462 // (this is not a condition that holds when we set the region up, 3463 // as the region is not supposed to be empty in the first place) 3464 _finger = bottom; 3465 } else if (limit >= _region_limit) { 3466 assert(limit >= _finger, "peace of mind"); 3467 } else { 3468 assert(limit < _region_limit, "only way to get here"); 3469 // This can happen under some pretty unusual circumstances. An 3470 // evacuation pause empties the region underneath our feet (NTAMS 3471 // at bottom). We then do some allocation in the region (NTAMS 3472 // stays at bottom), followed by the region being used as a GC 3473 // alloc region (NTAMS will move to top() and the objects 3474 // originally below it will be grayed). All objects now marked in 3475 // the region are explicitly grayed, if below the global finger, 3476 // and in fact we do not need to scan anything else. So, we simply 3477 // set _finger to be limit to ensure that the bitmap iteration 3478 // doesn't do anything. 3479 _finger = limit; 3480 } 3481 3482 _region_limit = limit; 3483 } 3484 3485 void CMTask::giveup_current_region() { 3486 assert(_curr_region != NULL, "invariant"); 3487 if (_cm->verbose_low()) { 3488 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, 3489 _worker_id, p2i(_curr_region)); 3490 } 3491 clear_region_fields(); 3492 } 3493 3494 void CMTask::clear_region_fields() { 3495 // Values for these three fields that indicate that we're not 3496 // holding on to a region.
3497 _curr_region = NULL; 3498 _finger = NULL; 3499 _region_limit = NULL; 3500 } 3501 3502 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3503 if (cm_oop_closure == NULL) { 3504 assert(_cm_oop_closure != NULL, "invariant"); 3505 } else { 3506 assert(_cm_oop_closure == NULL, "invariant"); 3507 } 3508 _cm_oop_closure = cm_oop_closure; 3509 } 3510 3511 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3512 guarantee(nextMarkBitMap != NULL, "invariant"); 3513 3514 if (_cm->verbose_low()) { 3515 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3516 } 3517 3518 _nextMarkBitMap = nextMarkBitMap; 3519 clear_region_fields(); 3520 3521 _calls = 0; 3522 _elapsed_time_ms = 0.0; 3523 _termination_time_ms = 0.0; 3524 _termination_start_time_ms = 0.0; 3525 3526 #if _MARKING_STATS_ 3527 _local_pushes = 0; 3528 _local_pops = 0; 3529 _local_max_size = 0; 3530 _objs_scanned = 0; 3531 _global_pushes = 0; 3532 _global_pops = 0; 3533 _global_max_size = 0; 3534 _global_transfers_to = 0; 3535 _global_transfers_from = 0; 3536 _regions_claimed = 0; 3537 _objs_found_on_bitmap = 0; 3538 _satb_buffers_processed = 0; 3539 _steal_attempts = 0; 3540 _steals = 0; 3541 _aborted = 0; 3542 _aborted_overflow = 0; 3543 _aborted_cm_aborted = 0; 3544 _aborted_yield = 0; 3545 _aborted_timed_out = 0; 3546 _aborted_satb = 0; 3547 _aborted_termination = 0; 3548 #endif // _MARKING_STATS_ 3549 } 3550 3551 bool CMTask::should_exit_termination() { 3552 regular_clock_call(); 3553 // This is called when we are in the termination protocol. We should 3554 // quit if, for some reason, this task wants to abort or the global 3555 // stack is not empty (this means that we can get work from it). 3556 return !_cm->mark_stack_empty() || has_aborted(); 3557 } 3558 3559 void CMTask::reached_limit() { 3560 assert(_words_scanned >= _words_scanned_limit || 3561 _refs_reached >= _refs_reached_limit , 3562 "shouldn't have been called otherwise"); 3563 regular_clock_call(); 3564 } 3565 3566 void CMTask::regular_clock_call() { 3567 if (has_aborted()) return; 3568 3569 // First, we need to recalculate the words scanned and refs reached 3570 // limits for the next clock call. 3571 recalculate_limits(); 3572 3573 // During the regular clock call we do the following 3574 3575 // (1) If an overflow has been flagged, then we abort. 3576 if (_cm->has_overflown()) { 3577 set_has_aborted(); 3578 return; 3579 } 3580 3581 // If we are not concurrent (i.e. we're doing remark) we don't need 3582 // to check anything else. The other steps are only needed during 3583 // the concurrent marking phase. 3584 if (!concurrent()) return; 3585 3586 // (2) If marking has been aborted for Full GC, then we also abort. 3587 if (_cm->has_aborted()) { 3588 set_has_aborted(); 3589 statsOnly( ++_aborted_cm_aborted ); 3590 return; 3591 } 3592 3593 double curr_time_ms = os::elapsedVTime() * 1000.0; 3594 3595 // (3) If marking stats are enabled, then we update the step history. 
3596 #if _MARKING_STATS_ 3597 if (_words_scanned >= _words_scanned_limit) { 3598 ++_clock_due_to_scanning; 3599 } 3600 if (_refs_reached >= _refs_reached_limit) { 3601 ++_clock_due_to_marking; 3602 } 3603 3604 double last_interval_ms = curr_time_ms - _interval_start_time_ms; 3605 _interval_start_time_ms = curr_time_ms; 3606 _all_clock_intervals_ms.add(last_interval_ms); 3607 3608 if (_cm->verbose_medium()) { 3609 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, " 3610 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s", 3611 _worker_id, last_interval_ms, 3612 _words_scanned, 3613 (_words_scanned >= _words_scanned_limit) ? " (*)" : "", 3614 _refs_reached, 3615 (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); 3616 } 3617 #endif // _MARKING_STATS_ 3618 3619 // (4) We check whether we should yield. If we have to, then we abort. 3620 if (SuspendibleThreadSet::should_yield()) { 3621 // We should yield. To do this we abort the task. The caller is 3622 // responsible for yielding. 3623 set_has_aborted(); 3624 statsOnly( ++_aborted_yield ); 3625 return; 3626 } 3627 3628 // (5) We check whether we've reached our time quota. If we have, 3629 // then we abort. 3630 double elapsed_time_ms = curr_time_ms - _start_time_ms; 3631 if (elapsed_time_ms > _time_target_ms) { 3632 set_has_aborted(); 3633 _has_timed_out = true; 3634 statsOnly( ++_aborted_timed_out ); 3635 return; 3636 } 3637 3638 // (6) Finally, we check whether there are enough completed SATB 3639 // buffers available for processing. If there are, we abort. 3640 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3641 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 3642 if (_cm->verbose_low()) { 3643 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers", 3644 _worker_id); 3645 } 3646 // we do need to process SATB buffers; we'll abort and restart 3647 // the marking task to do so 3648 set_has_aborted(); 3649 statsOnly( ++_aborted_satb ); 3650 return; 3651 } 3652 } 3653 3654 void CMTask::recalculate_limits() { 3655 _real_words_scanned_limit = _words_scanned + words_scanned_period; 3656 _words_scanned_limit = _real_words_scanned_limit; 3657 3658 _real_refs_reached_limit = _refs_reached + refs_reached_period; 3659 _refs_reached_limit = _real_refs_reached_limit; 3660 } 3661 3662 void CMTask::decrease_limits() { 3663 // This is called when we believe that we're going to do an infrequent 3664 // operation which will increase the per byte scanned cost (i.e. move 3665 // entries to/from the global stack). It basically tries to decrease the 3666 // scanning limit so that the clock is called earlier.
3667 3668 if (_cm->verbose_medium()) { 3669 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id); 3670 } 3671 3672 _words_scanned_limit = _real_words_scanned_limit - 3673 3 * words_scanned_period / 4; 3674 _refs_reached_limit = _real_refs_reached_limit - 3675 3 * refs_reached_period / 4; 3676 } 3677 3678 void CMTask::move_entries_to_global_stack() { 3679 // local array where we'll store the entries that will be popped 3680 // from the local queue 3681 oop buffer[global_stack_transfer_size]; 3682 3683 int n = 0; 3684 oop obj; 3685 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 3686 buffer[n] = obj; 3687 ++n; 3688 } 3689 3690 if (n > 0) { 3691 // we popped at least one entry from the local queue 3692 3693 statsOnly( ++_global_transfers_to; _local_pops += n ); 3694 3695 if (!_cm->mark_stack_push(buffer, n)) { 3696 if (_cm->verbose_low()) { 3697 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow", 3698 _worker_id); 3699 } 3700 set_has_aborted(); 3701 } else { 3702 // the transfer was successful 3703 3704 if (_cm->verbose_medium()) { 3705 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack", 3706 _worker_id, n); 3707 } 3708 statsOnly( int tmp_size = _cm->mark_stack_size(); 3709 if (tmp_size > _global_max_size) { 3710 _global_max_size = tmp_size; 3711 } 3712 _global_pushes += n ); 3713 } 3714 } 3715 3716 // this operation was quite expensive, so decrease the limits 3717 decrease_limits(); 3718 } 3719 3720 void CMTask::get_entries_from_global_stack() { 3721 // local array where we'll store the entries that will be popped 3722 // from the global stack. 3723 oop buffer[global_stack_transfer_size]; 3724 int n; 3725 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 3726 assert(n <= global_stack_transfer_size, 3727 "we should not pop more than the given limit"); 3728 if (n > 0) { 3729 // yes, we did actually pop at least one entry 3730 3731 statsOnly( ++_global_transfers_from; _global_pops += n ); 3732 if (_cm->verbose_medium()) { 3733 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack", 3734 _worker_id, n); 3735 } 3736 for (int i = 0; i < n; ++i) { 3737 bool success = _task_queue->push(buffer[i]); 3738 // We only call this when the local queue is empty or under a 3739 // given target limit. So, we do not expect this push to fail. 3740 assert(success, "invariant"); 3741 } 3742 3743 statsOnly( int tmp_size = _task_queue->size(); 3744 if (tmp_size > _local_max_size) { 3745 _local_max_size = tmp_size; 3746 } 3747 _local_pushes += n ); 3748 } 3749 3750 // this operation was quite expensive, so decrease the limits 3751 decrease_limits(); 3752 } 3753 3754 void CMTask::drain_local_queue(bool partially) { 3755 if (has_aborted()) return; 3756 3757 // Decide what the target size is, depending on whether we're going to 3758 // drain it partially (so that other tasks can steal if they run out 3759 // of things to do) or totally (at the very end).
3760 size_t target_size; 3761 if (partially) { 3762 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 3763 } else { 3764 target_size = 0; 3765 } 3766 3767 if (_task_queue->size() > target_size) { 3768 if (_cm->verbose_high()) { 3769 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT, 3770 _worker_id, target_size); 3771 } 3772 3773 oop obj; 3774 bool ret = _task_queue->pop_local(obj); 3775 while (ret) { 3776 statsOnly( ++_local_pops ); 3777 3778 if (_cm->verbose_high()) { 3779 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id, 3780 p2i((void*) obj)); 3781 } 3782 3783 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 3784 assert(!_g1h->is_on_master_free_list( 3785 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 3786 3787 scan_object(obj); 3788 3789 if (_task_queue->size() <= target_size || has_aborted()) { 3790 ret = false; 3791 } else { 3792 ret = _task_queue->pop_local(obj); 3793 } 3794 } 3795 3796 if (_cm->verbose_high()) { 3797 gclog_or_tty->print_cr("[%u] drained local queue, size = %u", 3798 _worker_id, _task_queue->size()); 3799 } 3800 } 3801 } 3802 3803 void CMTask::drain_global_stack(bool partially) { 3804 if (has_aborted()) return; 3805 3806 // We have a policy to drain the local queue before we attempt to 3807 // drain the global stack. 3808 assert(partially || _task_queue->size() == 0, "invariant"); 3809 3810 // Decide what the target size is, depending on whether we're going to 3811 // drain it partially (so that other tasks can steal if they run out 3812 // of things to do) or totally (at the very end). Notice that, 3813 // because we move entries from the global stack in chunks or 3814 // because another task might be doing the same, we might in fact 3815 // drop below the target. But, this is not a problem. 3816 size_t target_size; 3817 if (partially) { 3818 target_size = _cm->partial_mark_stack_size_target(); 3819 } else { 3820 target_size = 0; 3821 } 3822 3823 if (_cm->mark_stack_size() > target_size) { 3824 if (_cm->verbose_low()) { 3825 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT, 3826 _worker_id, target_size); 3827 } 3828 3829 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 3830 get_entries_from_global_stack(); 3831 drain_local_queue(partially); 3832 } 3833 3834 if (_cm->verbose_low()) { 3835 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT, 3836 _worker_id, _cm->mark_stack_size()); 3837 } 3838 } 3839 } 3840 3841 // The SATB Queue has several assumptions on whether to call the par or 3842 // non-par versions of the methods. This is why some of the code is 3843 // replicated. We should really get rid of the single-threaded version 3844 // of the code to simplify things. 3845 void CMTask::drain_satb_buffers() { 3846 if (has_aborted()) return; 3847 3848 // We set this so that the regular clock knows that we're in the 3849 // middle of draining buffers and doesn't set the abort flag when it 3850 // notices that SATB buffers are available for draining. It'd be 3851 // very counterproductive if it did that. :-) 3852 _draining_satb_buffers = true; 3853 3854 CMObjectClosure oc(this); 3855 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3856 satb_mq_set.set_closure(_worker_id, &oc); 3857 3858 // This keeps claiming and applying the closure to completed buffers 3859 // until we run out of buffers or we need to abort.
3860 while (!has_aborted() && 3861 satb_mq_set.apply_closure_to_completed_buffer(_worker_id)) { 3862 if (_cm->verbose_medium()) { 3863 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3864 } 3865 statsOnly( ++_satb_buffers_processed ); 3866 regular_clock_call(); 3867 } 3868 3869 _draining_satb_buffers = false; 3870 3871 assert(has_aborted() || 3872 concurrent() || 3873 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3874 3875 satb_mq_set.set_closure(_worker_id, NULL); 3876 3877 // again, this was a potentially expensive operation, decrease the 3878 // limits to get the regular clock call early 3879 decrease_limits(); 3880 } 3881 3882 void CMTask::print_stats() { 3883 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3884 _worker_id, _calls); 3885 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3886 _elapsed_time_ms, _termination_time_ms); 3887 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3888 _step_times_ms.num(), _step_times_ms.avg(), 3889 _step_times_ms.sd()); 3890 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3891 _step_times_ms.maximum(), _step_times_ms.sum()); 3892 3893 #if _MARKING_STATS_ 3894 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3895 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3896 _all_clock_intervals_ms.sd()); 3897 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3898 _all_clock_intervals_ms.maximum(), 3899 _all_clock_intervals_ms.sum()); 3900 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", 3901 _clock_due_to_scanning, _clock_due_to_marking); 3902 gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", 3903 _objs_scanned, _objs_found_on_bitmap); 3904 gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", 3905 _local_pushes, _local_pops, _local_max_size); 3906 gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", 3907 _global_pushes, _global_pops, _global_max_size); 3908 gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", 3909 _global_transfers_to,_global_transfers_from); 3910 gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed); 3911 gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); 3912 gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", 3913 _steal_attempts, _steals); 3914 gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); 3915 gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", 3916 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3917 gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", 3918 _aborted_timed_out, _aborted_satb, _aborted_termination); 3919 #endif // _MARKING_STATS_ 3920 } 3921 3922 /***************************************************************************** 3923 3924 The do_marking_step(time_target_ms, ...) method is the building 3925 block of the parallel marking framework. It can be called in parallel 3926 with other invocations of do_marking_step() on different tasks 3927 (but only one per task, obviously) and concurrently with the 3928 mutator threads, or during remark, hence it eliminates the need 3929 for two versions of the code. When called during remark, it will 3930 pick up from where the task left off during the concurrent marking 3931 phase. 
Interestingly, tasks are also claimable during evacuation 3932 pauses, since do_marking_step() ensures that it aborts before 3933 it needs to yield. 3934 3935 The data structures that it uses to do marking work are the 3936 following: 3937 3938 (1) Marking Bitmap. If there are gray objects that appear only 3939 on the bitmap (this happens either when dealing with an overflow 3940 or when the initial marking phase has simply marked the roots 3941 and didn't push them on the stack), then tasks claim heap 3942 regions whose bitmap they then scan to find gray objects. A 3943 global finger indicates where the end of the last claimed region 3944 is. A local finger indicates how far into the region a task has 3945 scanned. The two fingers are used to determine how to gray an 3946 object (i.e. whether simply marking it is OK, as it will be 3947 visited by a task in the future, or whether it also needs to be 3948 pushed on a stack). 3949 3950 (2) Local Queue. The local queue of the task, which is accessed 3951 reasonably efficiently by the task. Other tasks can steal from 3952 it when they run out of work. Throughout the marking phase, a 3953 task attempts to keep its local queue short but not totally 3954 empty, so that entries are available for stealing by other 3955 tasks. Only when there is no more work will a task totally 3956 drain its local queue. 3957 3958 (3) Global Mark Stack. This handles local queue overflow. During 3959 marking only sets of entries are moved between it and the local 3960 queues, as access to it requires a mutex and more fine-grained 3961 interaction with it might cause contention. If it 3962 overflows, then the marking phase should restart and iterate 3963 over the bitmap to identify gray objects. Throughout the marking 3964 phase, tasks attempt to keep the global mark stack at a small 3965 length but not totally empty, so that entries are available for 3966 popping by other tasks. Only when there is no more work will 3967 tasks totally drain the global mark stack. 3968 3969 (4) SATB Buffer Queue. This is where completed SATB buffers are 3970 made available. Buffers are regularly removed from this queue 3971 and scanned for roots, so that the queue doesn't get too 3972 long. During remark, all completed buffers are processed, as 3973 well as the filled-in parts of any uncompleted buffers. 3974 3975 The do_marking_step() method tries to abort when the time target 3976 has been reached. There are a few other cases when the 3977 do_marking_step() method also aborts: 3978 3979 (1) When the marking phase has been aborted (after a Full GC). 3980 3981 (2) When a global overflow (on the global stack) has been 3982 triggered. Before the task aborts, it will actually sync up with 3983 the other tasks to ensure that all the marking data structures 3984 (local queues, stacks, fingers etc.) are re-initialized so that 3985 when do_marking_step() completes, the marking phase can 3986 immediately restart. 3987 3988 (3) When enough completed SATB buffers are available. The 3989 do_marking_step() method only tries to drain SATB buffers right 3990 at the beginning. So, if enough buffers are available, the 3991 marking step aborts and the SATB buffers are processed at 3992 the beginning of the next invocation. 3993 3994 (4) To yield. When we have to yield, we abort and yield 3995 right at the end of do_marking_step(). This saves us from a lot 3996 of hassle as, by yielding, we might allow a Full GC.
If this 3997 happens then objects will be compacted underneath our feet, the 3998 heap might shrink, etc. We save checking for this by just 3999 aborting and doing the yield right at the end. 4000 4001 From the above it follows that the do_marking_step() method should 4002 be called in a loop (or, otherwise, regularly) until it completes. 4003 4004 If a marking step completes without its has_aborted() flag being 4005 true, it means it has completed the current marking phase (and 4006 also all other marking tasks have done so and have all synced up). 4007 4008 A method called regular_clock_call() is invoked "regularly" (in 4009 sub-ms intervals) throughout marking. It is this clock method that 4010 checks all the abort conditions which were mentioned above and 4011 decides when the task should abort. A work-based scheme is used to 4012 trigger this clock method: when the number of object words the 4013 marking phase has scanned or the number of references the marking 4014 phase has visited reach a given limit. Additional invocations of 4015 the clock method have been planted in a few other strategic places 4016 too. The initial reason for the clock method was to avoid calling 4017 vtime too regularly, as it is quite expensive. So, once it was in 4018 place, it was natural to piggy-back all the other conditions on it 4019 too and not constantly check them throughout the code. 4020 4021 If do_termination is true then do_marking_step will enter its 4022 termination protocol. 4023 4024 The value of is_serial must be true when do_marking_step is being 4025 called serially (i.e. by the VMThread) and do_marking_step should 4026 skip any synchronization in the termination and overflow code. 4027 Examples include the serial remark code and the serial reference 4028 processing closures. 4029 4030 The value of is_serial must be false when do_marking_step is 4031 being called by any of the worker threads in a work gang. 4032 Examples include the concurrent marking code (CMMarkingTask), 4033 the MT remark code, and the MT reference processing closures. 4034 4035 *****************************************************************************/ 4036 4037 void CMTask::do_marking_step(double time_target_ms, 4038 bool do_termination, 4039 bool is_serial) { 4040 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 4041 assert(concurrent() == _cm->concurrent(), "they should be the same"); 4042 4043 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); 4044 assert(_task_queues != NULL, "invariant"); 4045 assert(_task_queue != NULL, "invariant"); 4046 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); 4047 4048 assert(!_claimed, 4049 "only one thread should claim this task at any one time"); 4050 4051 // OK, this doesn't safeguard against all possible scenarios, as it is 4052 // possible for two threads to set the _claimed flag at the same 4053 // time. But it is only for debugging purposes anyway and it will 4054 // catch most problems. 4055 _claimed = true; 4056 4057 _start_time_ms = os::elapsedVTime() * 1000.0; 4058 statsOnly( _interval_start_time_ms = _start_time_ms ); 4059 4060 // If do_stealing is true then do_marking_step will attempt to 4061 // steal work from the other CMTasks. It only makes sense to 4062 // enable stealing when the termination protocol is enabled 4063 // and do_marking_step() is not being called serially.
4064 bool do_stealing = do_termination && !is_serial; 4065 4066 double diff_prediction_ms = 4067 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 4068 _time_target_ms = time_target_ms - diff_prediction_ms; 4069 4070 // set up the variables that are used in the work-based scheme to 4071 // call the regular clock method 4072 _words_scanned = 0; 4073 _refs_reached = 0; 4074 recalculate_limits(); 4075 4076 // clear all flags 4077 clear_has_aborted(); 4078 _has_timed_out = false; 4079 _draining_satb_buffers = false; 4080 4081 ++_calls; 4082 4083 if (_cm->verbose_low()) { 4084 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 4085 "target = %1.2lfms >>>>>>>>>>", 4086 _worker_id, _calls, _time_target_ms); 4087 } 4088 4089 // Set up the bitmap and oop closures. Anything that uses them is 4090 // eventually called from this method, so it is OK to allocate these 4091 // statically. 4092 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 4093 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 4094 set_cm_oop_closure(&cm_oop_closure); 4095 4096 if (_cm->has_overflown()) { 4097 // This can happen if the mark stack overflows during a GC pause 4098 // and this task, after a yield point, restarts. We have to abort 4099 // as we need to get into the overflow protocol which happens 4100 // right at the end of this task. 4101 set_has_aborted(); 4102 } 4103 4104 // First drain any available SATB buffers. After this, we will not 4105 // look at SATB buffers before the next invocation of this method. 4106 // If enough completed SATB buffers are queued up, the regular clock 4107 // will abort this task so that it restarts. 4108 drain_satb_buffers(); 4109 // ...then partially drain the local queue and the global stack 4110 drain_local_queue(true); 4111 drain_global_stack(true); 4112 4113 do { 4114 if (!has_aborted() && _curr_region != NULL) { 4115 // This means that we're already holding on to a region. 4116 assert(_finger != NULL, "if region is not NULL, then the finger " 4117 "should not be NULL either"); 4118 4119 // We might have restarted this task after an evacuation pause 4120 // which might have evacuated the region we're holding on to 4121 // underneath our feet. Let's read its limit again to make sure 4122 // that we do not iterate over a region of the heap that 4123 // contains garbage (update_region_limit() will also move 4124 // _finger to the start of the region if it is found empty). 4125 update_region_limit(); 4126 // We will start from _finger not from the start of the region, 4127 // as we might be restarting this task after aborting half-way 4128 // through scanning this region. In this case, _finger points to 4129 // the address where we last found a marked object. If this is a 4130 // fresh region, _finger points to start(). 4131 MemRegion mr = MemRegion(_finger, _region_limit); 4132 4133 if (_cm->verbose_low()) { 4134 gclog_or_tty->print_cr("[%u] we're scanning part " 4135 "["PTR_FORMAT", "PTR_FORMAT") " 4136 "of region "HR_FORMAT, 4137 _worker_id, p2i(_finger), p2i(_region_limit), 4138 HR_FORMAT_PARAMS(_curr_region)); 4139 } 4140 4141 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 4142 "humongous regions should go around loop once only"); 4143 4144 // Some special cases: 4145 // If the memory region is empty, we can just give up the region. 
4146 // If the current region is humongous then we only need to check 4147 // the bitmap for the bit associated with the start of the object, 4148 // scan the object if it's live, and give up the region. 4149 // Otherwise, let's iterate over the bitmap of the part of the region 4150 // that is left. 4151 // If the iteration is successful, give up the region. 4152 if (mr.is_empty()) { 4153 giveup_current_region(); 4154 regular_clock_call(); 4155 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 4156 if (_nextMarkBitMap->isMarked(mr.start())) { 4157 // The object is marked - apply the closure 4158 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 4159 bitmap_closure.do_bit(offset); 4160 } 4161 // Even if this task aborted while scanning the humongous object 4162 // we can (and should) give up the current region. 4163 giveup_current_region(); 4164 regular_clock_call(); 4165 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4166 giveup_current_region(); 4167 regular_clock_call(); 4168 } else { 4169 assert(has_aborted(), "currently the only way to do so"); 4170 // The only way to abort the bitmap iteration is to return 4171 // false from the do_bit() method. However, inside the 4172 // do_bit() method we move the _finger to point to the 4173 // object currently being looked at. So, if we bail out, we 4174 // have definitely set _finger to something non-null. 4175 assert(_finger != NULL, "invariant"); 4176 4177 // Region iteration was actually aborted. So now _finger 4178 // points to the address of the object we last scanned. If we 4179 // leave it there, when we restart this task, we will rescan 4180 // the object. It is easy to avoid this. We move the finger by 4181 // enough to point to the next possible object header (the 4182 // bitmap knows by how much we need to move it as it knows its 4183 // granularity). 4184 assert(_finger < _region_limit, "invariant"); 4185 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 4186 // Check if bitmap iteration was aborted while scanning the last object 4187 if (new_finger >= _region_limit) { 4188 giveup_current_region(); 4189 } else { 4190 move_finger_to(new_finger); 4191 } 4192 } 4193 } 4194 // At this point we have either completed iterating over the 4195 // region we were holding on to, or we have aborted. 4196 4197 // We then partially drain the local queue and the global stack. 4198 // (Do we really need this?) 4199 drain_local_queue(true); 4200 drain_global_stack(true); 4201 4202 // Read the note on the claim_region() method on why it might 4203 // return NULL with potentially more regions available for 4204 // claiming and why we have to check out_of_regions() to determine 4205 // whether we're done or not. 4206 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4207 // We are going to try to claim a new region. We should have 4208 // given up on the previous one. 4209 // Separated the asserts so that we know which one fires. 
4210 assert(_curr_region == NULL, "invariant"); 4211 assert(_finger == NULL, "invariant"); 4212 assert(_region_limit == NULL, "invariant"); 4213 if (_cm->verbose_low()) { 4214 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id); 4215 } 4216 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 4217 if (claimed_region != NULL) { 4218 // Yes, we managed to claim one 4219 statsOnly( ++_regions_claimed ); 4220 4221 if (_cm->verbose_low()) { 4222 gclog_or_tty->print_cr("[%u] we successfully claimed " 4223 "region "PTR_FORMAT, 4224 _worker_id, p2i(claimed_region)); 4225 } 4226 4227 setup_for_region(claimed_region); 4228 assert(_curr_region == claimed_region, "invariant"); 4229 } 4230 // It is important to call the regular clock here. It might take 4231 // a while to claim a region if, for example, we hit a large 4232 // block of empty regions. So we need to call the regular clock 4233 // method once round the loop to make sure it's called 4234 // frequently enough. 4235 regular_clock_call(); 4236 } 4237 4238 if (!has_aborted() && _curr_region == NULL) { 4239 assert(_cm->out_of_regions(), 4240 "at this point we should be out of regions"); 4241 } 4242 } while ( _curr_region != NULL && !has_aborted()); 4243 4244 if (!has_aborted()) { 4245 // We cannot check whether the global stack is empty, since other 4246 // tasks might be pushing objects to it concurrently. 4247 assert(_cm->out_of_regions(), 4248 "at this point we should be out of regions"); 4249 4250 if (_cm->verbose_low()) { 4251 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id); 4252 } 4253 4254 // Try to reduce the number of available SATB buffers so that 4255 // remark has less work to do. 4256 drain_satb_buffers(); 4257 } 4258 4259 // Since we've done everything else, we can now totally drain the 4260 // local queue and global stack. 4261 drain_local_queue(false); 4262 drain_global_stack(false); 4263 4264 // Attempt at work stealing from other tasks' queues. 4265 if (do_stealing && !has_aborted()) { 4266 // We have not aborted. This means that we have finished all that 4267 // we could. Let's try to do some stealing... 4268 4269 // We cannot check whether the global stack is empty, since other 4270 // tasks might be pushing objects to it concurrently. 4271 assert(_cm->out_of_regions() && _task_queue->size() == 0, 4272 "only way to reach here"); 4273 4274 if (_cm->verbose_low()) { 4275 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id); 4276 } 4277 4278 while (!has_aborted()) { 4279 oop obj; 4280 statsOnly( ++_steal_attempts ); 4281 4282 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 4283 if (_cm->verbose_medium()) { 4284 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully", 4285 _worker_id, p2i((void*) obj)); 4286 } 4287 4288 statsOnly( ++_steals ); 4289 4290 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 4291 "any stolen object should be marked"); 4292 scan_object(obj); 4293 4294 // And since we're towards the end, let's totally drain the 4295 // local queue and global stack. 4296 drain_local_queue(false); 4297 drain_global_stack(false); 4298 } else { 4299 break; 4300 } 4301 } 4302 } 4303 4304 // If we are about to wrap up and go into termination, check if we 4305 // should raise the overflow flag. 4306 if (do_termination && !has_aborted()) { 4307 if (_cm->force_overflow()->should_force()) { 4308 _cm->set_has_overflown(); 4309 regular_clock_call(); 4310 } 4311 } 4312 4313 // We still haven't aborted. Now, let's try to get into the 4314 // termination protocol.
4315 if (do_termination && !has_aborted()) { 4316 // We cannot check whether the global stack is empty, since other 4317 // tasks might be concurrently pushing objects on it. 4318 // Separated the asserts so that we know which one fires. 4319 assert(_cm->out_of_regions(), "only way to reach here"); 4320 assert(_task_queue->size() == 0, "only way to reach here"); 4321 4322 if (_cm->verbose_low()) { 4323 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); 4324 } 4325 4326 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 4327 4328 // The CMTask class also extends the TerminatorTerminator class, 4329 // hence its should_exit_termination() method will also decide 4330 // whether to exit the termination protocol or not. 4331 bool finished = (is_serial || 4332 _cm->terminator()->offer_termination(this)); 4333 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 4334 _termination_time_ms += 4335 termination_end_time_ms - _termination_start_time_ms; 4336 4337 if (finished) { 4338 // We're all done. 4339 4340 if (_worker_id == 0) { 4341 // let's allow task 0 to do this 4342 if (concurrent()) { 4343 assert(_cm->concurrent_marking_in_progress(), "invariant"); 4344 // we need to set this to false before the next 4345 // safepoint. This way we ensure that the marking phase 4346 // doesn't observe any more heap expansions. 4347 _cm->clear_concurrent_marking_in_progress(); 4348 } 4349 } 4350 4351 // We can now guarantee that the global stack is empty, since 4352 // all other tasks have finished. We separated the guarantees so 4353 // that, if a condition is false, we can immediately find out 4354 // which one. 4355 guarantee(_cm->out_of_regions(), "only way to reach here"); 4356 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 4357 guarantee(_task_queue->size() == 0, "only way to reach here"); 4358 guarantee(!_cm->has_overflown(), "only way to reach here"); 4359 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 4360 4361 if (_cm->verbose_low()) { 4362 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); 4363 } 4364 } else { 4365 // Apparently there's more work to do. Let's abort this task. The 4366 // caller will restart it and we can hopefully find more things to do. 4367 4368 if (_cm->verbose_low()) { 4369 gclog_or_tty->print_cr("[%u] apparently there is more work to do", 4370 _worker_id); 4371 } 4372 4373 set_has_aborted(); 4374 statsOnly( ++_aborted_termination ); 4375 } 4376 } 4377 4378 // Mainly for debugging purposes to make sure that a pointer to the 4379 // closure which was statically allocated in this frame doesn't 4380 // escape it by accident. 4381 set_cm_oop_closure(NULL); 4382 double end_time_ms = os::elapsedVTime() * 1000.0; 4383 double elapsed_time_ms = end_time_ms - _start_time_ms; 4384 // Update the step history. 4385 _step_times_ms.add(elapsed_time_ms); 4386 4387 if (has_aborted()) { 4388 // The task was aborted for some reason. 4389 4390 statsOnly( ++_aborted ); 4391 4392 if (_has_timed_out) { 4393 double diff_ms = elapsed_time_ms - _time_target_ms; 4394 // Keep statistics of how well we did with respect to hitting 4395 // our target only if we actually timed out (if we aborted for 4396 // other reasons, then the results might get skewed). 4397 _marking_step_diffs_ms.add(diff_ms); 4398 } 4399 4400 if (_cm->has_overflown()) { 4401 // This is the interesting one. We aborted because a global 4402 // overflow was raised. This means we have to restart the 4403 // marking phase and start iterating over regions.
However, in 4404 // order to do this we have to make sure that all tasks stop 4405 // what they are doing and re-initialize in a safe manner. We 4406 // will achieve this with the use of two barrier sync points. 4407 4408 if (_cm->verbose_low()) { 4409 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4410 } 4411 4412 if (!is_serial) { 4413 // We only need to enter the sync barrier if being called 4414 // from a parallel context 4415 _cm->enter_first_sync_barrier(_worker_id); 4416 4417 // When we exit this sync barrier we know that all tasks have 4418 // stopped doing marking work. So, it's now safe to 4419 // re-initialize our data structures. At the end of this method, 4420 // task 0 will clear the global data structures. 4421 } 4422 4423 statsOnly( ++_aborted_overflow ); 4424 4425 // We clear the local state of this task... 4426 clear_region_fields(); 4427 4428 if (!is_serial) { 4429 // ...and enter the second barrier. 4430 _cm->enter_second_sync_barrier(_worker_id); 4431 } 4432 // At this point, if we're in the concurrent phase of 4433 // marking, everything has been re-initialized and we're 4434 // ready to restart. 4435 } 4436 4437 if (_cm->verbose_low()) { 4438 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4439 "elapsed = %1.2lfms <<<<<<<<<<", 4440 _worker_id, _time_target_ms, elapsed_time_ms); 4441 if (_cm->has_aborted()) { 4442 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4443 _worker_id); 4444 } 4445 } 4446 } else { 4447 if (_cm->verbose_low()) { 4448 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4449 "elapsed = %1.2lfms <<<<<<<<<<", 4450 _worker_id, _time_target_ms, elapsed_time_ms); 4451 } 4452 } 4453 4454 _claimed = false; 4455 } 4456 4457 CMTask::CMTask(uint worker_id, 4458 ConcurrentMark* cm, 4459 size_t* marked_bytes, 4460 BitMap* card_bm, 4461 CMTaskQueue* task_queue, 4462 CMTaskQueueSet* task_queues) 4463 : _g1h(G1CollectedHeap::heap()), 4464 _worker_id(worker_id), _cm(cm), 4465 _claimed(false), 4466 _nextMarkBitMap(NULL), _hash_seed(17), 4467 _task_queue(task_queue), 4468 _task_queues(task_queues), 4469 _cm_oop_closure(NULL), 4470 _marked_bytes_array(marked_bytes), 4471 _card_bm(card_bm) { 4472 guarantee(task_queue != NULL, "invariant"); 4473 guarantee(task_queues != NULL, "invariant"); 4474 4475 statsOnly( _clock_due_to_scanning = 0; 4476 _clock_due_to_marking = 0 ); 4477 4478 _marking_step_diffs_ms.add(0.5); 4479 } 4480 4481 // These are formatting macros that are used below to ensure 4482 // consistent formatting. The *_H_* versions are used to format the 4483 // header for a particular value and they should be kept consistent 4484 // with the corresponding macro. Also note that most of the macros add 4485 // the necessary white space (as a prefix) which makes them a bit 4486 // easier to compose. 4487 4488 // All the output lines are prefixed with this string to be able to 4489 // identify them easily in a large log file.
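// For example, a per-region line produced with these formats has roughly
// the following shape (the values shown here are made up, for
// illustration only):
//
//   ###  OLD 0x00000000f0000000-0x00000000f0400000   4194304   4194304   3145728           12.3     16384       128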
4490 #define G1PPRL_LINE_PREFIX "###" 4491 4492 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT 4493 #ifdef _LP64 4494 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 4495 #else // _LP64 4496 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 4497 #endif // _LP64 4498 4499 // For per-region info 4500 #define G1PPRL_TYPE_FORMAT " %-4s" 4501 #define G1PPRL_TYPE_H_FORMAT " %4s" 4502 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) 4503 #define G1PPRL_BYTE_H_FORMAT " %9s" 4504 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 4505 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 4506 4507 // For summary info 4508 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT 4509 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT 4510 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" 4511 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" 4512 4513 G1PrintRegionLivenessInfoClosure:: 4514 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) 4515 : _out(out), 4516 _total_used_bytes(0), _total_capacity_bytes(0), 4517 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4518 _hum_used_bytes(0), _hum_capacity_bytes(0), 4519 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 4520 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 4521 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4522 MemRegion g1_reserved = g1h->g1_reserved(); 4523 double now = os::elapsedTime(); 4524 4525 // Print the header of the output. 4526 _out->cr(); 4527 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 4528 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" 4529 G1PPRL_SUM_ADDR_FORMAT("reserved") 4530 G1PPRL_SUM_BYTE_FORMAT("region-size"), 4531 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 4532 HeapRegion::GrainBytes); 4533 _out->print_cr(G1PPRL_LINE_PREFIX); 4534 _out->print_cr(G1PPRL_LINE_PREFIX 4535 G1PPRL_TYPE_H_FORMAT 4536 G1PPRL_ADDR_BASE_H_FORMAT 4537 G1PPRL_BYTE_H_FORMAT 4538 G1PPRL_BYTE_H_FORMAT 4539 G1PPRL_BYTE_H_FORMAT 4540 G1PPRL_DOUBLE_H_FORMAT 4541 G1PPRL_BYTE_H_FORMAT 4542 G1PPRL_BYTE_H_FORMAT, 4543 "type", "address-range", 4544 "used", "prev-live", "next-live", "gc-eff", 4545 "remset", "code-roots"); 4546 _out->print_cr(G1PPRL_LINE_PREFIX 4547 G1PPRL_TYPE_H_FORMAT 4548 G1PPRL_ADDR_BASE_H_FORMAT 4549 G1PPRL_BYTE_H_FORMAT 4550 G1PPRL_BYTE_H_FORMAT 4551 G1PPRL_BYTE_H_FORMAT 4552 G1PPRL_DOUBLE_H_FORMAT 4553 G1PPRL_BYTE_H_FORMAT 4554 G1PPRL_BYTE_H_FORMAT, 4555 "", "", 4556 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 4557 "(bytes)", "(bytes)"); 4558 } 4559 4560 // It takes as a parameter a reference to one of the _hum_* fields; it 4561 // deduces the corresponding value for a region in a humongous region 4562 // series (either the region size, or what's left if the _hum_* field 4563 // is < the region size), and updates the _hum_* field accordingly. 4564 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { 4565 size_t bytes = 0; 4566 // The > 0 check is to deal with the prev and next live bytes which 4567 // could be 0. 4568 if (*hum_bytes > 0) { 4569 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); 4570 *hum_bytes -= bytes; 4571 } 4572 return bytes; 4573 } 4574 4575 // It deduces the values for a region in a humongous region series 4576 // from the _hum_* fields and updates those accordingly. It assumes 4577 // that the _hum_* fields have already been set up from the "starts 4578 // humongous" region and we visit the regions in address order.
4579 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4580 size_t* capacity_bytes, 4581 size_t* prev_live_bytes, 4582 size_t* next_live_bytes) { 4583 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4584 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4585 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4586 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4587 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4588 } 4589 4590 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4591 const char* type = r->get_type_str(); 4592 HeapWord* bottom = r->bottom(); 4593 HeapWord* end = r->end(); 4594 size_t capacity_bytes = r->capacity(); 4595 size_t used_bytes = r->used(); 4596 size_t prev_live_bytes = r->live_bytes(); 4597 size_t next_live_bytes = r->next_live_bytes(); 4598 double gc_eff = r->gc_efficiency(); 4599 size_t remset_bytes = r->rem_set()->mem_size(); 4600 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4601 4602 if (r->is_starts_humongous()) { 4603 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4604 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4605 "they should have been zeroed after the last time we used them"); 4606 // Set up the _hum_* fields. 4607 _hum_capacity_bytes = capacity_bytes; 4608 _hum_used_bytes = used_bytes; 4609 _hum_prev_live_bytes = prev_live_bytes; 4610 _hum_next_live_bytes = next_live_bytes; 4611 get_hum_bytes(&used_bytes, &capacity_bytes, 4612 &prev_live_bytes, &next_live_bytes); 4613 end = bottom + HeapRegion::GrainWords; 4614 } else if (r->is_continues_humongous()) { 4615 get_hum_bytes(&used_bytes, &capacity_bytes, 4616 &prev_live_bytes, &next_live_bytes); 4617 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4618 } 4619 4620 _total_used_bytes += used_bytes; 4621 _total_capacity_bytes += capacity_bytes; 4622 _total_prev_live_bytes += prev_live_bytes; 4623 _total_next_live_bytes += next_live_bytes; 4624 _total_remset_bytes += remset_bytes; 4625 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4626 4627 // Print a line for this particular region. 4628 _out->print_cr(G1PPRL_LINE_PREFIX 4629 G1PPRL_TYPE_FORMAT 4630 G1PPRL_ADDR_BASE_FORMAT 4631 G1PPRL_BYTE_FORMAT 4632 G1PPRL_BYTE_FORMAT 4633 G1PPRL_BYTE_FORMAT 4634 G1PPRL_DOUBLE_FORMAT 4635 G1PPRL_BYTE_FORMAT 4636 G1PPRL_BYTE_FORMAT, 4637 type, p2i(bottom), p2i(end), 4638 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4639 remset_bytes, strong_code_roots_bytes); 4640 4641 return false; 4642 } 4643 4644 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4645 // add static memory usages to remembered set sizes 4646 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4647 // Print the footer of the output. 
4648 _out->print_cr(G1PPRL_LINE_PREFIX); 4649 _out->print_cr(G1PPRL_LINE_PREFIX 4650 " SUMMARY" 4651 G1PPRL_SUM_MB_FORMAT("capacity") 4652 G1PPRL_SUM_MB_PERC_FORMAT("used") 4653 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4654 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4655 G1PPRL_SUM_MB_FORMAT("remset") 4656 G1PPRL_SUM_MB_FORMAT("code-roots"), 4657 bytes_to_mb(_total_capacity_bytes), 4658 bytes_to_mb(_total_used_bytes), 4659 perc(_total_used_bytes, _total_capacity_bytes), 4660 bytes_to_mb(_total_prev_live_bytes), 4661 perc(_total_prev_live_bytes, _total_capacity_bytes), 4662 bytes_to_mb(_total_next_live_bytes), 4663 perc(_total_next_live_bytes, _total_capacity_bytes), 4664 bytes_to_mb(_total_remset_bytes), 4665 bytes_to_mb(_total_strong_code_roots_bytes)); 4666 _out->cr(); 4667 }
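// A minimal sketch of how G1PrintRegionLivenessInfoClosure is meant to be
// driven (illustrative only; the flag, output stream, and phase name below
// are examples in the style of the cleanup pause):
//
//   if (G1PrintRegionLivenessInfo) {
//     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//     G1CollectedHeap::heap()->heap_region_iterate(&cl);
//     // doHeapRegion() prints one line per region; the SUMMARY footer is
//     // printed by the destructor when cl goes out of scope.
//   }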
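// A sketch of the calling loop that the big comment block above
// do_marking_step() assumes (illustrative only: "task" stands for this
// worker's CMTask, "cm" for the ConcurrentMark instance, and the 10ms
// target is an arbitrary example value):
//
//   do {
//     task->do_marking_step(10.0  /* time_target_ms */,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//     // A step that aborted (yield, time-out, SATB backlog, overflow)
//     // while marking as a whole is still in progress is simply
//     // re-invoked; it picks up where it left off.
//   } while (task->has_aborted() && !cm->has_aborted());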