/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize  = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
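  // (The bitmap has one bit per (HeapWordSize << _shifter) bytes of heap, so
  // e.g. with _shifter == 0 this rounds addr up to the next heap word, and
  // with a larger shifter up to the next multiple of that granularity; the
  // rounded address then maps exactly onto a bitmap offset below.)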
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
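  // Each region is cleared in chunks (see doHeapRegion() below) so that a
  // yield check can run between chunks when _may_yield is set, keeping the
  // concurrent thread responsive to safepoint requests.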
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // Clip the range to the part of the heap this bitmap covers. Note that
  // intersection() returns a new MemRegion, so the result must be assigned.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
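  // The debug-only fields below let drain() assert that it is not
  // re-entered in a way that is unsafe with respect to yielding.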
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT "K to " SIZE_FORMAT "K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
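  // There is enough room: claim the slots [start, next_index) while holding
  // the lock, then copy the elements in. On overflow we only set _overflow
  // above; callers detect it and trigger the overflow handling (and possible
  // expansion via set_should_expand()).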
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = " PTR_FORMAT ", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(),
                          prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (%u) "
            "than ParallelGCThreads (%u).",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor             = sleep_factor;
    _marking_task_overhead    = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
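    // scale_parallel_threads() returns MAX2((n + 2) / 4, 1U) (see above), so
    // for example ParallelGCThreads of 8 yields 2 concurrent marking threads,
    // and small values still give at least one thread.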
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
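    // Only values the user supplied on the command line are validated here;
    // an ergonomically chosen MarkStackSize was already checked in the
    // branch above.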
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
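  // Each task is re-pointed at the next marking bitmap, the one that the
  // upcoming cycle will mark into.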
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed on it
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread is still in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the guarantees from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining forced-overflow count will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards.
 * If we do not do this, the following deadlock can occur: one thread
 * could be in the barrier sync code, waiting for the other thread to
 * also sync up, whereas another one could be trying to yield, while
 * also waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended by a Full GC, or an evacuation
 * pause could occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_second_overflow_barrier_sync.enter();
  }

  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check(worker_id);

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
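// If dynamic sizing is disabled, or ConcGCThreads was set explicitly and
// dynamic sizing is not being forced, this is simply the configured maximum;
// otherwise AdaptiveSizePolicy picks a value based on the number of
// non-daemon threads, capped by that maximum.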
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  double scan_start = os::elapsedTime();

  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    if (G1Log::fine()) {
      gclog_or_tty->gclog_stamp(concurrent_gc_id());
      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
    }

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    if (G1Log::fine()) {
      gclog_or_tty->gclog_stamp(concurrent_gc_id());
      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
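    // scan_finished() clears the scan-in-progress state and notifies any
    // thread blocked in wait_until_scan_finished() on RootRegionScan_lock.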
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
                  G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
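// (card_bitmap_index_for() maps a heap address to a card bitmap index by
// shifting the address by CardTableModRefBS::card_shift and biasing by the
// card number of the heap's bottom, which is computed in the ConcurrentMark
// constructor above.)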
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // top is not card aligned - increment to cover
        // all the cards spanned by the interval
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        if (_verbose) {
          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                 "expected: %s, actual: %s",
                                 hr->hrm_index(), i,
                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
        }
        failures += 1;
      }
    }

    if (failures > 0 && _verbose) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
                             HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};

class G1ParVerifyFinalCountTask: public AbstractGangTask {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint    _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int  _failures;
  bool _verbose;

  HeapRegionClaimer _hrclaimer;

 public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0), _verbose(false),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
    assert(VerifyDuringGC, "don't call this otherwise");
    assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
    assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");

    _verbose = _cm->verbose_medium();
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    VerifyLiveObjectDataHRClosure verify_cl(_g1h,
                                            _actual_region_bm, _actual_card_bm,
                                            _expected_region_bm,
                                            _expected_card_bm,
                                            _verbose);

    _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);

    Atomic::add(verify_cl.failures(), &_failures);
  }

  int failures() const { return _failures; }
};

// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top]
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region,
// containing live data, in the region liveness bitmap.

class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
public:
  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                              BitMap* region_bm,
                              BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* top   = hr->top();

    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

    // Mark the allocated-since-marking portion...
    if (ntams < top) {
      // This definitely means the region has live objects.
      set_bit_for_region(hr);

      // Now set the bits in the card bitmap for [ntams, top)
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
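      // The is_in_g1_reserved(top) check below guards that case: end_idx
      // is only incremented when top is a real heap address that is not
      // card aligned.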
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      assert(end_idx <= _card_bm->size(),
             err_msg("oob: end_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
                     end_idx, _card_bm->size()));
      assert(start_idx < _card_bm->size(),
             err_msg("oob: start_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
                     start_idx, _card_bm->size()));

      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
    }

    // Set the bit for the region if it contains live data
    if (hr->next_marked_bytes() > 0) {
      set_bit_for_region(hr);
    }

    return false;
  }
};

class G1ParFinalCountTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint _n_workers;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
    : AbstractGangTask("G1 final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    FinalCountDataUpdateClosure final_update_cl(_g1h,
                                                _actual_region_bm,
                                                _actual_card_bm);

    _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
  }
};

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  HeapRegionSetCount _old_regions_removed;
  HeapRegionSetCount _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(),
    _humongous_regions_removed(),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
  const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_continues_humongous() || hr->is_archive()) {
      return false;
    }
    // We use a claim value of zero here because all regions
    // were claimed with value 1 in the FinalCount task.
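    // (Historical note: explicit claim values were part of the older
    // region-iteration scheme; with the HeapRegionClaimer-based iteration
    // used here, claiming is handled by the claimer itself.)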
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous(), "we should only see starts humongous");
        _humongous_regions_removed.increment(1u, hr->capacity());
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed.increment(1u, hr->capacity());
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
      AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

class G1ParScrubRemSetTask: public AbstractGangTask {
protected:
  G1RemSet* _g1rs;
  BitMap* _region_bm;
  BitMap* _card_bm;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
      AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
  }

};

void ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Cleanup Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  // Do counting once more with the world stopped for good measure.
  G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);

  g1h->workers()->run_task(&g1_par_count_task);

  if (VerifyDuringGC) {
    // Verify that the counting data accumulated during marking matches
    // that calculated by walking the marking bitmap.

    // Bitmaps to hold expected values
    BitMap expected_region_bm(_region_bm.size(), true);
    BitMap expected_card_bm(_card_bm.size(), true);

    G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
                                                 &_region_bm,
                                                 &_card_bm,
                                                 &expected_region_bm,
                                                 &expected_card_bm);

    g1h->workers()->run_task(&g1_par_verify_task);

    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  }

  size_t start_used_bytes = g1h->used();
  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
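  // (G1ParNoteEndTask frees regions found to be completely garbage onto
  // _cleanup_list and has each region record its liveness statistics.)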
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the RSet data before the record_concurrent_mark_cleanup_end()
  // call below, since scrubbing affects the metric by which we sort the
  // heap regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
    g1h->workers()->run_task(&g1_par_scrub_rs_task);

    double rs_scrub_end = os::elapsedTime();
    double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
    _total_rs_scrub_time += this_rs_scrub_time;
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  if (G1Log::fine()) {
    g1h->g1_policy()->print_heap_transition(start_used_bytes);
  }

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(after)");
  }

  g1h->check_bitmaps("Cleanup End");

  g1h->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();

  g1h->trace_heap_after_concurrent_cycle();
}

void ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                           "cleanup list has %u entries",
                           _cleanup_list.length());
  }

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process.
    // We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                               "appending %u entries to the secondary_free_list, "
                               "cleanup list still has %u entries",
                               tmp_free_list.length(),
                               _cleanup_list.length());
      }

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the CMTask associated with a worker thread (for serial reference
// processing the CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// for the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  ConcurrentMark* _cm;
  CMTask*         _task;
  int             _ref_counter_limit;
  int             _ref_counter;
  bool            _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] we're looking at location "
                               "*" PTR_FORMAT " = " PTR_FORMAT,
                               _task->worker_id(), p2i(p), p2i((void*) obj));
      }

      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call CMTask::do_marking_step() to
        // process these entries.
        //
        // We call CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e.
        // we're done with the entries that
        // were pushed as a result of the CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    } else {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the CMTask associated with a given worker thread (for serial
// reference processing the CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure: public VoidClosure {
  ConcurrentMark* _cm;
  CMTask*         _task;
  bool            _is_serial;
public:
  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
                               _task->worker_id(), BOOL_TO_STR(_is_serial));
      }

      // We call CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.
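      // With an effectively unbounded time target, do_marking_step() will
      // not time out, so this loop keeps retrying until the drain completes
      // or the global mark stack overflows.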

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;
  WorkGang*        _workers;
  uint             _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask&     _proc_task;
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}

void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists. We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark   hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    G1CMTraceTime t("GC ref-proc", G1Log::finer());

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (e.g.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers.
    // This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          g1h->gc_timer_cm(),
                                          concurrent_gc_id());
    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.

    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed
    return;
  }

  assert(_markStack.isEmpty(), "Marking should have completed");

  // Unload Klasses, String, Symbols, Code Cache, etc.
  {
    G1CMTraceTime trace("Unloading", G1Log::finer());

    if (ClassUnloadingWithConcurrentMark) {
      bool purged_classes;

      {
        G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
      }

      {
        G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
      }
    }

    if (G1StringDedup::is_enabled()) {
      G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
      G1StringDedup::unlink(&g1_is_alive);
    }
  }
}

void ConcurrentMark::swapMarkBitMaps() {
  CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap  = (CMBitMap*)  temp;
}

// Closure for marking entries in SATB buffers.
class CMSATBBufferClosure : public SATBBufferClosure {
private:
  CMTask* _task;
  G1CollectedHeap* _g1h;

  // This is very similar to CMTask::deal_with_reference, but with
  // more relaxed requirements for the argument, so this must be more
  // circumspect about treating the argument as an object.
  void do_entry(void* entry) const {
    _task->increment_refs_reached();
    HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
    if (entry < hr->next_top_at_mark_start()) {
      // Until we get here, we don't know whether entry refers to a valid
      // object; it could instead have been a stale reference.
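      // Anything below NTAMS in its region must be a valid object (entries
      // at or above NTAMS refer to objects allocated since marking started,
      // which are implicitly live), so treating the entry as an oop below
      // is safe.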
      oop obj = static_cast<oop>(entry);
      assert(obj->is_oop(true /* ignore mark word */),
             err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
      _task->make_reference_grey(obj, hr);
    }
  }

public:
  CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  CMSATBBufferClosure _cm_satb_cl;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
    _cm_satb_cl(task, g1h),
    _cm_cl(g1h, g1h->concurrent_mark(), task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking
        // however oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
        // live by the SATB invariant but other oops recorded in nmethods may behave differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
      }
    }
  }
};

class CMRemarkTask: public AbstractGangTask {
private:
  ConcurrentMark* _cm;
public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true         /* do_termination       */,
                              false        /* is_serial            */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark   hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  G1CMTraceTime trace("Finalize Marking", G1Log::finer());

  g1h->ensure_parsability(false);

  // this is remark, so we'll use up all active threads
  uint active_workers = g1h->workers()->active_workers();
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  {
    StrongRootsScope srs(active_workers);

    CMRemarkTask remarkTask(this, active_workers);
    // We will start all available threads, even if we decide that the
    // active_workers will be fewer. The extra ones will just bail out
    // immediately.
    g1h->workers()->run_task(&remarkTask);
  }

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            err_msg("Invariant: has_overflown = %s, num buffers = %d",
                    BOOL_TO_STR(has_overflown()),
                    satb_mq_set.completed_buffers_num()));

  print_stats();
}

void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
}

void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  _nextMarkBitMap->clearRange(mr);
}

HeapRegion*
ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    // Note on how this code handles humongous regions. In the
    // normal case the finger will reach the start of a "starts
    // humongous" (SH) region. Its end will either be the end of the
    // last "continues humongous" (CH) region in the sequence, or the
    // standard end of the SH region (if the SH is the only region in
    // the sequence). That way claim_region() will skip over the CH
    // regions. However, there is a subtle race between a CM thread
    // executing this method and a mutator thread doing a humongous
    // object allocation. The two are not mutually exclusive as the CM
    // thread does not need to hold the Heap_lock when it gets
    // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming a SH or a CH
    // region. In the former case, it will either
    //   a) Miss the update to the region's end, in which case it will
    //      visit every subsequent CH region, will find their bitmaps
    //      empty, and do nothing, or
    //   b) Will observe the update of the region's end (in which case
    //      it will skip the subsequent CH regions).
    // If it comes across a region that suddenly becomes CH, the
    // scenario will be similar to b).
    // So, the race between
    // claim_region() and a humongous object allocation might force us
    // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);

    // Above heap_region_containing_raw may return NULL as we always scan
    // and claim until the end of the heap. In this case, just jump to the
    // next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit  = curr_region->next_top_at_mark_start();

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] curr_region = " PTR_FORMAT " "
                               "[" PTR_FORMAT ", " PTR_FORMAT "), "
                               "limit = " PTR_FORMAT,
                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
      }

      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
      assert(_finger >= end, "the finger should have moved forward");

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] we were successful with region = "
                               PTR_FORMAT, worker_id, p2i(curr_region));
      }

      if (limit > bottom) {
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is not empty, "
                                 "returning it ", worker_id, p2i(curr_region));
        }
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is empty, "
                                 "returning NULL", worker_id, p2i(curr_region));
        }
        // we return NULL and the caller should try calling
        // claim_region() again.
        return NULL;
      }
    } else {
      assert(_finger > finger, "the finger should have moved forward");
      if (verbose_low()) {
        if (curr_region == NULL) {
          gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
                                 "global finger = " PTR_FORMAT ", "
                                 "our finger = " PTR_FORMAT,
                                 worker_id, p2i(_finger), p2i(finger));
        } else {
          gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
                                 "global finger = " PTR_FORMAT ", "
                                 "our finger = " PTR_FORMAT,
                                 worker_id, p2i(_finger), p2i(finger));
        }
      }

      // read it again
      finger = _finger;
    }
  }

  return NULL;
}

#ifndef PRODUCT
class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
private:
  G1CollectedHeap* _g1h;
  const char* _phase;
  int _info;

public:
  VerifyNoCSetOops(const char* phase, int info = -1) :
    _g1h(G1CollectedHeap::heap()),
    _phase(phase),
    _info(info)
  { }

  void operator()(oop obj) const {
    guarantee(obj->is_oop(),
              err_msg("Non-oop " PTR_FORMAT ", phase: %s, info: %d",
                      p2i(obj), _phase, _info));
    guarantee(!_g1h->obj_in_cs(obj),
              err_msg("obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
                      p2i(obj), _phase, _info));
  }
};

void ConcurrentMark::verify_no_cset_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
    return;
  }

  // Verify entries on the global mark stack
  _markStack.iterate(VerifyNoCSetOops("Stack"));

  // Verify entries on the task queues
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->iterate(VerifyNoCSetOops("Queue", i));
  }

  // Verify the global finger
  HeapWord* global_finger = finger();
  if (global_finger != NULL && global_finger < _heap_end) {
    // The global finger always points to a heap region boundary. We
    // use heap_region_containing_raw() to get the containing region
    // given that the global finger could be pointing to a free region
    // which subsequently becomes continues humongous. If that
    // happens, heap_region_containing() will return the bottom of the
    // corresponding starts humongous region and the check below will
    // not hold any more.
    // Since we always iterate over all regions, we might get a NULL HeapRegion
    // here.
    HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              err_msg("global finger: " PTR_FORMAT " region: " HR_FORMAT,
                      p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
  }

  // Verify the task fingers
  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  for (uint i = 0; i < parallel_marking_threads(); ++i) {
    CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap_end) {
      // See above note on the global finger verification.
      HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                !task_hr->in_collection_set(),
                err_msg("task finger: " PTR_FORMAT " region: " HR_FORMAT,
                        p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
    }
  }
}
#endif // PRODUCT

// Aggregate the counting data that was constructed concurrently
// with marking.
class AggregateCountDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;
  BitMap* _cm_card_bm;
  uint _max_worker_id;

public:
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed.
      // Note that we cannot rely on their associated
      // "starts humongous" region to have their bit set to 1
      // since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* start = hr->bottom();
    HeapWord* limit = hr->next_top_at_mark_start();
    HeapWord* end = hr->end();

    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
                   "top: " PTR_FORMAT ", end: " PTR_FORMAT,
                   p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));

    assert(hr->next_marked_bytes() == 0, "Precondition");

    if (start == limit) {
      // NTAMS of this region has not been set so nothing to do.
      return false;
    }

    // 'start' should be in the heap.
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could be actually just beyond the end of the heap;
    // limit_idx will then correspond to a (non-existent) card
    // that is also outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
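    // Each worker accumulated a private marked-bytes array and card bitmap
    // during marking; the loop below sums the byte counts and unions the
    // per-task card bitmaps into the global one.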
    uint hrm_index = hr->hrm_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrm_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
      BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);

      while (scan_idx < limit_idx) {
        assert(task_card_bm->at(scan_idx) == true, "should be");
        _cm_card_bm->set_bit(scan_idx);
        assert(_cm_card_bm->at(scan_idx) == true, "should be");

        // BitMap::get_next_one_offset() can handle the case when
        // its left_offset parameter is greater than its right_offset
        // parameter. It does, however, have an early exit if
        // left_offset == right_offset. So let's limit the value
        // passed in for left offset here.
        BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
        scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
      }
    }

    // Update the marked bytes for this region.
    hr->add_to_marked_bytes(marked_bytes);

    // Next heap region
    return false;
  }
};

class G1AggregateCountDataTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _cm_card_bm;
  uint _max_worker_id;
  uint _active_workers;
  HeapRegionClaimer _hrclaimer;

public:
  G1AggregateCountDataTask(G1CollectedHeap* g1h,
                           ConcurrentMark* cm,
                           BitMap* cm_card_bm,
                           uint max_worker_id,
                           uint n_workers) :
    AbstractGangTask("Count Aggregation"),
    _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
    _max_worker_id(max_worker_id),
    _active_workers(n_workers),
    _hrclaimer(_active_workers) {
  }

  void work(uint worker_id) {
    AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);

    _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
  }
};


void ConcurrentMark::aggregate_count_data() {
  uint n_workers = _g1h->workers()->active_workers();

  G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
                                           _max_worker_id, n_workers);

  _g1h->workers()->run_task(&g1_par_agg_task);
}

// Clear the per-worker arrays used to store the per-region counting data
void ConcurrentMark::clear_all_count_data() {
  // Clear the global card bitmap - it will be filled during
  // liveness count aggregation (during remark) and the
  // final counting task.
  _card_bm.clear();

  // Clear the global region bitmap - it will be filled as part
  // of the final counting task.
  _region_bm.clear();

  uint max_regions = _g1h->max_regions();
  assert(_max_worker_id > 0, "uninitialized");

  for (uint i = 0; i < _max_worker_id; i += 1) {
    BitMap* task_card_bm = count_card_bitmap_for(i);
    size_t* marked_bytes_array = count_marked_bytes_array_for(i);

    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    assert(marked_bytes_array != NULL, "uninitialized");

    memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
    task_card_bm->clear();
  }
}

void ConcurrentMark::print_stats() {
  if (verbose_stats()) {
    gclog_or_tty->print_cr("---------------------------------------------------------------------");
    for (size_t i = 0; i < _active_tasks; ++i) {
      _tasks[i]->print_stats();
      gclog_or_tty->print_cr("---------------------------------------------------------------------");
    }
  }
}

// abandon current marking iteration due to a Full GC
void ConcurrentMark::abort() {
  if (!cmThread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  _nextMarkBitMap->clearAll();

  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Clear the liveness counting data
  clear_all_count_data();
  // Empty mark stack
  reset_marking_state();
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
  assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
                                 false, /* new active value */
                                 satb_mq_set.is_active() /* expected_active */);

  _g1h->trace_heap_after_concurrent_cycle();
  _g1h->register_concurrent_cycle_end();
}

const GCId& ConcurrentMark::concurrent_gc_id() {
  if (has_aborted()) {
    return _aborted_gc_id;
  }
  return _g1h->gc_tracer_cm()->gc_id();
}

static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

void ConcurrentMark::print_summary_info() {
  gclog_or_tty->print_cr(" Concurrent marking:");
  print_ms_time_info("  ", "init marks", _init_times);
  print_ms_time_info("  ", "remarks", _remark_times);
  {
    print_ms_time_info("     ", "final marks", _remark_mark_times);
    print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);

  }
  print_ms_time_info("  ", "cleanups", _cleanup_times);
  gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
                         _total_counting_time,
                         (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
                          (double)_cleanup_times.num()
                         : 0.0));
  if (G1ScrubRemSets) {
    gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
                           _total_rs_scrub_time,
                           (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
                            (double)_cleanup_times.num()
                           : 0.0));
  }
  gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
                         (_init_times.sum() + _remark_times.sum() +
                          _cleanup_times.sum())/1000.0);
  gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
                         "(%8.2f s marking).",
                         cmThread()->vtime_accum(),
                         cmThread()->vtime_mark_accum());
}

void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  _parallel_workers->print_worker_threads_on(st);
}

void ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
}

// We take a break if someone is trying to stop the world.
bool ConcurrentMark::do_yield_check(uint worker_id) {
  if (SuspendibleThreadSet::should_yield()) {
    if (worker_id == 0) {
      _g1h->g1_policy()->record_concurrent_pause();
    }
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

#ifndef PRODUCT
// for debugging purposes
void ConcurrentMark::print_finger() {
  gclog_or_tty->print_cr("heap [" PTR_FORMAT ", " PTR_FORMAT "), global finger = " PTR_FORMAT,
                         p2i(_heap_start), p2i(_heap_end), p2i(_finger));
  for (uint i = 0; i < _max_worker_id; ++i) {
    gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
  }
  gclog_or_tty->cr();
}
#endif

// Closure for iteration over bitmaps
class CMBitMapClosure : public BitMapClosure {
private:
  // the bitmap that is being iterated over
  CMBitMap*       _nextMarkBitMap;
  ConcurrentMark* _cm;
  CMTask*         _task;

public:
  CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
    _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }

  bool do_bit(size_t offset) {
    HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
    assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert( addr < _cm->finger(), "invariant");

    statsOnly( _task->increase_objs_found_on_bitmap() );
    assert(addr >= _task->finger(), "invariant");

    // We move that task's local finger along.
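    // (The local finger records how far through the region this task has
    // scanned; after an abort, the bitmap iteration resumes from it.)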
    _task->move_finger_to(addr);

    _task->scan_object(oop(addr));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);

    // if the has_aborted flag has been raised, we need to bail out of
    // the iteration
    return !_task->has_aborted();
  }
};

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               ConcurrentMark* cm,
                               CMTask* task)
  : _g1h(g1h), _cm(cm), _task(task) {
  assert(_ref_processor == NULL, "should be initialized to NULL");

  if (G1UseConcMarkReferenceProcessing) {
    _ref_processor = g1h->ref_processor_cm();
    assert(_ref_processor != NULL, "should not be NULL");
  }
}

void CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  assert(!hr->is_continues_humongous(),
         "claim_region() should have filtered out continues humongous regions");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] setting up for region " PTR_FORMAT,
                           _worker_id, p2i(hr));
  }

  _curr_region = hr;
  _finger      = hr->bottom();
  update_region_limit();
}

void CMTask::update_region_limit() {
  HeapRegion* hr = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit = hr->next_top_at_mark_start();

  if (limit == bottom) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] found an empty region "
                             "[" PTR_FORMAT ", " PTR_FORMAT ")",
                             _worker_id, p2i(bottom), p2i(limit));
    }
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}

void CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] giving up region " PTR_FORMAT,
                           _worker_id, p2i(_curr_region));
  }
  clear_region_fields();
}

void CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
  _curr_region  = NULL;
  _finger       = NULL;
  _region_limit = NULL;
}

void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  if (cm_oop_closure == NULL) {
    assert(_cm_oop_closure != NULL, "invariant");
  } else {
    assert(_cm_oop_closure == NULL, "invariant");
  }
  _cm_oop_closure = cm_oop_closure;
}

void CMTask::reset(CMBitMap* nextMarkBitMap) {
  guarantee(nextMarkBitMap != NULL, "invariant");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] resetting", _worker_id);
  }

  _nextMarkBitMap = nextMarkBitMap;
  clear_region_fields();

  _calls                     = 0;
  _elapsed_time_ms           = 0.0;
  _termination_time_ms       = 0.0;
  _termination_start_time_ms = 0.0;

#if _MARKING_STATS_
  _aborted                = 0;
  _aborted_overflow       = 0;
  _aborted_cm_aborted     = 0;
  _aborted_yield          = 0;
  _aborted_timed_out      = 0;
  _aborted_satb           = 0;
  _aborted_termination    = 0;
  _steal_attempts         = 0;
  _steals                 = 0;
  _local_pushes           = 0;
  _local_pops             = 0;
  _local_max_size         = 0;
  _objs_scanned           = 0;
  _global_pushes          = 0;
  _global_pops            = 0;
  _global_max_size        = 0;
  _global_transfers_to    = 0;
  _global_transfers_from  = 0;
  _regions_claimed        = 0;
  _objs_found_on_bitmap   = 0;
  _satb_buffers_processed = 0;
#endif // _MARKING_STATS_
}

bool CMTask::should_exit_termination() {
  regular_clock_call();
  // This is called when we are in the termination protocol. We should
  // quit if, for some reason, this task wants to abort or the global
  // stack is not empty (this means that we can get work from it).
  return !_cm->mark_stack_empty() || has_aborted();
}

void CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
         "shouldn't have been called otherwise");
  regular_clock_call();
}

void CMTask::regular_clock_call() {
  if (has_aborted()) return;

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    set_has_aborted();
    return;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!concurrent()) return;

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    set_has_aborted();
    statsOnly( ++_aborted_cm_aborted );
    return;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (3) If marking stats are enabled, then we update the step history.
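  // (The block below compiles away entirely unless _MARKING_STATS_ is
  // defined to a non-zero value.)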
#if _MARKING_STATS_
  if (_words_scanned >= _words_scanned_limit) {
    ++_clock_due_to_scanning;
  }
  if (_refs_reached >= _refs_reached_limit) {
    ++_clock_due_to_marking;
  }

  double last_interval_ms = curr_time_ms - _interval_start_time_ms;
  _interval_start_time_ms = curr_time_ms;
  _all_clock_intervals_ms.add(last_interval_ms);

  if (_cm->verbose_medium()) {
    gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
                           "scanned = " SIZE_FORMAT "%s, refs reached = " SIZE_FORMAT "%s",
                           _worker_id, last_interval_ms,
                           _words_scanned,
                           (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
                           _refs_reached,
                           (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
  }
#endif // _MARKING_STATS_

  // (4) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    statsOnly( ++_aborted_yield );
    return;
  }

  // (5) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    statsOnly( ++_aborted_timed_out );
    return;
  }

  // (6) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
                             _worker_id);
    }
    // we do need to process SATB buffers, we'll abort and restart
    // the marking task to do so
    set_has_aborted();
    statsOnly( ++_aborted_satb );
    return;
  }
}

void CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit      = _real_words_scanned_limit;

  _real_refs_reached_limit  = _refs_reached + refs_reached_period;
  _refs_reached_limit       = _real_refs_reached_limit;
}

void CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.
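  // Concretely, each limit is pulled back by three quarters of a
  // period, so the next clock call fires after only about a quarter of
  // the usual scanning/marking work.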

  if (_cm->verbose_medium()) {
    gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
  }

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit  = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}

void CMTask::move_entries_to_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the local queue
  oop buffer[global_stack_transfer_size];

  int n = 0;
  oop obj;
  while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
    buffer[n] = obj;
    ++n;
  }

  if (n > 0) {
    // we popped at least one entry from the local queue

    statsOnly( ++_global_transfers_to; _local_pops += n );

    if (!_cm->mark_stack_push(buffer, n)) {
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
                               _worker_id);
      }
      set_has_aborted();
    } else {
      // the transfer was successful

      if (_cm->verbose_medium()) {
        gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
                               _worker_id, n);
      }
      statsOnly( size_t tmp_size = _cm->mark_stack_size();
                 if (tmp_size > _global_max_size) {
                   _global_max_size = tmp_size;
                 }
                 _global_pushes += n );
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void CMTask::get_entries_from_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the global stack.
  oop buffer[global_stack_transfer_size];
  int n;
  _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  assert(n <= global_stack_transfer_size,
         "we should not pop more than the given limit");
  if (n > 0) {
    // yes, we did actually pop at least one entry

    statsOnly( ++_global_transfers_from; _global_pops += n );
    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
                             _worker_id, n);
    }
    for (int i = 0; i < n; ++i) {
      bool success = _task_queue->push(buffer[i]);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }

    statsOnly( size_t tmp_size = (size_t)_task_queue->size();
               if (tmp_size > _local_max_size) {
                 _local_max_size = tmp_size;
               }
               _local_pushes += n );
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) return;

  // Decide what the target size is, depending on whether we're going
  // to drain it partially (so that other tasks can steal if they run
  // out of things to do) or totally (at the very end).
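  // A partial drain leaves up to a third of the queue's capacity
  // (capped at GCDrainStackTargetSize) available as stealable work.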
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
                             _worker_id, target_size);
    }

    oop obj;
    bool ret = _task_queue->pop_local(obj);
    while (ret) {
      statsOnly( ++_local_pops );

      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("[%u] popped " PTR_FORMAT, _worker_id,
                               p2i((void*) obj));
      }

      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
      assert(!_g1h->is_on_master_free_list(
                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");

      scan_object(obj);

      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(obj);
      }
    }

    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
                             _worker_id, _task_queue->size());
    }
  }
}

void CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going
  // to drain it partially (so that other tasks can steal if they run
  // out of things to do) or totally (at the very end). Notice that,
  // because we move entries from the global stack in chunks, or
  // because another task might be doing the same, we might in fact
  // drop below the target. But, this is not a problem.
  size_t target_size;
  if (partially) {
    target_size = _cm->partial_mark_stack_size_target();
  } else {
    target_size = 0;
  }

  if (_cm->mark_stack_size() > target_size) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
                             _worker_id, target_size);
    }

    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      get_entries_from_global_stack();
      drain_local_queue(partially);
    }

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
                             _worker_id, _cm->mark_stack_size());
    }
  }
}

// The SATB queue set makes several assumptions about whether to call
// the par or non-par versions of its methods. This is why some of the
// code is replicated. We should really get rid of the single-threaded
// version of the code to simplify things.
void CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
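  // Each processed buffer also triggers a regular_clock_call(), so the
  // time-target and yield checks still happen in between buffers.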
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
    }
    statsOnly( ++_satb_buffers_processed );
    regular_clock_call();
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}

void CMTask::print_stats() {
  gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
                         _worker_id, _calls);
  gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                         _elapsed_time_ms, _termination_time_ms);
  gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                         _step_times_ms.num(), _step_times_ms.avg(),
                         _step_times_ms.sd());
  gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
                         _step_times_ms.maximum(), _step_times_ms.sum());

#if _MARKING_STATS_
  gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                         _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
                         _all_clock_intervals_ms.sd());
  gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
                         _all_clock_intervals_ms.maximum(),
                         _all_clock_intervals_ms.sum());
  gclog_or_tty->print_cr("  Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT,
                         _clock_due_to_scanning, _clock_due_to_marking);
  gclog_or_tty->print_cr("  Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT,
                         _objs_scanned, _objs_found_on_bitmap);
  gclog_or_tty->print_cr("  Local Queue:  pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
                         _local_pushes, _local_pops, _local_max_size);
  gclog_or_tty->print_cr("  Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
                         _global_pushes, _global_pops, _global_max_size);
  gclog_or_tty->print_cr("                transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT,
                         _global_transfers_to, _global_transfers_from);
  gclog_or_tty->print_cr("  Regions: claimed = " SIZE_FORMAT, _regions_claimed);
  gclog_or_tty->print_cr("  SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed);
  gclog_or_tty->print_cr("  Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT,
                         _steal_attempts, _steals);
  gclog_or_tty->print_cr("  Aborted: " SIZE_FORMAT ", due to", _aborted);
  gclog_or_tty->print_cr("    overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT,
                         _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
  gclog_or_tty->print_cr("    time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT,
                         _aborted_timed_out, _aborted_satb, _aborted_termination);
#endif // _MARKING_STATS_
}

bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
  return _task_queues->steal(worker_id, hash_seed, obj);
}

/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework.
    It can be called in parallel with other invocations of
    do_marking_step() on different tasks (but only one per task,
    obviously) and concurrently with the mutator threads, or during
    remark, hence it eliminates the need for two versions of the
    code. When called during remark, it will pick up from where the
    task left off during the concurrent marking phase. Interestingly,
    tasks are also claimable during evacuation pauses, since
    do_marking_step() ensures that it aborts before it needs to yield.

    The data structures that it uses to do marking work are the
    following:

      (1) Marking Bitmap. If there are gray objects that appear only
      on the bitmap (this happens either when dealing with an overflow
      or when the initial marking phase has simply marked the roots
      and didn't push them on the stack), then tasks claim heap
      regions whose bitmap they then scan to find gray objects. A
      global finger indicates where the end of the last claimed region
      is. A local finger indicates how far into the region a task has
      scanned. The two fingers are used to determine how to gray an
      object (i.e. whether simply marking it is OK, as it will be
      visited by a task in the future, or whether it needs to be also
      pushed on a stack).

      (2) Local Queue. The local queue of the task which is accessed
      reasonably efficiently by the task. Other tasks can steal from
      it when they run out of work. Throughout the marking phase, a
      task attempts to keep its local queue short but not totally
      empty, so that entries are available for stealing by other
      tasks. Only when there is no more work will a task totally
      drain its local queue.

      (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and more fine-grained
      interaction with it might cause contention. If it overflows,
      then the marking phase should restart and iterate over the
      bitmap to identify gray objects. Throughout the marking phase,
      tasks attempt to keep the global mark stack at a small length
      but not totally empty, so that entries are available for popping
      by other tasks. Only when there is no more work will tasks
      totally drain the global mark stack.

      (4) SATB Buffer Queue. This is where completed SATB buffers are
      made available. Buffers are regularly removed from this queue
      and scanned for roots, so that the queue doesn't get too
      long. During remark, all completed buffers are processed, as
      well as the filled-in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

      (1) When the marking phase has been aborted (after a Full GC).

      (2) When a global overflow (on the global stack) has been
      triggered. Before the task aborts, it will actually sync up with
      the other tasks to ensure that all the marking data structures
      (local queues, stacks, fingers etc.) are re-initialized so that
      when do_marking_step() completes, the marking phase can
      immediately restart.

      (3) When enough completed SATB buffers are available. The
      do_marking_step() method only tries to drain SATB buffers right
      at the beginning.
      So, if enough buffers are available, the marking step aborts and
      the SATB buffers are processed at the beginning of the next
      invocation.

      (4) To yield. When we have to yield then we abort and yield
      right at the end of do_marking_step(). This saves us from a lot
      of hassle as, by yielding, we might allow a Full GC. If this
      happens then objects will be compacted underneath our feet, the
      heap might shrink, etc. We save checking for this by just
      aborting and doing the yield right at the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (in
    sub ms intervals) throughout marking. It is this clock method that
    checks all the abort conditions which were mentioned above and
    decides when the task should abort. A work-based scheme is used to
    trigger this clock method: when the number of object words the
    marking phase has scanned or the number of references the marking
    phase has visited reaches a given limit. Additional invocations of
    the clock method have been planted in a few other strategic places
    too. The initial reason for the clock method was to avoid calling
    vtime too regularly, as it is quite expensive. So, once it was in
    place, it was natural to piggy-back all the other conditions on it
    too and not constantly check them throughout the code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.

 *****************************************************************************/

void CMTask::do_marking_step(double time_target_ms,
                             bool do_termination,
                             bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it
  // is possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;
  statsOnly( _interval_start_time_ms = _start_time_ms );

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms =
    g1_policy->get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached  = 0;
  recalculate_limits();

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
                           "target = %1.2lfms >>>>>>>>>>",
                           _worker_id, _calls, _time_target_ms);
  }

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
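      // Either way, [_finger, _region_limit) is exactly the part of
      // the region that is still left to scan.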
      MemRegion mr = MemRegion(_finger, _region_limit);

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] we're scanning part "
                               "[" PTR_FORMAT ", " PTR_FORMAT ") "
                               "of region " HR_FORMAT,
                               _worker_id, p2i(_finger), p2i(_region_limit),
                               HR_FORMAT_PARAMS(_curr_region));
      }

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region  == NULL, "invariant");
      assert(_finger       == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
      }
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        statsOnly( ++_regions_claimed );

        if (_cm->verbose_low()) {
          gclog_or_tty->print_cr("[%u] we successfully claimed "
                                 "region " PTR_FORMAT,
                                 _worker_id, p2i(claimed_region));
        }

        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
    }

    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
    }

    while (!has_aborted()) {
      oop obj;
      statsOnly( ++_steal_attempts );

      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        if (_cm->verbose_medium()) {
          gclog_or_tty->print_cr("[%u] stolen " PTR_FORMAT " successfully",
                                 _worker_id, p2i((void*) obj));
        }

        statsOnly( ++_steals );

        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // If we are about to wrap up and go into termination, check if we
  // should raise the overflow flag.
  if (do_termination && !has_aborted()) {
    if (_cm->force_overflow()->should_force()) {
      _cm->set_has_overflown();
      regular_clock_call();
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
    }

    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
      }
    } else {
      // Apparently there's more work to do. Let's abort this task.
      // The caller will restart it and we can hopefully find more
      // things to do.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] apparently there is more work to do",
                               _worker_id);
      }

      set_has_aborted();
      statsOnly( ++_aborted_termination );
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.

    statsOnly( ++_aborted );

    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised.
      // This means we have to restart the marking phase and start
      // iterating over regions. However, in order to do this we have
      // to make sure that all tasks stop what they are doing and
      // re-initialize in a safe manner. We will achieve this with the
      // use of two barrier sync points.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
      }

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      statsOnly( ++_aborted_overflow );

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _worker_id, _time_target_ms, elapsed_time_ms);
      if (_cm->has_aborted()) {
        gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
                               _worker_id);
      }
    }
  } else {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
                             "elapsed = %1.2lfms <<<<<<<<<<",
                             _worker_id, _time_target_ms, elapsed_time_ms);
    }
  }

  _claimed = false;
}

CMTask::CMTask(uint worker_id,
               ConcurrentMark* cm,
               size_t* marked_bytes,
               BitMap* card_bm,
               CMTaskQueue* task_queue,
               CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL),
    _marked_bytes_array(marked_bytes),
    _card_bm(card_bm) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  statsOnly( _clock_due_to_scanning = 0;
             _clock_due_to_marking  = 0 );

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
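// For instance, the phase header printed below comes out as, e.g.
// (phase name and timestamp illustrative only):
//   ### PHASE Post-Marking @ 12.345
// so the whole liveness report can be extracted from a log by
// grepping for "###".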
#define G1PPRL_LINE_PREFIX             "###"

#define G1PPRL_ADDR_BASE_FORMAT        " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT             " %-4s"
#define G1PPRL_TYPE_H_FORMAT           " %4s"
#define G1PPRL_BYTE_FORMAT             " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT           " %9s"
#define G1PPRL_DOUBLE_FORMAT           " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT         " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
  : _out(out),
    _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _hum_used_bytes(0), _hum_capacity_bytes(0),
    _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  _out->cr();
  _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
                 G1PPRL_SUM_ADDR_FORMAT("reserved")
                 G1PPRL_SUM_BYTE_FORMAT("region-size"),
                 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                 HeapRegion::GrainBytes);
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "type", "address-range",
                 "used", "prev-live", "next-live", "gc-eff",
                 "remset", "code-roots");
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "", "",
                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                 "(bytes)", "(bytes)");
}

// It takes as a parameter a reference to one of the _hum_* fields,
// deduces the corresponding value for a region in a humongous region
// series (either the region size, or what's left if the _hum_* field
// is < the region size), and updates the _hum_* field accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  size_t bytes = 0;
  // The > 0 check is to deal with the prev and next live bytes which
  // could be 0.
  if (*hum_bytes > 0) {
    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
    *hum_bytes -= bytes;
  }
  return bytes;
}

// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and that we visit the regions in address order.
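// For example (sizes illustrative only): if a humongous series spans
// three regions and carries 2.5 regions' worth of used bytes,
// successive calls attribute GrainBytes, GrainBytes, and
// 0.5 * GrainBytes to the first, second, and third region
// respectively, leaving the _hum_* field at zero.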
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
                                                     size_t* capacity_bytes,
                                                     size_t* prev_live_bytes,
                                                     size_t* next_live_bytes) {
  assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  *used_bytes      = get_hum_bytes(&_hum_used_bytes);
  *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
  *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type       = r->get_type_str();
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  if (r->is_starts_humongous()) {
    assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
           _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
           "they should have been zeroed after the last time we used them");
    // Set up the _hum_* fields.
    _hum_capacity_bytes  = capacity_bytes;
    _hum_used_bytes      = used_bytes;
    _hum_prev_live_bytes = prev_live_bytes;
    _hum_next_live_bytes = next_live_bytes;
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    end = bottom + HeapRegion::GrainWords;
  } else if (r->is_continues_humongous()) {
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    assert(end == bottom + HeapRegion::GrainWords, "invariant");
  }

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_FORMAT
                 G1PPRL_ADDR_BASE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_DOUBLE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT,
                 type, p2i(bottom), p2i(end),
                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                 remset_bytes, strong_code_roots_bytes);

  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // add static memory usages to remembered set sizes
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 " SUMMARY"
                 G1PPRL_SUM_MB_FORMAT("capacity")
                 G1PPRL_SUM_MB_PERC_FORMAT("used")
                 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                 G1PPRL_SUM_MB_FORMAT("remset")
                 G1PPRL_SUM_MB_FORMAT("code-roots"),
                 bytes_to_mb(_total_capacity_bytes),
                 bytes_to_mb(_total_used_bytes),
                 perc(_total_used_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_prev_live_bytes),
                 perc(_total_prev_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_next_live_bytes),
                 perc(_total_next_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_remset_bytes),
                 bytes_to_mb(_total_strong_code_roots_bytes));
  _out->cr();
}