/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
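  // (With the default 8-byte minimum object alignment, _shifter is 0 and
  // HeapWordSize << _shifter is one heap word, so this rounds addr up to
  // the next word boundary; a coarser alignment would coarsen the
  // rounding the same way.)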
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  // Number of heap bytes covered by one bitmap byte: one bit per
  // MinObjAlignmentInBytes of heap (with the common 8-byte alignment,
  // each bitmap byte covers 64 heap bytes).
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // Clear the bitmap for one megabyte of heap at a time so that we can
    // check for yield requests between chunks.
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT "K to " SIZE_FORMAT "K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
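  // Reserve the slots [start, next_index) by bumping _index, then copy the
  // entries into the reserved slice; ParGCRareEvent_lock is held for the
  // whole operation, so the bump and the copy are atomic with respect to
  // other pushers and poppers.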
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = " PTR_FORMAT ", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (%u) "
            "than ParallelGCThreads (%u).",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor             = sleep_factor;
    _marking_task_overhead    = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
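    // With scale_parallel_threads() as defined above, for example,
    // ParallelGCThreads == 8 yields (8 + 2) / 4 == 2 marking threads,
    // while any value below 2 still yields the minimum of one thread.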
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
                   (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new WorkGang("G1 Marker",
       _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
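    // Only a value the user set explicitly on the command line needs
    // checking here; the two branches below perform the same range check
    // and differ only in the warning they print, depending on whether
    // MarkStackSizeMax was also set on the command line or left at its
    // default.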
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC, or an evacuation
 * pause could occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp();
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_second_overflow_barrier_sync.enter();
  }

  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check(worker_id);

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
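// If dynamic selection of GC thread counts is disabled, or ConcGCThreads
// was set explicitly on the command line, all of the (already scaled)
// max_parallel_marking_threads() are used; otherwise AdaptiveSizePolicy
// picks a value between 1 and that maximum based on, among other inputs,
// the number of non-daemon Java threads currently running.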
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  double scan_start = os::elapsedTime();

  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    if (G1Log::fine()) {
      gclog_or_tty->gclog_stamp();
      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
    }

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    if (G1Log::fine()) {
      gclog_or_tty->gclog_stamp();
      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
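// The closure walks the next mark bitmap between bottom() and NTAMS,
// summing the sizes of the marked objects into a per-region byte count
// and setting the corresponding bits in the expected card and region
// bitmaps; the verification task below then compares these expected
// values against the actual ones accumulated during marking.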
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting for some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        if (_verbose) {
          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                 "expected: %s, actual: %s",
                                 hr->hrm_index(), i,
                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
        }
        failures += 1;
      }
    }

    if (failures > 0 && _verbose) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
                             HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};

class G1ParVerifyFinalCountTask: public AbstractGangTask {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint    _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int  _failures;
  bool _verbose;

  HeapRegionClaimer _hrclaimer;

 public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0), _verbose(false),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
    assert(VerifyDuringGC, "don't call this otherwise");
    assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
    assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");

    _verbose = _cm->verbose_medium();
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    VerifyLiveObjectDataHRClosure verify_cl(_g1h,
                                            _actual_region_bm, _actual_card_bm,
                                            _expected_region_bm,
                                            _expected_card_bm,
                                            _verbose);

    _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);

    Atomic::add(verify_cl.failures(), &_failures);
  }

  int failures() const { return _failures; }
};

// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
1668 // Sets the bits corresponding to the interval [NTAMS, top)
1669 // (which contains the implicitly live objects) in the
1670 // card liveness bitmap. Also sets the bit, in the region
1671 // liveness bitmap, for each region containing live data.
1672
1673 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1674  public:
1675   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
1676                               BitMap* region_bm,
1677                               BitMap* card_bm) :
1678     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
1679
1680   bool doHeapRegion(HeapRegion* hr) {
1681
1682     if (hr->is_continues_humongous()) {
1683       // We will ignore these here and process them when their
1684       // associated "starts humongous" region is processed (see
1685       // set_bit_for_heap_region()). Note that we cannot rely on their
1686       // associated "starts humongous" region to have its bit set to
1687       // 1 since, due to the region chunking in the parallel region
1688       // iteration, a "continues humongous" region might be visited
1689       // before its associated "starts humongous".
1690       return false;
1691     }
1692
1693     HeapWord* ntams = hr->next_top_at_mark_start();
1694     HeapWord* top   = hr->top();
1695
1696     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
1697
1698     // Mark the allocated-since-marking portion...
1699     if (ntams < top) {
1700       // This definitely means the region has live objects.
1701       set_bit_for_region(hr);
1702
1703       // Now set the bits in the card bitmap for [ntams, top)
1704       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
1705       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
1706
1707       // Note: if we're looking at the last region in the heap, top
1708       // could actually be just beyond the end of the heap; end_idx
1709       // will then correspond to a (non-existent) card that is also
1710       // just beyond the heap.
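      // Illustrative example (card size and offsets assumed, not taken
      // from this code): with 512-byte cards, an object whose end falls
      // 100 bytes into card 7 yields end_idx == 7, yet the object still
      // covers part of card 7; the increment below widens the half-open
      // range [start_idx, end_idx) so that this final, partially
      // covered card is included.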
1711       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
1712         // end of object is not card aligned - increment to cover
1713         // all the cards spanned by the object
1714         end_idx += 1;
1715       }
1716
1717       assert(end_idx <= _card_bm->size(),
1718              err_msg("oob: end_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1719                      end_idx, _card_bm->size()));
1720       assert(start_idx < _card_bm->size(),
1721              err_msg("oob: start_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
1722                      start_idx, _card_bm->size()));
1723
1724       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1725     }
1726
1727     // Set the bit for the region if it contains live data
1728     if (hr->next_marked_bytes() > 0) {
1729       set_bit_for_region(hr);
1730     }
1731
1732     return false;
1733   }
1734 };
1735
1736 class G1ParFinalCountTask: public AbstractGangTask {
1737 protected:
1738   G1CollectedHeap* _g1h;
1739   ConcurrentMark* _cm;
1740   BitMap* _actual_region_bm;
1741   BitMap* _actual_card_bm;
1742
1743   uint _n_workers;
1744   HeapRegionClaimer _hrclaimer;
1745
1746 public:
1747   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1748     : AbstractGangTask("G1 final counting"),
1749       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1750       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1751       _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
1752   }
1753
1754   void work(uint worker_id) {
1755     assert(worker_id < _n_workers, "invariant");
1756
1757     FinalCountDataUpdateClosure final_update_cl(_g1h,
1758                                                 _actual_region_bm,
1759                                                 _actual_card_bm);
1760
1761     _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
1762   }
1763 };
1764
1765 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1766   G1CollectedHeap* _g1;
1767   size_t _freed_bytes;
1768   FreeRegionList* _local_cleanup_list;
1769   HeapRegionSetCount _old_regions_removed;
1770   HeapRegionSetCount _humongous_regions_removed;
1771   HRRSCleanupTask* _hrrs_cleanup_task;
1772
1773 public:
1774   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1775                              FreeRegionList* local_cleanup_list,
1776                              HRRSCleanupTask* hrrs_cleanup_task) :
1777     _g1(g1),
1778     _freed_bytes(0),
1779     _local_cleanup_list(local_cleanup_list),
1780     _old_regions_removed(),
1781     _humongous_regions_removed(),
1782     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1783
1784   size_t freed_bytes() { return _freed_bytes; }
1785   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
1786   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
1787
1788   bool doHeapRegion(HeapRegion *hr) {
1789     if (hr->is_continues_humongous() || hr->is_archive()) {
1790       return false;
1791     }
1792     // Each parallel iteration uses its own HeapRegionClaimer, so regions
1793     // already claimed during the FinalCount task can be claimed again here.
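    // A region that has allocated data (used() > 0) but in which marking
    // found no live bytes (max_live_bytes() == 0) is wholly garbage and is
    // reclaimed immediately below; young regions are excluded since they
    // are reclaimed by evacuation pauses instead.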
1794     _g1->reset_gc_time_stamps(hr);
1795     hr->note_end_of_marking();
1796
1797     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1798       _freed_bytes += hr->used();
1799       hr->set_containing_set(NULL);
1800       if (hr->is_humongous()) {
1801         assert(hr->is_starts_humongous(), "we should only see starts humongous");
1802         _humongous_regions_removed.increment(1u, hr->capacity());
1803         _g1->free_humongous_region(hr, _local_cleanup_list, true);
1804       } else {
1805         _old_regions_removed.increment(1u, hr->capacity());
1806         _g1->free_region(hr, _local_cleanup_list, true);
1807       }
1808     } else {
1809       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1810     }
1811
1812     return false;
1813   }
1814 };
1815
1816 class G1ParNoteEndTask: public AbstractGangTask {
1817   friend class G1NoteEndOfConcMarkClosure;
1818
1819 protected:
1820   G1CollectedHeap* _g1h;
1821   FreeRegionList* _cleanup_list;
1822   HeapRegionClaimer _hrclaimer;
1823
1824 public:
1825   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1826       AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1827   }
1828
1829   void work(uint worker_id) {
1830     FreeRegionList local_cleanup_list("Local Cleanup List");
1831     HRRSCleanupTask hrrs_cleanup_task;
1832     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1833                                            &hrrs_cleanup_task);
1834     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1835     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1836
1837     // Now update the lists
1838     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1839     {
1840       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1841       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1842
1843       // If we iterate over the global cleanup list at the end of
1844       // cleanup to do this printing we cannot guarantee that we only
1845       // generate output for the newly-reclaimed regions (the list
1846       // might not be empty at the beginning of cleanup; we might
1847       // still be working on its previous contents). So we do the
1848       // printing here, before we append the new regions to the global
1849       // cleanup list.
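      // (The printing below still runs under ParGCRareEvent_lock, taken
      // above, so output from different workers does not interleave.)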
1850 1851 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1852 if (hr_printer->is_active()) { 1853 FreeRegionListIterator iter(&local_cleanup_list); 1854 while (iter.more_available()) { 1855 HeapRegion* hr = iter.get_next(); 1856 hr_printer->cleanup(hr); 1857 } 1858 } 1859 1860 _cleanup_list->add_ordered(&local_cleanup_list); 1861 assert(local_cleanup_list.is_empty(), "post-condition"); 1862 1863 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1864 } 1865 } 1866 }; 1867 1868 class G1ParScrubRemSetTask: public AbstractGangTask { 1869 protected: 1870 G1RemSet* _g1rs; 1871 BitMap* _region_bm; 1872 BitMap* _card_bm; 1873 HeapRegionClaimer _hrclaimer; 1874 1875 public: 1876 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1877 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1878 } 1879 1880 void work(uint worker_id) { 1881 _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer); 1882 } 1883 1884 }; 1885 1886 void ConcurrentMark::cleanup() { 1887 // world is stopped at this checkpoint 1888 assert(SafepointSynchronize::is_at_safepoint(), 1889 "world should be stopped"); 1890 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1891 1892 // If a full collection has happened, we shouldn't do this. 1893 if (has_aborted()) { 1894 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused 1895 return; 1896 } 1897 1898 g1h->verify_region_sets_optional(); 1899 1900 if (VerifyDuringGC) { 1901 HandleMark hm; // handle scope 1902 g1h->prepare_for_verify(); 1903 Universe::verify(VerifyOption_G1UsePrevMarking, 1904 " VerifyDuringGC:(before)"); 1905 } 1906 g1h->check_bitmaps("Cleanup Start"); 1907 1908 G1CollectorPolicy* g1p = g1h->g1_policy(); 1909 g1p->record_concurrent_mark_cleanup_start(); 1910 1911 double start = os::elapsedTime(); 1912 1913 HeapRegionRemSet::reset_for_cleanup_tasks(); 1914 1915 // Do counting once more with the world stopped for good measure. 1916 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 1917 1918 g1h->workers()->run_task(&g1_par_count_task); 1919 1920 if (VerifyDuringGC) { 1921 // Verify that the counting data accumulated during marking matches 1922 // that calculated by walking the marking bitmap. 1923 1924 // Bitmaps to hold expected values 1925 BitMap expected_region_bm(_region_bm.size(), true); 1926 BitMap expected_card_bm(_card_bm.size(), true); 1927 1928 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 1929 &_region_bm, 1930 &_card_bm, 1931 &expected_region_bm, 1932 &expected_card_bm); 1933 1934 g1h->workers()->run_task(&g1_par_verify_task); 1935 1936 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); 1937 } 1938 1939 size_t start_used_bytes = g1h->used(); 1940 g1h->collector_state()->set_mark_in_progress(false); 1941 1942 double count_end = os::elapsedTime(); 1943 double this_final_counting_time = (count_end - start); 1944 _total_counting_time += this_final_counting_time; 1945 1946 if (G1PrintRegionLivenessInfo) { 1947 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); 1948 _g1h->heap_region_iterate(&cl); 1949 } 1950 1951 // Install newly created mark bitMap as "prev". 1952 swapMarkBitMaps(); 1953 1954 g1h->reset_gc_time_stamp(); 1955 1956 uint n_workers = _g1h->workers()->active_workers(); 1957 1958 // Note end of marking in all heap regions. 
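  // (G1ParNoteEndTask, defined above, walks the regions in parallel: it
  // frees regions found to be wholly garbage into per-worker free lists
  // and appends those lists to _cleanup_list, which the concurrent
  // completeCleanup() below will process.)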
1959   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1960   g1h->workers()->run_task(&g1_par_note_end_task);
1961   g1h->check_gc_time_stamps();
1962
1963   if (!cleanup_list_is_empty()) {
1964     // The cleanup list is not empty, so we'll have to process it
1965     // concurrently. Notify anyone else that might be wanting free
1966     // regions that there will be more free regions coming soon.
1967     g1h->set_free_regions_coming();
1968   }
1969
1970   // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
1971   // call below, since scrubbing affects the metric by which we sort the heap regions.
1972   if (G1ScrubRemSets) {
1973     double rs_scrub_start = os::elapsedTime();
1974     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
1975     g1h->workers()->run_task(&g1_par_scrub_rs_task);
1976
1977     double rs_scrub_end = os::elapsedTime();
1978     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
1979     _total_rs_scrub_time += this_rs_scrub_time;
1980   }
1981
1982   // this will also free any regions totally full of garbage objects,
1983   // and sort the regions.
1984   g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1985
1986   // Statistics.
1987   double end = os::elapsedTime();
1988   _cleanup_times.add((end - start) * 1000.0);
1989
1990   if (G1Log::fine()) {
1991     g1h->g1_policy()->print_heap_transition(start_used_bytes);
1992   }
1993
1994   // Cleanup will have freed any regions completely full of garbage.
1995   // Update the soft reference policy with the new heap occupancy.
1996   Universe::update_heap_info_at_gc();
1997
1998   if (VerifyDuringGC) {
1999     HandleMark hm;  // handle scope
2000     g1h->prepare_for_verify();
2001     Universe::verify(VerifyOption_G1UsePrevMarking,
2002                      " VerifyDuringGC:(after)");
2003   }
2004
2005   g1h->check_bitmaps("Cleanup End");
2006
2007   g1h->verify_region_sets_optional();
2008
2009   // We need to make this count as a "collection" so that any collection
2010   // pause that races with it goes around and waits for completeCleanup to finish.
2011   g1h->increment_total_collections();
2012
2013   // Clean out dead classes and update Metaspace sizes.
2014   if (ClassUnloadingWithConcurrentMark) {
2015     ClassLoaderDataGraph::purge();
2016   }
2017   MetaspaceGC::compute_new_size();
2018
2019   // We reclaimed old regions so we should recalculate the sizes to make
2020   // sure we update the old gen/space data.
2021   g1h->g1mm()->update_sizes();
2022   g1h->allocation_context_stats().update_after_mark();
2023
2024   g1h->trace_heap_after_concurrent_cycle();
2025 }
2026
2027 void ConcurrentMark::completeCleanup() {
2028   if (has_aborted()) return;
2029
2030   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2031
2032   _cleanup_list.verify_optional();
2033   FreeRegionList tmp_free_list("Tmp Free List");
2034
2035   if (G1ConcRegionFreeingVerbose) {
2036     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2037                            "cleanup list has %u entries",
2038                            _cleanup_list.length());
2039   }
2040
2041   // No one else should be accessing the _cleanup_list at this point,
2042   // so it is not necessary to take any locks
2043   while (!_cleanup_list.is_empty()) {
2044     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2045     assert(hr != NULL, "Got NULL from a non-empty list");
2046     hr->par_clear();
2047     tmp_free_list.add_ordered(hr);
2048
2049     // Instead of adding one region at a time to the secondary_free_list,
2050     // we accumulate them in the local list and move them a few at a
2051     // time. This also cuts down on the number of notify_all() calls
2052     // we do during this process. We'll also append the local list when
2053     // _cleanup_list is empty (which means we just removed the last
2054     // region from the _cleanup_list).
2055     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2056         _cleanup_list.is_empty()) {
2057       if (G1ConcRegionFreeingVerbose) {
2058         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2059                                "appending %u entries to the secondary_free_list, "
2060                                "cleanup list still has %u entries",
2061                                tmp_free_list.length(),
2062                                _cleanup_list.length());
2063       }
2064
2065       {
2066         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2067         g1h->secondary_free_list_add(&tmp_free_list);
2068         SecondaryFreeList_lock->notify_all();
2069       }
2070 #ifndef PRODUCT
2071       if (G1StressConcRegionFreeing) {
2072         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2073           os::sleep(Thread::current(), (jlong) 1, false);
2074         }
2075       }
2076 #endif
2077     }
2078   }
2079   assert(tmp_free_list.is_empty(), "post-condition");
2080 }
2081
2082 // Supporting Object and Oop closures for reference discovery
2083 // and processing during marking
2084
2085 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2086   HeapWord* addr = (HeapWord*)obj;
2087   return addr != NULL &&
2088          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2089 }
2090
2091 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2092 // Uses the CMTask associated with a worker thread (for serial reference
2093 // processing the CMTask for worker 0 is used) to preserve (mark) and
2094 // trace referent objects.
2095 //
2096 // Using the CMTask and embedded local queues avoids having the worker
2097 // threads operating on the global mark stack. This reduces the risk
2098 // of overflowing the stack - which we would rather avoid at this late
2099 // stage. Also using the tasks' local queues removes the potential
2100 // of the workers interfering with each other that could occur if
2101 // operating on the global stack.
2102
2103 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2104   ConcurrentMark* _cm;
2105   CMTask* _task;
2106   int _ref_counter_limit;
2107   int _ref_counter;
2108   bool _is_serial;
2109  public:
2110   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2111     _cm(cm), _task(task), _is_serial(is_serial),
2112     _ref_counter_limit(G1RefProcDrainInterval) {
2113     assert(_ref_counter_limit > 0, "sanity");
2114     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2115     _ref_counter = _ref_counter_limit;
2116   }
2117
2118   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2119   virtual void do_oop(      oop* p) { do_oop_work(p); }
2120
2121   template <class T> void do_oop_work(T* p) {
2122     if (!_cm->has_overflown()) {
2123       oop obj = oopDesc::load_decode_heap_oop(p);
2124       if (_cm->verbose_high()) {
2125         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2126                                "*" PTR_FORMAT " = " PTR_FORMAT,
2127                                _task->worker_id(), p2i(p), p2i((void*) obj));
2128       }
2129
2130       _task->deal_with_reference(obj);
2131       _ref_counter--;
2132
2133       if (_ref_counter == 0) {
2134         // We have dealt with _ref_counter_limit references, pushing them
2135         // and objects reachable from them on to the local stack (and
2136         // possibly the global stack). Call CMTask::do_marking_step() to
2137         // process these entries.
2138         //
2139         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2140         // there's nothing more to do (i.e. we're done with the entries that
2141         // were pushed as a result of the CMTask::deal_with_reference() calls
2142         // above) or we overflow.
2143         //
2144         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2145         // flag while there may still be some work to do. (See the comment at
2146         // the beginning of CMTask::do_marking_step() for those conditions -
2147         // one of which is reaching the specified time target.) It is only
2148         // when CMTask::do_marking_step() returns without setting the
2149         // has_aborted() flag that the marking step has completed.
2150         do {
2151           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2152           _task->do_marking_step(mark_step_duration_ms,
2153                                  false /* do_termination */,
2154                                  _is_serial);
2155         } while (_task->has_aborted() && !_cm->has_overflown());
2156         _ref_counter = _ref_counter_limit;
2157       }
2158     } else {
2159       if (_cm->verbose_high()) {
2160          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2161       }
2162     }
2163   }
2164 };
2165
2166 // 'Drain' oop closure used by both serial and parallel reference processing.
2167 // Uses the CMTask associated with a given worker thread (for serial
2168 // reference processing the CMTask for worker 0 is used). Calls the
2169 // do_marking_step routine, with an unbelievably large timeout value,
2170 // to drain the marking data structures of the remaining entries
2171 // added by the 'keep alive' oop closure above.
2172
2173 class G1CMDrainMarkingStackClosure: public VoidClosure {
2174   ConcurrentMark* _cm;
2175   CMTask* _task;
2176   bool _is_serial;
2177  public:
2178   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2179     _cm(cm), _task(task), _is_serial(is_serial) {
2180     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2181   }
2182
2183   void do_void() {
2184     do {
2185       if (_cm->verbose_high()) {
2186         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2187                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2188       }
2189
2190       // We call CMTask::do_marking_step() to completely drain the local
2191       // and global marking stacks of entries pushed by the 'keep alive'
2192       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2193       //
2194       // CMTask::do_marking_step() is called in a loop, which we'll exit
2195       // if there's nothing more to do (i.e. we've completely drained the
2196       // entries that were pushed as a result of applying the 'keep alive'
2197       // closure to the entries on the discovered ref lists) or we overflow
2198       // the global marking stack.
2199       //
2200       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2201       // flag while there may still be some work to do. (See the comment at
2202       // the beginning of CMTask::do_marking_step() for those conditions -
2203       // one of which is reaching the specified time target.) It is only
2204       // when CMTask::do_marking_step() returns without setting the
2205       // has_aborted() flag that the marking step has completed.
2206 2207 _task->do_marking_step(1000000000.0 /* something very large */, 2208 true /* do_termination */, 2209 _is_serial); 2210 } while (_task->has_aborted() && !_cm->has_overflown()); 2211 } 2212 }; 2213 2214 // Implementation of AbstractRefProcTaskExecutor for parallel 2215 // reference processing at the end of G1 concurrent marking 2216 2217 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 2218 private: 2219 G1CollectedHeap* _g1h; 2220 ConcurrentMark* _cm; 2221 WorkGang* _workers; 2222 uint _active_workers; 2223 2224 public: 2225 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 2226 ConcurrentMark* cm, 2227 WorkGang* workers, 2228 uint n_workers) : 2229 _g1h(g1h), _cm(cm), 2230 _workers(workers), _active_workers(n_workers) { } 2231 2232 // Executes the given task using concurrent marking worker threads. 2233 virtual void execute(ProcessTask& task); 2234 virtual void execute(EnqueueTask& task); 2235 }; 2236 2237 class G1CMRefProcTaskProxy: public AbstractGangTask { 2238 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2239 ProcessTask& _proc_task; 2240 G1CollectedHeap* _g1h; 2241 ConcurrentMark* _cm; 2242 2243 public: 2244 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2245 G1CollectedHeap* g1h, 2246 ConcurrentMark* cm) : 2247 AbstractGangTask("Process reference objects in parallel"), 2248 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2249 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2250 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2251 } 2252 2253 virtual void work(uint worker_id) { 2254 ResourceMark rm; 2255 HandleMark hm; 2256 CMTask* task = _cm->task(worker_id); 2257 G1CMIsAliveClosure g1_is_alive(_g1h); 2258 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2259 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2260 2261 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2262 } 2263 }; 2264 2265 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2266 assert(_workers != NULL, "Need parallel worker threads."); 2267 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2268 2269 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2270 2271 // We need to reset the concurrency level before each 2272 // proxy task execution, so that the termination protocol 2273 // and overflow handling in CMTask::do_marking_step() knows 2274 // how many workers to wait for. 2275 _cm->set_concurrency(_active_workers); 2276 _workers->run_task(&proc_task_proxy); 2277 } 2278 2279 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2280 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2281 EnqueueTask& _enq_task; 2282 2283 public: 2284 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2285 AbstractGangTask("Enqueue reference objects in parallel"), 2286 _enq_task(enq_task) { } 2287 2288 virtual void work(uint worker_id) { 2289 _enq_task.work(worker_id); 2290 } 2291 }; 2292 2293 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2294 assert(_workers != NULL, "Need parallel worker threads."); 2295 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2296 2297 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2298 2299 // Not strictly necessary but... 2300 // 2301 // We need to reset the concurrency level before each 2302 // proxy task execution, so that the termination protocol 2303 // and overflow handling in CMTask::do_marking_step() knows 2304 // how many workers to wait for. 
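// (If the concurrency level were stale, the termination protocol in
// CMTask::do_marking_step() would wait for the wrong number of tasks,
// and the proxy task could hang or terminate prematurely.)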
2305   _cm->set_concurrency(_active_workers);
2306   _workers->run_task(&enq_task_proxy);
2307 }
2308
2309 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2310   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2311 }
2312
2313 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2314   if (has_overflown()) {
2315     // Skip processing the discovered references if we have
2316     // overflown the global marking stack. Reference objects
2317     // only get discovered once so it is OK to not
2318     // de-populate the discovered reference lists. We could have,
2319     // but the only benefit would be that, when marking restarts,
2320     // fewer reference objects are discovered.
2321     return;
2322   }
2323
2324   ResourceMark rm;
2325   HandleMark   hm;
2326
2327   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2328
2329   // Is alive closure.
2330   G1CMIsAliveClosure g1_is_alive(g1h);
2331
2332   // Inner scope to exclude the cleaning of the string and symbol
2333   // tables from the displayed time.
2334   {
2335     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2336
2337     ReferenceProcessor* rp = g1h->ref_processor_cm();
2338
2339     // See the comment in G1CollectedHeap::ref_processing_init()
2340     // about how reference processing currently works in G1.
2341
2342     // Set the soft reference policy
2343     rp->setup_policy(clear_all_soft_refs);
2344     assert(_markStack.isEmpty(), "mark stack should be empty");
2345
2346     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2347     // in serial reference processing. Note these closures are also
2348     // used for serially processing (by the current thread) the
2349     // JNI references during parallel reference processing.
2350     //
2351     // These closures do not need to synchronize with the worker
2352     // threads involved in parallel reference processing as these
2353     // instances are executed serially by the current thread (i.e.
2354     // reference processing is not multi-threaded and is thus
2355     // performed by the current thread instead of a gang worker).
2356     //
2357     // The gang tasks involved in parallel reference processing create
2358     // their own instances of these closures, which do their own
2359     // synchronization among themselves.
2360     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2361     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2362
2363     // We need at least one active thread. If reference processing
2364     // is not multi-threaded we use the current (VMThread) thread,
2365     // otherwise we use the work gang from the G1CollectedHeap and
2366     // we utilize all the worker threads we can.
2367     bool processing_is_mt = rp->processing_is_mt();
2368     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2369     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2370
2371     // Parallel processing task executor.
2372     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2373                                               g1h->workers(), active_workers);
2374     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2375
2376     // Set the concurrency level. The phase was already set prior to
2377     // executing the remark task.
2378     set_concurrency(active_workers);
2379
2380     // Set the degree of MT processing here. If the discovery was done MT,
2381     // the number of threads involved during discovery could differ from
2382     // the number of active workers. This is OK as long as the discovered
2383     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2384     rp->set_active_mt_degree(active_workers);
2385
2386     // Process the weak references.
2387     const ReferenceProcessorStats& stats =
2388         rp->process_discovered_references(&g1_is_alive,
2389                                           &g1_keep_alive,
2390                                           &g1_drain_mark_stack,
2391                                           executor,
2392                                           g1h->gc_timer_cm());
2393     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2394
2395     // The do_oop work routines of the keep_alive and drain_marking_stack
2396     // oop closures will set the has_overflown flag if we overflow the
2397     // global marking stack.
2398
2399     assert(_markStack.overflow() || _markStack.isEmpty(),
2400            "mark stack should be empty (unless it overflowed)");
2401
2402     if (_markStack.overflow()) {
2403       // This should have been done already when we tried to push an
2404       // entry on to the global mark stack. But let's do it again.
2405       set_has_overflown();
2406     }
2407
2408     assert(rp->num_q() == active_workers, "why not");
2409
2410     rp->enqueue_discovered_references(executor);
2411
2412     rp->verify_no_references_recorded();
2413     assert(!rp->discovery_enabled(), "Post condition");
2414   }
2415
2416   if (has_overflown()) {
2417     // We cannot trust g1_is_alive if the marking stack has overflowed
2418     return;
2419   }
2420
2421   assert(_markStack.isEmpty(), "Marking should have completed");
2422
2423   // Unload Klasses, String, Symbols, Code Cache, etc.
2424   {
2425     G1CMTraceTime trace("Unloading", G1Log::finer());
2426
2427     if (ClassUnloadingWithConcurrentMark) {
2428       bool purged_classes;
2429
2430       {
2431         G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
2432         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2433       }
2434
2435       {
2436         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
2437         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2438       }
2439     }
2440
2441     if (G1StringDedup::is_enabled()) {
2442       G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
2443       G1StringDedup::unlink(&g1_is_alive);
2444     }
2445   }
2446 }
2447
2448 void ConcurrentMark::swapMarkBitMaps() {
2449   CMBitMapRO* temp = _prevMarkBitMap;
2450   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2451   _nextMarkBitMap  = (CMBitMap*)  temp;
2452 }
2453
2454 // Closure for marking entries in SATB buffers.
2455 class CMSATBBufferClosure : public SATBBufferClosure {
2456 private:
2457   CMTask* _task;
2458   G1CollectedHeap* _g1h;
2459
2460   // This is very similar to CMTask::deal_with_reference, but with
2461   // more relaxed requirements for the argument, so this must be more
2462   // circumspect about treating the argument as an object.
2463   void do_entry(void* entry) const {
2464     _task->increment_refs_reached();
2465     HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
2466     if (entry < hr->next_top_at_mark_start()) {
2467       // Until we get here, we don't know whether entry refers to a valid
2468       // object; it could instead have been a stale reference.
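      // Entries at or above NTAMS are skipped: such an entry is either a
      // stale reference, or it points to an object allocated since marking
      // started, which is implicitly live and need not be traced.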
2469       oop obj = static_cast<oop>(entry);
2470       assert(obj->is_oop(true /* ignore mark word */),
2471              err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
2472       _task->make_reference_grey(obj, hr);
2473     }
2474   }
2475
2476 public:
2477   CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2478     : _task(task), _g1h(g1h) { }
2479
2480   virtual void do_buffer(void** buffer, size_t size) {
2481     for (size_t i = 0; i < size; ++i) {
2482       do_entry(buffer[i]);
2483     }
2484   }
2485 };
2486
2487 class G1RemarkThreadsClosure : public ThreadClosure {
2488   CMSATBBufferClosure _cm_satb_cl;
2489   G1CMOopClosure _cm_cl;
2490   MarkingCodeBlobClosure _code_cl;
2491   int _thread_parity;
2492
2493  public:
2494   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
2495     _cm_satb_cl(task, g1h),
2496     _cm_cl(g1h, g1h->concurrent_mark(), task),
2497     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2498     _thread_parity(Threads::thread_claim_parity()) {}
2499
2500   void do_thread(Thread* thread) {
2501     if (thread->is_Java_thread()) {
2502       if (thread->claim_oops_do(true, _thread_parity)) {
2503         JavaThread* jt = (JavaThread*)thread;
2504
2505         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2506         // however, oops reachable from nmethods have very complex lifecycles:
2507         // * Alive if on the stack of an executing method
2508         // * Weakly reachable otherwise
2509         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2510         // live by the SATB invariant but other oops recorded in nmethods may behave differently.
2511         jt->nmethods_do(&_code_cl);
2512
2513         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
2514       }
2515     } else if (thread->is_VM_thread()) {
2516       if (thread->claim_oops_do(true, _thread_parity)) {
2517         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
2518       }
2519     }
2520   }
2521 };
2522
2523 class CMRemarkTask: public AbstractGangTask {
2524 private:
2525   ConcurrentMark* _cm;
2526 public:
2527   void work(uint worker_id) {
2528     // Since all available tasks are actually started, we should
2529     // only proceed if we're supposed to be active.
2530     if (worker_id < _cm->active_tasks()) {
2531       CMTask* task = _cm->task(worker_id);
2532       task->record_start_time();
2533       {
2534         ResourceMark rm;
2535         HandleMark hm;
2536
2537         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2538         Threads::threads_do(&threads_f);
2539       }
2540
2541       do {
2542         task->do_marking_step(1000000000.0 /* something very large */,
2543                               true         /* do_termination       */,
2544                               false        /* is_serial            */);
2545       } while (task->has_aborted() && !_cm->has_overflown());
2546       // If we overflow, then we do not want to restart. We instead
2547       // want to abort remark and do concurrent marking again.
2548       task->record_end_time();
2549     }
2550   }
2551
2552   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2553     AbstractGangTask("Par Remark"), _cm(cm) {
2554     _cm->terminator()->reset_for_reuse(active_workers);
2555   }
2556 };
2557
2558 void ConcurrentMark::checkpointRootsFinalWork() {
2559   ResourceMark rm;
2560   HandleMark   hm;
2561   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2562
2563   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2564
2565   g1h->ensure_parsability(false);
2566
2567   // this is remark, so we'll use up all active threads
2568   uint active_workers = g1h->workers()->active_workers();
2569   set_concurrency_and_phase(active_workers, false /* concurrent */);
2570   // Leave _parallel_marking_threads at its
2571   // value originally calculated in the ConcurrentMark
2572   // constructor and pass values of the active workers
2573   // through the gang in the task.
2574
2575   {
2576     StrongRootsScope srs(active_workers);
2577
2578     CMRemarkTask remarkTask(this, active_workers);
2579     // We will start all available threads, even if we decide that the
2580     // active_workers will be fewer. The extra ones will just bail out
2581     // immediately.
2582     g1h->workers()->run_task(&remarkTask);
2583   }
2584
2585   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2586   guarantee(has_overflown() ||
2587             satb_mq_set.completed_buffers_num() == 0,
2588             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2589                     BOOL_TO_STR(has_overflown()),
2590                     satb_mq_set.completed_buffers_num()));
2591
2592   print_stats();
2593 }
2594
2595 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2596   // Note we are overriding the read-only view of the prev map here, via
2597   // the cast.
2598   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2599 }
2600
2601 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2602   _nextMarkBitMap->clearRange(mr);
2603 }
2604
2605 HeapRegion*
2606 ConcurrentMark::claim_region(uint worker_id) {
2607   // "checkpoint" the finger
2608   HeapWord* finger = _finger;
2609
2610   // _heap_end will not change underneath our feet; it only changes at
2611   // yield points.
2612   while (finger < _heap_end) {
2613     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2614
2615     // Note on how this code handles humongous regions. In the
2616     // normal case the finger will reach the start of a "starts
2617     // humongous" (SH) region. Its end will either be the end of the
2618     // last "continues humongous" (CH) region in the sequence, or the
2619     // standard end of the SH region (if the SH is the only region in
2620     // the sequence). That way claim_region() will skip over the CH
2621     // regions. However, there is a subtle race between a CM thread
2622     // executing this method and a mutator thread doing a humongous
2623     // object allocation. The two are not mutually exclusive as the CM
2624     // thread does not need to hold the Heap_lock when it gets
2625     // here. So there is a chance that claim_region() will come across
2626     // a free region that's in the process of becoming a SH or a CH
2627     // region. In the former case, it will either
2628     //   a) Miss the update to the region's end, in which case it will
2629     //      visit every subsequent CH region, will find their bitmaps
2630     //      empty, and do nothing, or
2631     //   b) Will observe the update of the region's end (in which case
2632     //      it will skip the subsequent CH regions).
2633     // If it comes across a region that suddenly becomes CH, the
2634     // scenario will be similar to b). So, the race between
2635     // claim_region() and a humongous object allocation might force us
2636     // to do a bit of unnecessary work (due to some unnecessary bitmap
2637     // iterations) but it should not introduce any correctness issues.
2638     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2639
2640     // Above, heap_region_containing_raw may return NULL as we always scan
2641     // and claim until the end of the heap. In this case, just jump to the next region.
2642     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2643
2644     // Is the gap between reading the finger and doing the CAS too long?
2645     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2646     if (res == finger && curr_region != NULL) {
2647       // we succeeded
2648       HeapWord* bottom = curr_region->bottom();
2649       HeapWord* limit  = curr_region->next_top_at_mark_start();
2650
2651       if (verbose_low()) {
2652         gclog_or_tty->print_cr("[%u] curr_region = " PTR_FORMAT " "
2653                                "[" PTR_FORMAT ", " PTR_FORMAT "), "
2654                                "limit = " PTR_FORMAT,
2655                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2656       }
2657
2658       // notice that _finger == end cannot be guaranteed here since
2659       // someone else might have moved the finger even further
2660       assert(_finger >= end, "the finger should have moved forward");
2661
2662       if (verbose_low()) {
2663         gclog_or_tty->print_cr("[%u] we were successful with region = "
2664                                PTR_FORMAT, worker_id, p2i(curr_region));
2665       }
2666
2667       if (limit > bottom) {
2668         if (verbose_low()) {
2669           gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is not empty, "
2670                                  "returning it ", worker_id, p2i(curr_region));
2671         }
2672         return curr_region;
2673       } else {
2674         assert(limit == bottom,
2675                "the region limit should be at bottom");
2676         if (verbose_low()) {
2677           gclog_or_tty->print_cr("[%u] region " PTR_FORMAT " is empty, "
2678                                  "returning NULL", worker_id, p2i(curr_region));
2679         }
2680         // we return NULL and the caller should try calling
2681         // claim_region() again.
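        // (The CAS above already advanced the global finger past this
        // empty region, so the retry will start at the next region.)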
2682 return NULL; 2683 } 2684 } else { 2685 assert(_finger > finger, "the finger should have moved forward"); 2686 if (verbose_low()) { 2687 if (curr_region == NULL) { 2688 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 2689 "global finger = " PTR_FORMAT ", " 2690 "our finger = " PTR_FORMAT, 2691 worker_id, p2i(_finger), p2i(finger)); 2692 } else { 2693 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2694 "global finger = " PTR_FORMAT ", " 2695 "our finger = " PTR_FORMAT, 2696 worker_id, p2i(_finger), p2i(finger)); 2697 } 2698 } 2699 2700 // read it again 2701 finger = _finger; 2702 } 2703 } 2704 2705 return NULL; 2706 } 2707 2708 #ifndef PRODUCT 2709 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 2710 private: 2711 G1CollectedHeap* _g1h; 2712 const char* _phase; 2713 int _info; 2714 2715 public: 2716 VerifyNoCSetOops(const char* phase, int info = -1) : 2717 _g1h(G1CollectedHeap::heap()), 2718 _phase(phase), 2719 _info(info) 2720 { } 2721 2722 void operator()(oop obj) const { 2723 guarantee(obj->is_oop(), 2724 err_msg("Non-oop " PTR_FORMAT ", phase: %s, info: %d", 2725 p2i(obj), _phase, _info)); 2726 guarantee(!_g1h->obj_in_cs(obj), 2727 err_msg("obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 2728 p2i(obj), _phase, _info)); 2729 } 2730 }; 2731 2732 void ConcurrentMark::verify_no_cset_oops() { 2733 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2734 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 2735 return; 2736 } 2737 2738 // Verify entries on the global mark stack 2739 _markStack.iterate(VerifyNoCSetOops("Stack")); 2740 2741 // Verify entries on the task queues 2742 for (uint i = 0; i < _max_worker_id; ++i) { 2743 CMTaskQueue* queue = _task_queues->queue(i); 2744 queue->iterate(VerifyNoCSetOops("Queue", i)); 2745 } 2746 2747 // Verify the global finger 2748 HeapWord* global_finger = finger(); 2749 if (global_finger != NULL && global_finger < _heap_end) { 2750 // The global finger always points to a heap region boundary. We 2751 // use heap_region_containing_raw() to get the containing region 2752 // given that the global finger could be pointing to a free region 2753 // which subsequently becomes continues humongous. If that 2754 // happens, heap_region_containing() will return the bottom of the 2755 // corresponding starts humongous region and the check below will 2756 // not hold any more. 2757 // Since we always iterate over all regions, we might get a NULL HeapRegion 2758 // here. 2759 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 2760 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 2761 err_msg("global finger: " PTR_FORMAT " region: " HR_FORMAT, 2762 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 2763 } 2764 2765 // Verify the task fingers 2766 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2767 for (uint i = 0; i < parallel_marking_threads(); ++i) { 2768 CMTask* task = _tasks[i]; 2769 HeapWord* task_finger = task->finger(); 2770 if (task_finger != NULL && task_finger < _heap_end) { 2771 // See above note on the global finger verification. 
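      // Unlike the global finger, a task finger may legitimately point
      // into the interior of a region it is still scanning; the guarantee
      // below therefore only insists on bottom-alignment when the region
      // is in the collection set.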
2772       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2773       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2774                 !task_hr->in_collection_set(),
2775                 err_msg("task finger: " PTR_FORMAT " region: " HR_FORMAT,
2776                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
2777     }
2778   }
2779 }
2780 #endif // PRODUCT
2781
2782 // Aggregate the counting data that was constructed concurrently
2783 // with marking.
2784 class AggregateCountDataHRClosure: public HeapRegionClosure {
2785   G1CollectedHeap* _g1h;
2786   ConcurrentMark* _cm;
2787   CardTableModRefBS* _ct_bs;
2788   BitMap* _cm_card_bm;
2789   uint _max_worker_id;
2790
2791  public:
2792   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2793                               BitMap* cm_card_bm,
2794                               uint max_worker_id) :
2795     _g1h(g1h), _cm(g1h->concurrent_mark()),
2796     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2797     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2798
2799   bool doHeapRegion(HeapRegion* hr) {
2800     if (hr->is_continues_humongous()) {
2801       // We will ignore these here and process them when their
2802       // associated "starts humongous" region is processed.
2803       // Note that we cannot rely on their associated
2804       // "starts humongous" region to have its bit set to 1
2805       // since, due to the region chunking in the parallel region
2806       // iteration, a "continues humongous" region might be visited
2807       // before its associated "starts humongous".
2808       return false;
2809     }
2810
2811     HeapWord* start = hr->bottom();
2812     HeapWord* limit = hr->next_top_at_mark_start();
2813     HeapWord* end = hr->end();
2814
2815     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2816            err_msg("Preconditions not met - "
2817                    "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2818                    "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2819                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
2820
2821     assert(hr->next_marked_bytes() == 0, "Precondition");
2822
2823     if (start == limit) {
2824       // NTAMS of this region has not been set so nothing to do.
2825       return false;
2826     }
2827
2828     // 'start' should be in the heap.
2829     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2830     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2831     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2832
2833     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2834     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2835     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2836
2837     // If ntams is not card aligned then we bump the card bitmap index
2838     // for limit so that we get all the cards spanned by
2839     // the object ending at ntams.
2840     // Note: if this is the last region in the heap then ntams
2841     // could actually be just beyond the end of the heap;
2842     // limit_idx will then correspond to a (non-existent) card
2843     // that is also outside the heap.
2844     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
2845       limit_idx += 1;
2846     }
2847
2848     assert(limit_idx <= end_idx, "or else use atomics");
2849
2850     // Aggregate the "stripe" in the count data associated with hr.
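    // (The loop below sums each worker's per-region marked-bytes counter
    // into a single total for hr, and ORs each worker's private card
    // bitmap into the global card bitmap over [start_idx, limit_idx).)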
2851 uint hrm_index = hr->hrm_index(); 2852 size_t marked_bytes = 0; 2853 2854 for (uint i = 0; i < _max_worker_id; i += 1) { 2855 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i); 2856 BitMap* task_card_bm = _cm->count_card_bitmap_for(i); 2857 2858 // Fetch the marked_bytes in this region for task i and 2859 // add it to the running total for this region. 2860 marked_bytes += marked_bytes_array[hrm_index]; 2861 2862 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx) 2863 // into the global card bitmap. 2864 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 2865 2866 while (scan_idx < limit_idx) { 2867 assert(task_card_bm->at(scan_idx) == true, "should be"); 2868 _cm_card_bm->set_bit(scan_idx); 2869 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 2870 2871 // BitMap::get_next_one_offset() can handle the case when 2872 // its left_offset parameter is greater than its right_offset 2873 // parameter. It does, however, have an early exit if 2874 // left_offset == right_offset. So let's limit the value 2875 // passed in for left offset here. 2876 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 2877 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 2878 } 2879 } 2880 2881 // Update the marked bytes for this region. 2882 hr->add_to_marked_bytes(marked_bytes); 2883 2884 // Next heap region 2885 return false; 2886 } 2887 }; 2888 2889 class G1AggregateCountDataTask: public AbstractGangTask { 2890 protected: 2891 G1CollectedHeap* _g1h; 2892 ConcurrentMark* _cm; 2893 BitMap* _cm_card_bm; 2894 uint _max_worker_id; 2895 uint _active_workers; 2896 HeapRegionClaimer _hrclaimer; 2897 2898 public: 2899 G1AggregateCountDataTask(G1CollectedHeap* g1h, 2900 ConcurrentMark* cm, 2901 BitMap* cm_card_bm, 2902 uint max_worker_id, 2903 uint n_workers) : 2904 AbstractGangTask("Count Aggregation"), 2905 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 2906 _max_worker_id(max_worker_id), 2907 _active_workers(n_workers), 2908 _hrclaimer(_active_workers) { 2909 } 2910 2911 void work(uint worker_id) { 2912 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 2913 2914 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 2915 } 2916 }; 2917 2918 2919 void ConcurrentMark::aggregate_count_data() { 2920 uint n_workers = _g1h->workers()->active_workers(); 2921 2922 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 2923 _max_worker_id, n_workers); 2924 2925 _g1h->workers()->run_task(&g1_par_agg_task); 2926 } 2927 2928 // Clear the per-worker arrays used to store the per-region counting data 2929 void ConcurrentMark::clear_all_count_data() { 2930 // Clear the global card bitmap - it will be filled during 2931 // liveness count aggregation (during remark) and the 2932 // final counting task. 2933 _card_bm.clear(); 2934 2935 // Clear the global region bitmap - it will be filled as part 2936 // of the final counting task. 
2937 _region_bm.clear(); 2938 2939 uint max_regions = _g1h->max_regions(); 2940 assert(_max_worker_id > 0, "uninitialized"); 2941 2942 for (uint i = 0; i < _max_worker_id; i += 1) { 2943 BitMap* task_card_bm = count_card_bitmap_for(i); 2944 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 2945 2946 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 2947 assert(marked_bytes_array != NULL, "uninitialized"); 2948 2949 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 2950 task_card_bm->clear(); 2951 } 2952 } 2953 2954 void ConcurrentMark::print_stats() { 2955 if (verbose_stats()) { 2956 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 2957 for (size_t i = 0; i < _active_tasks; ++i) { 2958 _tasks[i]->print_stats(); 2959 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 2960 } 2961 } 2962 } 2963 2964 // abandon current marking iteration due to a Full GC 2965 void ConcurrentMark::abort() { 2966 if (!cmThread()->during_cycle() || _has_aborted) { 2967 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2968 return; 2969 } 2970 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2971 // concurrent bitmap clearing. 2972 _nextMarkBitMap->clearAll(); 2973 2974 // Note we cannot clear the previous marking bitmap here 2975 // since VerifyDuringGC verifies the objects marked during 2976 // a full GC against the previous bitmap. 2977 2978 // Clear the liveness counting data 2979 clear_all_count_data(); 2980 // Empty mark stack 2981 reset_marking_state(); 2982 for (uint i = 0; i < _max_worker_id; ++i) { 2983 _tasks[i]->clear_region_fields(); 2984 } 2985 _first_overflow_barrier_sync.abort(); 2986 _second_overflow_barrier_sync.abort(); 2987 _has_aborted = true; 2988 2989 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2990 satb_mq_set.abandon_partial_marking(); 2991 // This can be called either during or outside marking, we'll read 2992 // the expected_active value from the SATB queue set. 2993 satb_mq_set.set_active_all_threads( 2994 false, /* new active value */ 2995 satb_mq_set.is_active() /* expected_active */); 2996 2997 _g1h->trace_heap_after_concurrent_cycle(); 2998 _g1h->register_concurrent_cycle_end(); 2999 } 3000 3001 static void print_ms_time_info(const char* prefix, const char* name, 3002 NumberSeq& ns) { 3003 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3004 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3005 if (ns.num() > 0) { 3006 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", 3007 prefix, ns.sd(), ns.maximum()); 3008 } 3009 } 3010 3011 void ConcurrentMark::print_summary_info() { 3012 gclog_or_tty->print_cr(" Concurrent marking:"); 3013 print_ms_time_info(" ", "init marks", _init_times); 3014 print_ms_time_info(" ", "remarks", _remark_times); 3015 { 3016 print_ms_time_info(" ", "final marks", _remark_mark_times); 3017 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3018 3019 } 3020 print_ms_time_info(" ", "cleanups", _cleanup_times); 3021 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3022 _total_counting_time, 3023 (_cleanup_times.num() > 0 ? 
_total_counting_time * 1000.0 / 3024 (double)_cleanup_times.num() 3025 : 0.0)); 3026 if (G1ScrubRemSets) { 3027 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3028 _total_rs_scrub_time, 3029 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3030 (double)_cleanup_times.num() 3031 : 0.0)); 3032 } 3033 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3034 (_init_times.sum() + _remark_times.sum() + 3035 _cleanup_times.sum())/1000.0); 3036 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3037 "(%8.2f s marking).", 3038 cmThread()->vtime_accum(), 3039 cmThread()->vtime_mark_accum()); 3040 } 3041 3042 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3043 _parallel_workers->print_worker_threads_on(st); 3044 } 3045 3046 void ConcurrentMark::print_on_error(outputStream* st) const { 3047 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3048 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3049 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3050 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3051 } 3052 3053 // We take a break if someone is trying to stop the world. 3054 bool ConcurrentMark::do_yield_check(uint worker_id) { 3055 if (SuspendibleThreadSet::should_yield()) { 3056 if (worker_id == 0) { 3057 _g1h->g1_policy()->record_concurrent_pause(); 3058 } 3059 SuspendibleThreadSet::yield(); 3060 return true; 3061 } else { 3062 return false; 3063 } 3064 } 3065 3066 #ifndef PRODUCT 3067 // for debugging purposes 3068 void ConcurrentMark::print_finger() { 3069 gclog_or_tty->print_cr("heap [" PTR_FORMAT ", " PTR_FORMAT "), global finger = " PTR_FORMAT, 3070 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3071 for (uint i = 0; i < _max_worker_id; ++i) { 3072 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3073 } 3074 gclog_or_tty->cr(); 3075 } 3076 #endif 3077 3078 // Closure for iteration over bitmaps 3079 class CMBitMapClosure : public BitMapClosure { 3080 private: 3081 // the bitmap that is being iterated over 3082 CMBitMap* _nextMarkBitMap; 3083 ConcurrentMark* _cm; 3084 CMTask* _task; 3085 3086 public: 3087 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3088 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3089 3090 bool do_bit(size_t offset) { 3091 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3092 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3093 assert( addr < _cm->finger(), "invariant"); 3094 3095 statsOnly( _task->increase_objs_found_on_bitmap() ); 3096 assert(addr >= _task->finger(), "invariant"); 3097 3098 // We move that task's local finger along. 
3099 _task->move_finger_to(addr); 3100 3101 _task->scan_object(oop(addr)); 3102 // we only partially drain the local queue and global stack 3103 _task->drain_local_queue(true); 3104 _task->drain_global_stack(true); 3105 3106 // if the has_aborted flag has been raised, we need to bail out of 3107 // the iteration 3108 return !_task->has_aborted(); 3109 } 3110 }; 3111 3112 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3113 ConcurrentMark* cm, 3114 CMTask* task) 3115 : _g1h(g1h), _cm(cm), _task(task) { 3116 assert(_ref_processor == NULL, "should be initialized to NULL"); 3117 3118 if (G1UseConcMarkReferenceProcessing) { 3119 _ref_processor = g1h->ref_processor_cm(); 3120 assert(_ref_processor != NULL, "should not be NULL"); 3121 } 3122 } 3123 3124 void CMTask::setup_for_region(HeapRegion* hr) { 3125 assert(hr != NULL, 3126 "claim_region() should have filtered out NULL regions"); 3127 assert(!hr->is_continues_humongous(), 3128 "claim_region() should have filtered out continues humongous regions"); 3129 3130 if (_cm->verbose_low()) { 3131 gclog_or_tty->print_cr("[%u] setting up for region " PTR_FORMAT, 3132 _worker_id, p2i(hr)); 3133 } 3134 3135 _curr_region = hr; 3136 _finger = hr->bottom(); 3137 update_region_limit(); 3138 } 3139 3140 void CMTask::update_region_limit() { 3141 HeapRegion* hr = _curr_region; 3142 HeapWord* bottom = hr->bottom(); 3143 HeapWord* limit = hr->next_top_at_mark_start(); 3144 3145 if (limit == bottom) { 3146 if (_cm->verbose_low()) { 3147 gclog_or_tty->print_cr("[%u] found an empty region " 3148 "[" PTR_FORMAT ", " PTR_FORMAT ")", 3149 _worker_id, p2i(bottom), p2i(limit)); 3150 } 3151 // The region was collected underneath our feet. 3152 // We set the finger to bottom to ensure that the bitmap 3153 // iteration that will follow this will not do anything. 3154 // (this is not a condition that holds when we set the region up, 3155 // as the region is not supposed to be empty in the first place) 3156 _finger = bottom; 3157 } else if (limit >= _region_limit) { 3158 assert(limit >= _finger, "peace of mind"); 3159 } else { 3160 assert(limit < _region_limit, "only way to get here"); 3161 // This can happen under some pretty unusual circumstances. An 3162 // evacuation pause empties the region underneath our feet (NTAMS 3163 // at bottom). We then do some allocation in the region (NTAMS 3164 // stays at bottom), followed by the region being used as a GC 3165 // alloc region (NTAMS will move to top() and the objects 3166 // originally below it will be grayed). All objects now marked in 3167 // the region are explicitly grayed, if below the global finger, 3168 // and we do not need in fact to scan anything else. So, we simply 3169 // set _finger to be limit to ensure that the bitmap iteration 3170 // doesn't do anything. 3171 _finger = limit; 3172 } 3173 3174 _region_limit = limit; 3175 } 3176 3177 void CMTask::giveup_current_region() { 3178 assert(_curr_region != NULL, "invariant"); 3179 if (_cm->verbose_low()) { 3180 gclog_or_tty->print_cr("[%u] giving up region " PTR_FORMAT, 3181 _worker_id, p2i(_curr_region)); 3182 } 3183 clear_region_fields(); 3184 } 3185 3186 void CMTask::clear_region_fields() { 3187 // Values for these three fields that indicate that we're not 3188 // holding on to a region. 
3189 _curr_region = NULL; 3190 _finger = NULL; 3191 _region_limit = NULL; 3192 } 3193 3194 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3195 if (cm_oop_closure == NULL) { 3196 assert(_cm_oop_closure != NULL, "invariant"); 3197 } else { 3198 assert(_cm_oop_closure == NULL, "invariant"); 3199 } 3200 _cm_oop_closure = cm_oop_closure; 3201 } 3202 3203 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3204 guarantee(nextMarkBitMap != NULL, "invariant"); 3205 3206 if (_cm->verbose_low()) { 3207 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3208 } 3209 3210 _nextMarkBitMap = nextMarkBitMap; 3211 clear_region_fields(); 3212 3213 _calls = 0; 3214 _elapsed_time_ms = 0.0; 3215 _termination_time_ms = 0.0; 3216 _termination_start_time_ms = 0.0; 3217 3218 #if _MARKING_STATS_ 3219 _aborted = 0; 3220 _aborted_overflow = 0; 3221 _aborted_cm_aborted = 0; 3222 _aborted_yield = 0; 3223 _aborted_timed_out = 0; 3224 _aborted_satb = 0; 3225 _aborted_termination = 0; 3226 _steal_attempts = 0; 3227 _steals = 0; 3228 _local_pushes = 0; 3229 _local_pops = 0; 3230 _local_max_size = 0; 3231 _objs_scanned = 0; 3232 _global_pushes = 0; 3233 _global_pops = 0; 3234 _global_max_size = 0; 3235 _global_transfers_to = 0; 3236 _global_transfers_from = 0; 3237 _regions_claimed = 0; 3238 _objs_found_on_bitmap = 0; 3239 _satb_buffers_processed = 0; 3240 #endif // _MARKING_STATS_ 3241 } 3242 3243 bool CMTask::should_exit_termination() { 3244 regular_clock_call(); 3245 // This is called when we are in the termination protocol. We should 3246 // quit if, for some reason, this task wants to abort or the global 3247 // stack is not empty (this means that we can get work from it). 3248 return !_cm->mark_stack_empty() || has_aborted(); 3249 } 3250 3251 void CMTask::reached_limit() { 3252 assert(_words_scanned >= _words_scanned_limit || 3253 _refs_reached >= _refs_reached_limit , 3254 "shouldn't have been called otherwise"); 3255 regular_clock_call(); 3256 } 3257 3258 void CMTask::regular_clock_call() { 3259 if (has_aborted()) return; 3260 3261 // First, we need to recalculate the words scanned and refs reached 3262 // limits for the next clock call. 3263 recalculate_limits(); 3264 3265 // During the regular clock call we do the following 3266 3267 // (1) If an overflow has been flagged, then we abort. 3268 if (_cm->has_overflown()) { 3269 set_has_aborted(); 3270 return; 3271 } 3272 3273 // If we are not concurrent (i.e. we're doing remark) we don't need 3274 // to check anything else. The other steps are only needed during 3275 // the concurrent marking phase. 3276 if (!concurrent()) return; 3277 3278 // (2) If marking has been aborted for Full GC, then we also abort. 3279 if (_cm->has_aborted()) { 3280 set_has_aborted(); 3281 statsOnly( ++_aborted_cm_aborted ); 3282 return; 3283 } 3284 3285 double curr_time_ms = os::elapsedVTime() * 1000.0; 3286 3287 // (3) If marking stats are enabled, then we update the step history. 
3288 #if _MARKING_STATS_
3289   if (_words_scanned >= _words_scanned_limit) {
3290     ++_clock_due_to_scanning;
3291   }
3292   if (_refs_reached >= _refs_reached_limit) {
3293     ++_clock_due_to_marking;
3294   }
3295
3296   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3297   _interval_start_time_ms = curr_time_ms;
3298   _all_clock_intervals_ms.add(last_interval_ms);
3299
3300   if (_cm->verbose_medium()) {
3301     gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3302                            "scanned = " SIZE_FORMAT "%s, refs reached = " SIZE_FORMAT "%s",
3303                            _worker_id, last_interval_ms,
3304                            _words_scanned,
3305                            (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3306                            _refs_reached,
3307                            (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3308   }
3309 #endif // _MARKING_STATS_
3310
3311   // (4) We check whether we should yield. If we have to, then we abort.
3312   if (SuspendibleThreadSet::should_yield()) {
3313     // We should yield. To do this we abort the task. The caller is
3314     // responsible for yielding.
3315     set_has_aborted();
3316     statsOnly( ++_aborted_yield );
3317     return;
3318   }
3319
3320   // (5) We check whether we've reached our time quota. If we have,
3321   // then we abort.
3322   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3323   if (elapsed_time_ms > _time_target_ms) {
3324     set_has_aborted();
3325     _has_timed_out = true;
3326     statsOnly( ++_aborted_timed_out );
3327     return;
3328   }
3329
3330   // (6) Finally, we check whether there are enough completed SATB
3331   // buffers available for processing. If there are, we abort.
3332   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3333   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3334     if (_cm->verbose_low()) {
3335       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
3336                              _worker_id);
3337     }
3338     // we do need to process SATB buffers, so we'll abort and restart
3339     // the marking task to do so
3340     set_has_aborted();
3341     statsOnly( ++_aborted_satb );
3342     return;
3343   }
3344 }
3345
3346 void CMTask::recalculate_limits() {
3347   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3348   _words_scanned_limit = _real_words_scanned_limit;
3349
3350   _real_refs_reached_limit = _refs_reached + refs_reached_period;
3351   _refs_reached_limit = _real_refs_reached_limit;
3352 }
3353
3354 void CMTask::decrease_limits() {
3355   // This is called when we believe that we're going to do an infrequent
3356   // operation which will increase the per-byte scanned cost (i.e. move
3357   // entries to/from the global stack). It basically tries to decrease the
3358   // scanning limit so that the clock is called earlier.
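  //
  // Illustrative arithmetic (editorial note; the period value is
  // hypothetical, not taken from this file): if words_scanned_period
  // were 12*1024 and _real_words_scanned_limit were 100000, the
  // assignment below would lower the soft limit to
  // 100000 - 3*12288/4 = 90784, so the next regular_clock_call()
  // fires roughly 9K scanned words sooner.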
3359
3360   if (_cm->verbose_medium()) {
3361     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
3362   }
3363
3364   _words_scanned_limit = _real_words_scanned_limit -
3365     3 * words_scanned_period / 4;
3366   _refs_reached_limit = _real_refs_reached_limit -
3367     3 * refs_reached_period / 4;
3368 }
3369
3370 void CMTask::move_entries_to_global_stack() {
3371   // local array where we'll store the entries that will be popped
3372   // from the local queue
3373   oop buffer[global_stack_transfer_size];
3374
3375   int n = 0;
3376   oop obj;
3377   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3378     buffer[n] = obj;
3379     ++n;
3380   }
3381
3382   if (n > 0) {
3383     // we popped at least one entry from the local queue
3384
3385     statsOnly( ++_global_transfers_to; _local_pops += n );
3386
3387     if (!_cm->mark_stack_push(buffer, n)) {
3388       if (_cm->verbose_low()) {
3389         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
3390                                _worker_id);
3391       }
3392       set_has_aborted();
3393     } else {
3394       // the transfer was successful
3395
3396       if (_cm->verbose_medium()) {
3397         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
3398                                _worker_id, n);
3399       }
3400       statsOnly( size_t tmp_size = _cm->mark_stack_size();
3401                  if (tmp_size > _global_max_size) {
3402                    _global_max_size = tmp_size;
3403                  }
3404                  _global_pushes += n );
3405     }
3406   }
3407
3408   // this operation was quite expensive, so decrease the limits
3409   decrease_limits();
3410 }
3411
3412 void CMTask::get_entries_from_global_stack() {
3413   // local array where we'll store the entries that will be popped
3414   // from the global stack.
3415   oop buffer[global_stack_transfer_size];
3416   int n;
3417   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3418   assert(n <= global_stack_transfer_size,
3419          "we should not pop more than the given limit");
3420   if (n > 0) {
3421     // yes, we did actually pop at least one entry
3422
3423     statsOnly( ++_global_transfers_from; _global_pops += n );
3424     if (_cm->verbose_medium()) {
3425       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
3426                              _worker_id, n);
3427     }
3428     for (int i = 0; i < n; ++i) {
3429       bool success = _task_queue->push(buffer[i]);
3430       // We only call this when the local queue is empty or under a
3431       // given target limit. So, we do not expect this push to fail.
3432       assert(success, "invariant");
3433     }
3434
3435     statsOnly( size_t tmp_size = (size_t)_task_queue->size();
3436                if (tmp_size > _local_max_size) {
3437                  _local_max_size = tmp_size;
3438                }
3439                _local_pushes += n );
3440   }
3441
3442   // this operation was quite expensive, so decrease the limits
3443   decrease_limits();
3444 }
3445
3446 void CMTask::drain_local_queue(bool partially) {
3447   if (has_aborted()) return;
3448
3449   // Decide what the target size is, depending on whether we're going to
3450   // drain it partially (so that other tasks can steal if they run out
3451   // of things to do) or totally (at the very end).
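  // For a partial drain, the target below is a third of the queue's
  // capacity, capped by GCDrainStackTargetSize, so that peers still
  // find entries to steal; a total drain targets zero.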
3452   size_t target_size;
3453   if (partially) {
3454     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3455   } else {
3456     target_size = 0;
3457   }
3458
3459   if (_task_queue->size() > target_size) {
3460     if (_cm->verbose_high()) {
3461       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
3462                              _worker_id, target_size);
3463     }
3464
3465     oop obj;
3466     bool ret = _task_queue->pop_local(obj);
3467     while (ret) {
3468       statsOnly( ++_local_pops );
3469
3470       if (_cm->verbose_high()) {
3471         gclog_or_tty->print_cr("[%u] popped " PTR_FORMAT, _worker_id,
3472                                p2i((void*) obj));
3473       }
3474
3475       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
3476       assert(!_g1h->is_on_master_free_list(
3477                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3478
3479       scan_object(obj);
3480
3481       if (_task_queue->size() <= target_size || has_aborted()) {
3482         ret = false;
3483       } else {
3484         ret = _task_queue->pop_local(obj);
3485       }
3486     }
3487
3488     if (_cm->verbose_high()) {
3489       gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
3490                              _worker_id, _task_queue->size());
3491     }
3492   }
3493 }
3494
3495 void CMTask::drain_global_stack(bool partially) {
3496   if (has_aborted()) return;
3497
3498   // We have a policy to drain the local queue before we attempt to
3499   // drain the global stack.
3500   assert(partially || _task_queue->size() == 0, "invariant");
3501
3502   // Decide what the target size is, depending on whether we're going to
3503   // drain it partially (so that other tasks can steal if they run out
3504   // of things to do) or totally (at the very end). Notice that,
3505   // because we move entries from the global stack in chunks or
3506   // because another task might be doing the same, we might in fact
3507   // drop below the target. But this is not a problem.
3508   size_t target_size;
3509   if (partially) {
3510     target_size = _cm->partial_mark_stack_size_target();
3511   } else {
3512     target_size = 0;
3513   }
3514
3515   if (_cm->mark_stack_size() > target_size) {
3516     if (_cm->verbose_low()) {
3517       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
3518                              _worker_id, target_size);
3519     }
3520
3521     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3522       get_entries_from_global_stack();
3523       drain_local_queue(partially);
3524     }
3525
3526     if (_cm->verbose_low()) {
3527       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
3528                              _worker_id, _cm->mark_stack_size());
3529     }
3530   }
3531 }
3532
3533 // The SATB queue set makes several assumptions about whether to call
3534 // the par or non-par versions of its methods; this is why some of the
3535 // code is replicated. We should really get rid of the single-threaded
3536 // version of the code to simplify things.
3537 void CMTask::drain_satb_buffers() {
3538   if (has_aborted()) return;
3539
3540   // We set this so that the regular clock knows that we're in the
3541   // middle of draining buffers and doesn't set the abort flag when it
3542   // notices that SATB buffers are available for draining. It'd be
3543   // very counterproductive if it did that. :-)
3544   _draining_satb_buffers = true;
3545
3546   CMSATBBufferClosure satb_cl(this, _g1h);
3547   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3548
3549   // This keeps claiming and applying the closure to completed buffers
3550   // until we run out of buffers or we need to abort.
  while (!has_aborted() &&
3552          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
3553     if (_cm->verbose_medium()) {
3554       gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
3555     }
3556     statsOnly( ++_satb_buffers_processed );
3557     regular_clock_call();
3558   }
3559
3560   _draining_satb_buffers = false;
3561
3562   assert(has_aborted() ||
3563          concurrent() ||
3564          satb_mq_set.completed_buffers_num() == 0, "invariant");
3565
3566   // again, this was a potentially expensive operation, so decrease the
3567   // limits to get the regular clock call early
3568   decrease_limits();
3569 }
3570
3571 void CMTask::print_stats() {
3572   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3573                          _worker_id, _calls);
3574   gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3575                          _elapsed_time_ms, _termination_time_ms);
3576   gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3577                          _step_times_ms.num(), _step_times_ms.avg(),
3578                          _step_times_ms.sd());
3579   gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
3580                          _step_times_ms.maximum(), _step_times_ms.sum());
3581
3582 #if _MARKING_STATS_
3583   gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3584                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3585                          _all_clock_intervals_ms.sd());
3586   gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
3587                          _all_clock_intervals_ms.maximum(),
3588                          _all_clock_intervals_ms.sum());
3589   gclog_or_tty->print_cr(" Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT,
3590                          _clock_due_to_scanning, _clock_due_to_marking);
3591   gclog_or_tty->print_cr(" Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT,
3592                          _objs_scanned, _objs_found_on_bitmap);
3593   gclog_or_tty->print_cr(" Local Queue: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3594                          _local_pushes, _local_pops, _local_max_size);
3595   gclog_or_tty->print_cr(" Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
3596                          _global_pushes, _global_pops, _global_max_size);
3597   gclog_or_tty->print_cr(" transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT,
3598                          _global_transfers_to, _global_transfers_from);
3599   gclog_or_tty->print_cr(" Regions: claimed = " SIZE_FORMAT, _regions_claimed);
3600   gclog_or_tty->print_cr(" SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed);
3601   gclog_or_tty->print_cr(" Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT,
3602                          _steal_attempts, _steals);
3603   gclog_or_tty->print_cr(" Aborted: " SIZE_FORMAT ", due to", _aborted);
3604   gclog_or_tty->print_cr(" overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT,
3605                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
3606   gclog_or_tty->print_cr(" time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT,
3607                          _aborted_timed_out, _aborted_satb, _aborted_termination);
3608 #endif // _MARKING_STATS_
3609 }
3610
3611 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
3612   return _task_queues->steal(worker_id, hash_seed, obj);
3613 }
3614
3615 /*****************************************************************************
3616
3617     The do_marking_step(time_target_ms, ...) method is the building
3618     block of the parallel marking framework.
 It can be called in parallel
3619     with other invocations of do_marking_step() on different tasks
3620     (but only one per task, obviously) and concurrently with the
3621     mutator threads, or during remark, hence it eliminates the need
3622     for two versions of the code. When called during remark, it will
3623     pick up from where the task left off during the concurrent marking
3624     phase. Interestingly, tasks are also claimable during evacuation
3625     pauses, since do_marking_step() ensures that it aborts before
3626     it needs to yield.
3627
3628     The data structures that it uses to do marking work are the
3629     following:
3630
3631       (1) Marking Bitmap. If there are gray objects that appear only
3632       on the bitmap (this happens either when dealing with an overflow
3633       or when the initial marking phase has simply marked the roots
3634       and didn't push them on the stack), then tasks claim heap
3635       regions whose bitmap they then scan to find gray objects. A
3636       global finger indicates where the end of the last claimed region
3637       is. A local finger indicates how far into the region a task has
3638       scanned. The two fingers are used to determine how to gray an
3639       object (i.e. whether simply marking it is OK, as it will be
3640       visited by a task in the future, or whether it also needs to be
3641       pushed on a stack).
3642
3643       (2) Local Queue. The local queue of the task, which the task can
3644       access reasonably efficiently. Other tasks can steal from
3645       it when they run out of work. Throughout the marking phase, a
3646       task attempts to keep its local queue short but not totally
3647       empty, so that entries are available for stealing by other
3648       tasks. Only when there is no more work will a task totally
3649       drain its local queue.
3650
3651       (3) Global Mark Stack. This handles local queue overflow. During
3652       marking only sets of entries are moved between it and the local
3653       queues, as access to it requires a mutex and finer-grained
3654       interaction with it might cause contention. If it
3655       overflows, then the marking phase should restart and iterate
3656       over the bitmap to identify gray objects. Throughout the marking
3657       phase, tasks attempt to keep the global mark stack at a small
3658       length but not totally empty, so that entries are available for
3659       popping by other tasks. Only when there is no more work will
3660       tasks totally drain the global mark stack.
3661
3662       (4) SATB Buffer Queue. This is where completed SATB buffers are
3663       made available. Buffers are regularly removed from this queue
3664       and scanned for roots, so that the queue doesn't get too
3665       long. During remark, all completed buffers are processed, as
3666       well as the filled-in parts of any uncompleted buffers.
3667
3668     The do_marking_step() method tries to abort when the time target
3669     has been reached. There are a few other cases when the
3670     do_marking_step() method also aborts:
3671
3672       (1) When the marking phase has been aborted (after a Full GC).
3673
3674       (2) When a global overflow (on the global stack) has been
3675       triggered. Before the task aborts, it will actually sync up with
3676       the other tasks to ensure that all the marking data structures
3677       (local queues, stacks, fingers etc.) are re-initialized so that
3678       when do_marking_step() completes, the marking phase can
3679       immediately restart.
3680
3681       (3) When enough completed SATB buffers are available. The
3682       do_marking_step() method only tries to drain SATB buffers right
3683       at the beginning.
 So, if enough buffers are available, the
3684       marking step aborts and the SATB buffers are processed at
3685       the beginning of the next invocation.
3686
3687       (4) To yield. When we have to yield, we abort and do the yield
3688       right at the end of do_marking_step(). This saves us from a lot
3689       of hassle as, by yielding, we might allow a Full GC. If this
3690       happens then objects will be compacted underneath our feet, the
3691       heap might shrink, etc. We save checking for this by just
3692       aborting and doing the yield right at the end.
3693
3694     From the above it follows that the do_marking_step() method should
3695     be called in a loop (or, otherwise, regularly) until it completes.
3696
3697     If a marking step completes without its has_aborted() flag being
3698     true, it means it has completed the current marking phase (and
3699     also all other marking tasks have done so and have all synced up).
3700
3701     A method called regular_clock_call() is invoked "regularly" (in
3702     sub-ms intervals) throughout marking. It is this clock method that
3703     checks all the abort conditions which were mentioned above and
3704     decides when the task should abort. A work-based scheme is used to
3705     trigger this clock method: it fires when the number of object words
3706     the marking phase has scanned or the number of references the marking
3707     phase has visited reaches a given limit. Additional invocations of
3708     the clock method have been planted in a few other strategic places
3709     too. The initial reason for the clock method was to avoid calling
3710     vtime too regularly, as it is quite expensive. So, once it was in
3711     place, it was natural to piggy-back all the other conditions on it
3712     too and not constantly check them throughout the code. (An editorial standalone sketch of this work-based scheme appears at the end of this listing.)
3713
3714     If do_termination is true then do_marking_step will enter its
3715     termination protocol.
3716
3717     The value of is_serial must be true when do_marking_step is being
3718     called serially (i.e. by the VMThread) and do_marking_step should
3719     skip any synchronization in the termination and overflow code.
3720     Examples include the serial remark code and the serial reference
3721     processing closures.
3722
3723     The value of is_serial must be false when do_marking_step is
3724     being called by any of the worker threads in a work gang.
3725     Examples include the concurrent marking code (CMMarkingTask),
3726     the MT remark code, and the MT reference processing closures.
3727
3728  *****************************************************************************/
3729
3730 void CMTask::do_marking_step(double time_target_ms,
3731                              bool do_termination,
3732                              bool is_serial) {
3733   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
3734   assert(concurrent() == _cm->concurrent(), "they should be the same");
3735
3736   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
3737   assert(_task_queues != NULL, "invariant");
3738   assert(_task_queue != NULL, "invariant");
3739   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
3740
3741   assert(!_claimed,
3742          "only one thread should claim this task at any one time");
3743
3744   // OK, this doesn't safeguard against all possible scenarios, as it is
3745   // possible for two threads to set the _claimed flag at the same
3746   // time. But it is only for debugging purposes anyway and it will
3747   // catch most problems.
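  // (Editorial note: a debug-only CAS, e.g. Atomic::cmpxchg on an
  // int-sized flag, would presumably close that window; the plain store
  // below is kept because the flag is only a debugging aid.)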
3748 _claimed = true; 3749 3750 _start_time_ms = os::elapsedVTime() * 1000.0; 3751 statsOnly( _interval_start_time_ms = _start_time_ms ); 3752 3753 // If do_stealing is true then do_marking_step will attempt to 3754 // steal work from the other CMTasks. It only makes sense to 3755 // enable stealing when the termination protocol is enabled 3756 // and do_marking_step() is not being called serially. 3757 bool do_stealing = do_termination && !is_serial; 3758 3759 double diff_prediction_ms = 3760 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 3761 _time_target_ms = time_target_ms - diff_prediction_ms; 3762 3763 // set up the variables that are used in the work-based scheme to 3764 // call the regular clock method 3765 _words_scanned = 0; 3766 _refs_reached = 0; 3767 recalculate_limits(); 3768 3769 // clear all flags 3770 clear_has_aborted(); 3771 _has_timed_out = false; 3772 _draining_satb_buffers = false; 3773 3774 ++_calls; 3775 3776 if (_cm->verbose_low()) { 3777 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 3778 "target = %1.2lfms >>>>>>>>>>", 3779 _worker_id, _calls, _time_target_ms); 3780 } 3781 3782 // Set up the bitmap and oop closures. Anything that uses them is 3783 // eventually called from this method, so it is OK to allocate these 3784 // statically. 3785 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 3786 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 3787 set_cm_oop_closure(&cm_oop_closure); 3788 3789 if (_cm->has_overflown()) { 3790 // This can happen if the mark stack overflows during a GC pause 3791 // and this task, after a yield point, restarts. We have to abort 3792 // as we need to get into the overflow protocol which happens 3793 // right at the end of this task. 3794 set_has_aborted(); 3795 } 3796 3797 // First drain any available SATB buffers. After this, we will not 3798 // look at SATB buffers before the next invocation of this method. 3799 // If enough completed SATB buffers are queued up, the regular clock 3800 // will abort this task so that it restarts. 3801 drain_satb_buffers(); 3802 // ...then partially drain the local queue and the global stack 3803 drain_local_queue(true); 3804 drain_global_stack(true); 3805 3806 do { 3807 if (!has_aborted() && _curr_region != NULL) { 3808 // This means that we're already holding on to a region. 3809 assert(_finger != NULL, "if region is not NULL, then the finger " 3810 "should not be NULL either"); 3811 3812 // We might have restarted this task after an evacuation pause 3813 // which might have evacuated the region we're holding on to 3814 // underneath our feet. Let's read its limit again to make sure 3815 // that we do not iterate over a region of the heap that 3816 // contains garbage (update_region_limit() will also move 3817 // _finger to the start of the region if it is found empty). 3818 update_region_limit(); 3819 // We will start from _finger not from the start of the region, 3820 // as we might be restarting this task after aborting half-way 3821 // through scanning this region. In this case, _finger points to 3822 // the address where we last found a marked object. If this is a 3823 // fresh region, _finger points to start(). 
3824 MemRegion mr = MemRegion(_finger, _region_limit); 3825 3826 if (_cm->verbose_low()) { 3827 gclog_or_tty->print_cr("[%u] we're scanning part " 3828 "[" PTR_FORMAT ", " PTR_FORMAT ") " 3829 "of region " HR_FORMAT, 3830 _worker_id, p2i(_finger), p2i(_region_limit), 3831 HR_FORMAT_PARAMS(_curr_region)); 3832 } 3833 3834 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 3835 "humongous regions should go around loop once only"); 3836 3837 // Some special cases: 3838 // If the memory region is empty, we can just give up the region. 3839 // If the current region is humongous then we only need to check 3840 // the bitmap for the bit associated with the start of the object, 3841 // scan the object if it's live, and give up the region. 3842 // Otherwise, let's iterate over the bitmap of the part of the region 3843 // that is left. 3844 // If the iteration is successful, give up the region. 3845 if (mr.is_empty()) { 3846 giveup_current_region(); 3847 regular_clock_call(); 3848 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 3849 if (_nextMarkBitMap->isMarked(mr.start())) { 3850 // The object is marked - apply the closure 3851 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 3852 bitmap_closure.do_bit(offset); 3853 } 3854 // Even if this task aborted while scanning the humongous object 3855 // we can (and should) give up the current region. 3856 giveup_current_region(); 3857 regular_clock_call(); 3858 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 3859 giveup_current_region(); 3860 regular_clock_call(); 3861 } else { 3862 assert(has_aborted(), "currently the only way to do so"); 3863 // The only way to abort the bitmap iteration is to return 3864 // false from the do_bit() method. However, inside the 3865 // do_bit() method we move the _finger to point to the 3866 // object currently being looked at. So, if we bail out, we 3867 // have definitely set _finger to something non-null. 3868 assert(_finger != NULL, "invariant"); 3869 3870 // Region iteration was actually aborted. So now _finger 3871 // points to the address of the object we last scanned. If we 3872 // leave it there, when we restart this task, we will rescan 3873 // the object. It is easy to avoid this. We move the finger by 3874 // enough to point to the next possible object header (the 3875 // bitmap knows by how much we need to move it as it knows its 3876 // granularity). 3877 assert(_finger < _region_limit, "invariant"); 3878 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 3879 // Check if bitmap iteration was aborted while scanning the last object 3880 if (new_finger >= _region_limit) { 3881 giveup_current_region(); 3882 } else { 3883 move_finger_to(new_finger); 3884 } 3885 } 3886 } 3887 // At this point we have either completed iterating over the 3888 // region we were holding on to, or we have aborted. 3889 3890 // We then partially drain the local queue and the global stack. 3891 // (Do we really need this?) 3892 drain_local_queue(true); 3893 drain_global_stack(true); 3894 3895 // Read the note on the claim_region() method on why it might 3896 // return NULL with potentially more regions available for 3897 // claiming and why we have to check out_of_regions() to determine 3898 // whether we're done or not. 3899 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 3900 // We are going to try to claim a new region. We should have 3901 // given up on the previous one. 
3902       // Separated the asserts so that we know which one fires.
3903       assert(_curr_region == NULL, "invariant");
3904       assert(_finger == NULL, "invariant");
3905       assert(_region_limit == NULL, "invariant");
3906       if (_cm->verbose_low()) {
3907         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
3908       }
3909       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
3910       if (claimed_region != NULL) {
3911         // Yes, we managed to claim one
3912         statsOnly( ++_regions_claimed );
3913
3914         if (_cm->verbose_low()) {
3915           gclog_or_tty->print_cr("[%u] we successfully claimed "
3916                                  "region " PTR_FORMAT,
3917                                  _worker_id, p2i(claimed_region));
3918         }
3919
3920         setup_for_region(claimed_region);
3921         assert(_curr_region == claimed_region, "invariant");
3922       }
3923       // It is important to call the regular clock here. It might take
3924       // a while to claim a region if, for example, we hit a large
3925       // block of empty regions. So we need to call the regular clock
3926       // method once around the loop to make sure it's called
3927       // frequently enough.
3928       regular_clock_call();
3929     }
3930
3931     if (!has_aborted() && _curr_region == NULL) {
3932       assert(_cm->out_of_regions(),
3933              "at this point we should be out of regions");
3934     }
3935   } while ( _curr_region != NULL && !has_aborted());
3936
3937   if (!has_aborted()) {
3938     // We cannot check whether the global stack is empty, since other
3939     // tasks might be pushing objects to it concurrently.
3940     assert(_cm->out_of_regions(),
3941            "at this point we should be out of regions");
3942
3943     if (_cm->verbose_low()) {
3944       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
3945     }
3946
3947     // Try to reduce the number of available SATB buffers so that
3948     // remark has less work to do.
3949     drain_satb_buffers();
3950   }
3951
3952   // Since we've done everything else, we can now totally drain the
3953   // local queue and global stack.
3954   drain_local_queue(false);
3955   drain_global_stack(false);
3956
3957   // Attempt work stealing from other tasks' queues.
3958   if (do_stealing && !has_aborted()) {
3959     // We have not aborted. This means that we have finished all that
3960     // we could. Let's try to do some stealing...
3961
3962     // We cannot check whether the global stack is empty, since other
3963     // tasks might be pushing objects to it concurrently.
3964     assert(_cm->out_of_regions() && _task_queue->size() == 0,
3965            "only way to reach here");
3966
3967     if (_cm->verbose_low()) {
3968       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
3969     }
3970
3971     while (!has_aborted()) {
3972       oop obj;
3973       statsOnly( ++_steal_attempts );
3974
3975       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
3976         if (_cm->verbose_medium()) {
3977           gclog_or_tty->print_cr("[%u] stolen " PTR_FORMAT " successfully",
3978                                  _worker_id, p2i((void*) obj));
3979         }
3980
3981         statsOnly( ++_steals );
3982
3983         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
3984                "any stolen object should be marked");
3985         scan_object(obj);
3986
3987         // And since we're towards the end, let's totally drain the
3988         // local queue and global stack.
3989         drain_local_queue(false);
3990         drain_global_stack(false);
3991       } else {
3992         break;
3993       }
3994     }
3995   }
3996
3997   // If we are about to wrap up and go into termination, check if we
3998   // should raise the overflow flag.
3999   if (do_termination && !has_aborted()) {
4000     if (_cm->force_overflow()->should_force()) {
4001       _cm->set_has_overflown();
4002       regular_clock_call();
4003     }
4004   }
4005
4006   // We still haven't aborted.
 Now, let's try to get into the
4007   // termination protocol.
4008   if (do_termination && !has_aborted()) {
4009     // We cannot check whether the global stack is empty, since other
4010     // tasks might be concurrently pushing objects on it.
4011     // Separated the asserts so that we know which one fires.
4012     assert(_cm->out_of_regions(), "only way to reach here");
4013     assert(_task_queue->size() == 0, "only way to reach here");
4014
4015     if (_cm->verbose_low()) {
4016       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
4017     }
4018
4019     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4020
4021     // The CMTask class also extends the TerminatorTerminator class,
4022     // hence its should_exit_termination() method will also decide
4023     // whether to exit the termination protocol or not.
4024     bool finished = (is_serial ||
4025                      _cm->terminator()->offer_termination(this));
4026     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4027     _termination_time_ms +=
4028       termination_end_time_ms - _termination_start_time_ms;
4029
4030     if (finished) {
4031       // We're all done.
4032
4033       if (_worker_id == 0) {
4034         // let's allow task 0 to do this
4035         if (concurrent()) {
4036           assert(_cm->concurrent_marking_in_progress(), "invariant");
4037           // we need to set this to false before the next
4038           // safepoint. This way we ensure that the marking phase
4039           // doesn't observe any more heap expansions.
4040           _cm->clear_concurrent_marking_in_progress();
4041         }
4042       }
4043
4044       // We can now guarantee that the global stack is empty, since
4045       // all other tasks have finished. We separated the guarantees so
4046       // that, if a condition is false, we can immediately find out
4047       // which one.
4048       guarantee(_cm->out_of_regions(), "only way to reach here");
4049       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4050       guarantee(_task_queue->size() == 0, "only way to reach here");
4051       guarantee(!_cm->has_overflown(), "only way to reach here");
4052       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4053
4054       if (_cm->verbose_low()) {
4055         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
4056       }
4057     } else {
4058       // Apparently there's more work to do. Let's abort this task. Our
4059       // caller will restart it and hopefully we'll find more things to do.
4060
4061       if (_cm->verbose_low()) {
4062         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
4063                                _worker_id);
4064       }
4065
4066       set_has_aborted();
4067       statsOnly( ++_aborted_termination );
4068     }
4069   }
4070
4071   // Mainly for debugging purposes to make sure that a pointer to the
4072   // closure which was statically allocated in this frame doesn't
4073   // escape it by accident.
4074   set_cm_oop_closure(NULL);
4075   double end_time_ms = os::elapsedVTime() * 1000.0;
4076   double elapsed_time_ms = end_time_ms - _start_time_ms;
4077   // Update the step history.
4078   _step_times_ms.add(elapsed_time_ms);
4079
4080   if (has_aborted()) {
4081     // The task was aborted for some reason.
4082
4083     statsOnly( ++_aborted );
4084
4085     if (_has_timed_out) {
4086       double diff_ms = elapsed_time_ms - _time_target_ms;
4087       // Keep statistics of how well we did with respect to hitting
4088       // our target only if we actually timed out (if we aborted for
4089       // other reasons, then the results might get skewed).
4090       _marking_step_diffs_ms.add(diff_ms);
4091     }
4092
4093     if (_cm->has_overflown()) {
4094       // This is the interesting one. We aborted because a global
4095       // overflow was raised.
 This means we have to restart the
4096       // marking phase and start iterating over regions. However, in
4097       // order to do this we have to make sure that all tasks stop
4098       // what they are doing and re-initialize in a safe manner. We
4099       // will achieve this with the use of two barrier sync points.
4100
4101       if (_cm->verbose_low()) {
4102         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4103       }
4104
4105       if (!is_serial) {
4106         // We only need to enter the sync barrier if being called
4107         // from a parallel context.
4108         _cm->enter_first_sync_barrier(_worker_id);
4109
4110         // When we exit this sync barrier we know that all tasks have
4111         // stopped doing marking work. So, it's now safe to
4112         // re-initialize our data structures. At the end of this method,
4113         // task 0 will clear the global data structures.
4114       }
4115
4116       statsOnly( ++_aborted_overflow );
4117
4118       // We clear the local state of this task...
4119       clear_region_fields();
4120
4121       if (!is_serial) {
4122         // ...and enter the second barrier.
4123         _cm->enter_second_sync_barrier(_worker_id);
4124       }
4125       // At this point, if we're in the concurrent phase of
4126       // marking, everything has been re-initialized and we're
4127       // ready to restart.
4128     }
4129
4130     if (_cm->verbose_low()) {
4131       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4132                              "elapsed = %1.2lfms <<<<<<<<<<",
4133                              _worker_id, _time_target_ms, elapsed_time_ms);
4134       if (_cm->has_aborted()) {
4135         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
4136                                _worker_id);
4137       }
4138     }
4139   } else {
4140     if (_cm->verbose_low()) {
4141       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4142                              "elapsed = %1.2lfms <<<<<<<<<<",
4143                              _worker_id, _time_target_ms, elapsed_time_ms);
4144     }
4145   }
4146
4147   _claimed = false;
4148 }
4149
4150 CMTask::CMTask(uint worker_id,
4151                ConcurrentMark* cm,
4152                size_t* marked_bytes,
4153                BitMap* card_bm,
4154                CMTaskQueue* task_queue,
4155                CMTaskQueueSet* task_queues)
4156   : _g1h(G1CollectedHeap::heap()),
4157     _worker_id(worker_id), _cm(cm),
4158     _claimed(false),
4159     _nextMarkBitMap(NULL), _hash_seed(17),
4160     _task_queue(task_queue),
4161     _task_queues(task_queues),
4162     _cm_oop_closure(NULL),
4163     _marked_bytes_array(marked_bytes),
4164     _card_bm(card_bm) {
4165   guarantee(task_queue != NULL, "invariant");
4166   guarantee(task_queues != NULL, "invariant");
4167
4168   statsOnly( _clock_due_to_scanning = 0;
4169              _clock_due_to_marking = 0 );
4170
4171   _marking_step_diffs_ms.add(0.5);
4172 }
4173
4174 // These are formatting macros that are used below to ensure
4175 // consistent formatting. The *_H_* versions are used to format the
4176 // header for a particular value and they should be kept consistent
4177 // with the corresponding macro. Also note that most of the macros add
4178 // the necessary white space (as a prefix) which makes them a bit
4179 // easier to compose.
4180
4181 // All the output lines are prefixed with this string to be able to
4182 // identify them easily in a large log file.
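// For example, with a hypothetical phase name of "Post-Marking", the
// header printed by the closure's constructor below comes out as:
//   ### PHASE Post-Marking @ 12.345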
4183 #define G1PPRL_LINE_PREFIX "###"
4184
4185 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT
4186 #ifdef _LP64
4187 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4188 #else // _LP64
4189 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4190 #endif // _LP64
4191
4192 // For per-region info
4193 #define G1PPRL_TYPE_FORMAT " %-4s"
4194 #define G1PPRL_TYPE_H_FORMAT " %4s"
4195 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9)
4196 #define G1PPRL_BYTE_H_FORMAT " %9s"
4197 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4198 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4199
4200 // For summary info
4201 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT
4202 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT
4203 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB"
4204 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
4205
4206 G1PrintRegionLivenessInfoClosure::
4207 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4208   : _out(out),
4209     _total_used_bytes(0), _total_capacity_bytes(0),
4210     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4211     _hum_used_bytes(0), _hum_capacity_bytes(0),
4212     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
4213     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
4214   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4215   MemRegion g1_reserved = g1h->g1_reserved();
4216   double now = os::elapsedTime();
4217
4218   // Print the header of the output.
4219   _out->cr();
4220   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4221   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4222                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4223                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4224                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
4225                  HeapRegion::GrainBytes);
4226   _out->print_cr(G1PPRL_LINE_PREFIX);
4227   _out->print_cr(G1PPRL_LINE_PREFIX
4228                  G1PPRL_TYPE_H_FORMAT
4229                  G1PPRL_ADDR_BASE_H_FORMAT
4230                  G1PPRL_BYTE_H_FORMAT
4231                  G1PPRL_BYTE_H_FORMAT
4232                  G1PPRL_BYTE_H_FORMAT
4233                  G1PPRL_DOUBLE_H_FORMAT
4234                  G1PPRL_BYTE_H_FORMAT
4235                  G1PPRL_BYTE_H_FORMAT,
4236                  "type", "address-range",
4237                  "used", "prev-live", "next-live", "gc-eff",
4238                  "remset", "code-roots");
4239   _out->print_cr(G1PPRL_LINE_PREFIX
4240                  G1PPRL_TYPE_H_FORMAT
4241                  G1PPRL_ADDR_BASE_H_FORMAT
4242                  G1PPRL_BYTE_H_FORMAT
4243                  G1PPRL_BYTE_H_FORMAT
4244                  G1PPRL_BYTE_H_FORMAT
4245                  G1PPRL_DOUBLE_H_FORMAT
4246                  G1PPRL_BYTE_H_FORMAT
4247                  G1PPRL_BYTE_H_FORMAT,
4248                  "", "",
4249                  "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
4250                  "(bytes)", "(bytes)");
4251 }
4252
4253 // It takes as a parameter a reference to one of the _hum_* fields,
4254 // deduces the corresponding value for a region in a humongous region
4255 // series (either the region size, or what's left if the _hum_* field
4256 // is < the region size), and updates the _hum_* field accordingly.
4257 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4258   size_t bytes = 0;
4259   // The > 0 check is to deal with the prev and next live bytes which
4260   // could be 0.
4261   if (*hum_bytes > 0) {
4262     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
4263     *hum_bytes -= bytes;
4264   }
4265   return bytes;
4266 }
4267
4268 // It deduces the values for a region in a humongous region series
4269 // from the _hum_* fields and updates those accordingly. It assumes
4270 // that the _hum_* fields have already been set up from the "starts
4271 // humongous" region and that we visit the regions in address order.
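// Editorial example (hypothetical numbers): for a humongous series
// spanning two and a half regions, _hum_used_bytes starts at
// 2.5 * HeapRegion::GrainBytes; the first two regions visited each
// deduce GrainBytes and the third deduces the remaining half region,
// leaving the field at zero.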
4272 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4273 size_t* capacity_bytes, 4274 size_t* prev_live_bytes, 4275 size_t* next_live_bytes) { 4276 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4277 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4278 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4279 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4280 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4281 } 4282 4283 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4284 const char* type = r->get_type_str(); 4285 HeapWord* bottom = r->bottom(); 4286 HeapWord* end = r->end(); 4287 size_t capacity_bytes = r->capacity(); 4288 size_t used_bytes = r->used(); 4289 size_t prev_live_bytes = r->live_bytes(); 4290 size_t next_live_bytes = r->next_live_bytes(); 4291 double gc_eff = r->gc_efficiency(); 4292 size_t remset_bytes = r->rem_set()->mem_size(); 4293 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4294 4295 if (r->is_starts_humongous()) { 4296 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4297 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4298 "they should have been zeroed after the last time we used them"); 4299 // Set up the _hum_* fields. 4300 _hum_capacity_bytes = capacity_bytes; 4301 _hum_used_bytes = used_bytes; 4302 _hum_prev_live_bytes = prev_live_bytes; 4303 _hum_next_live_bytes = next_live_bytes; 4304 get_hum_bytes(&used_bytes, &capacity_bytes, 4305 &prev_live_bytes, &next_live_bytes); 4306 end = bottom + HeapRegion::GrainWords; 4307 } else if (r->is_continues_humongous()) { 4308 get_hum_bytes(&used_bytes, &capacity_bytes, 4309 &prev_live_bytes, &next_live_bytes); 4310 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4311 } 4312 4313 _total_used_bytes += used_bytes; 4314 _total_capacity_bytes += capacity_bytes; 4315 _total_prev_live_bytes += prev_live_bytes; 4316 _total_next_live_bytes += next_live_bytes; 4317 _total_remset_bytes += remset_bytes; 4318 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4319 4320 // Print a line for this particular region. 4321 _out->print_cr(G1PPRL_LINE_PREFIX 4322 G1PPRL_TYPE_FORMAT 4323 G1PPRL_ADDR_BASE_FORMAT 4324 G1PPRL_BYTE_FORMAT 4325 G1PPRL_BYTE_FORMAT 4326 G1PPRL_BYTE_FORMAT 4327 G1PPRL_DOUBLE_FORMAT 4328 G1PPRL_BYTE_FORMAT 4329 G1PPRL_BYTE_FORMAT, 4330 type, p2i(bottom), p2i(end), 4331 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4332 remset_bytes, strong_code_roots_bytes); 4333 4334 return false; 4335 } 4336 4337 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4338 // add static memory usages to remembered set sizes 4339 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4340 // Print the footer of the output. 
4341 _out->print_cr(G1PPRL_LINE_PREFIX); 4342 _out->print_cr(G1PPRL_LINE_PREFIX 4343 " SUMMARY" 4344 G1PPRL_SUM_MB_FORMAT("capacity") 4345 G1PPRL_SUM_MB_PERC_FORMAT("used") 4346 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4347 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4348 G1PPRL_SUM_MB_FORMAT("remset") 4349 G1PPRL_SUM_MB_FORMAT("code-roots"), 4350 bytes_to_mb(_total_capacity_bytes), 4351 bytes_to_mb(_total_used_bytes), 4352 perc(_total_used_bytes, _total_capacity_bytes), 4353 bytes_to_mb(_total_prev_live_bytes), 4354 perc(_total_prev_live_bytes, _total_capacity_bytes), 4355 bytes_to_mb(_total_next_live_bytes), 4356 perc(_total_next_live_bytes, _total_capacity_bytes), 4357 bytes_to_mb(_total_remset_bytes), 4358 bytes_to_mb(_total_strong_code_roots_bytes)); 4359 _out->cr(); 4360 }
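
// ---------------------------------------------------------------------------
// Editorial appendix (not part of the original file): a minimal standalone
// sketch of the work-based clock scheme that do_marking_step() relies on, as
// described in the large comment above it. All names and the period value
// here are hypothetical; only the shape of the mechanism is illustrated.

#include <cstddef>

class ToyMarkingTask {
 private:
  size_t _words_scanned;        // work done so far
  size_t _words_scanned_limit;  // next point at which the clock fires
  static const size_t words_scanned_period = 12 * 1024;  // hypothetical value

  void regular_clock_call() {
    // In the real code this is where the abort conditions (overflow,
    // yield request, time quota, pending SATB buffers) are checked.
    // Here we only re-arm the work-based trigger.
    _words_scanned_limit = _words_scanned + words_scanned_period;
  }

 public:
  ToyMarkingTask()
    : _words_scanned(0),
      _words_scanned_limit(words_scanned_period) { }

  void scan_object(size_t obj_word_size) {
    _words_scanned += obj_word_size;
    // The clock is work-triggered rather than time-triggered: the
    // comparatively expensive checks run only once per period of
    // scanned words, keeping them off the per-object fast path.
    if (_words_scanned >= _words_scanned_limit) {
      regular_clock_call();
    }
  }
};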