/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
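  // Each bitmap bit covers (1 << _shifter) heap words, so only addresses at
  // that alignment can correspond to a mark bit; rounding up cannot skip a
  // marked object, because objects start at such aligned boundaries.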
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
}

void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT "K to " SIZE_FORMAT "K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
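  // Claim the slots [start, next_index) while holding ParGCRareEvent_lock,
  // then copy the entries into the reserved part of the stack.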
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
           false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", " 559 "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end)); 560 } 561 562 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage); 563 _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage); 564 565 // Create & start a ConcurrentMark thread. 566 _cmThread = new ConcurrentMarkThread(this); 567 assert(cmThread() != NULL, "CM Thread should have been created"); 568 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm"); 569 if (_cmThread->osthread() == NULL) { 570 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread"); 571 } 572 573 assert(CGC_lock != NULL, "Where's the CGC_lock?"); 574 assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency"); 575 assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency"); 576 577 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 578 satb_qs.set_buffer_size(G1SATBBufferSize); 579 580 _root_regions.init(_g1h, this); 581 582 if (ConcGCThreads > ParallelGCThreads) { 583 warning("Can't have more ConcGCThreads (%u) " 584 "than ParallelGCThreads (%u).", 585 ConcGCThreads, ParallelGCThreads); 586 return; 587 } 588 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) { 589 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent 590 // if both are set 591 _sleep_factor = 0.0; 592 _marking_task_overhead = 1.0; 593 } else if (G1MarkingOverheadPercent > 0) { 594 // We will calculate the number of parallel marking threads based 595 // on a target overhead with respect to the soft real-time goal 596 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0; 597 double overall_cm_overhead = 598 (double) MaxGCPauseMillis * marking_overhead / 599 (double) GCPauseIntervalMillis; 600 double cpu_ratio = 1.0 / (double) os::processor_count(); 601 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio); 602 double marking_task_overhead = 603 overall_cm_overhead / marking_thread_num * 604 (double) os::processor_count(); 605 double sleep_factor = 606 (1.0 - marking_task_overhead) / marking_task_overhead; 607 608 FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num); 609 _sleep_factor = sleep_factor; 610 _marking_task_overhead = marking_task_overhead; 611 } else { 612 // Calculate the number of parallel marking threads by scaling 613 // the number of parallel GC threads. 
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
                   (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Marker",
       _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
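    // The range check below only applies when MarkStackSize came from the
    // command line; the warning text differs depending on whether
    // MarkStackSizeMax was also user-specified.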
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                       CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards.
 * If we do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that a Full GC or an evacuation pause could occur while
 * it is suspended. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_second_overflow_barrier_sync.enter();
  }

  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial */);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check(worker_id);

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
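// With UseDynamicNumberOfGCThreads, the count is taken from
// AdaptiveSizePolicy::calc_default_active_workers(); otherwise the
// (already scaled) maximum number of marking threads is used.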
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  double scan_start = os::elapsedTime();

  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    if (G1Log::fine()) {
      gclog_or_tty->gclog_stamp(concurrent_gc_id());
      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
    }

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    if (G1Log::fine()) {
      gclog_or_tty->gclog_stamp(concurrent_gc_id());
      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
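// The expected region/card bitmap values computed here are later compared
// against the actual counting data by VerifyLiveObjectDataHRClosure below.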
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        if (_verbose) {
          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                 "expected: %s, actual: %s",
                                 hr->hrm_index(), i,
                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
        }
        failures += 1;
      }
    }

    if (failures > 0 && _verbose) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
                             HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};

class G1ParVerifyFinalCountTask: public AbstractGangTask {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint    _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int  _failures;
  bool _verbose;

  HeapRegionClaimer _hrclaimer;

 public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0), _verbose(false),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
    assert(VerifyDuringGC, "don't call this otherwise");
    assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
    assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");

    _verbose = _cm->verbose_medium();
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    VerifyLiveObjectDataHRClosure verify_cl(_g1h,
                                            _actual_region_bm, _actual_card_bm,
                                            _expected_region_bm,
                                            _expected_card_bm,
                                            _verbose);

    _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);

    Atomic::add(verify_cl.failures(), &_failures);
  }

  int failures() const { return _failures; }
};

// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
1678 // Sets the bits corresponding to the interval [NTAMS, top] 1679 // (which contains the implicitly live objects) in the 1680 // card liveness bitmap. Also sets the bit for each region, 1681 // containing live data, in the region liveness bitmap. 1682 1683 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1684 public: 1685 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1686 BitMap* region_bm, 1687 BitMap* card_bm) : 1688 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1689 1690 bool doHeapRegion(HeapRegion* hr) { 1691 1692 if (hr->is_continues_humongous()) { 1693 // We will ignore these here and process them when their 1694 // associated "starts humongous" region is processed (see 1695 // set_bit_for_heap_region()). Note that we cannot rely on their 1696 // associated "starts humongous" region to have their bit set to 1697 // 1 since, due to the region chunking in the parallel region 1698 // iteration, a "continues humongous" region might be visited 1699 // before its associated "starts humongous". 1700 return false; 1701 } 1702 1703 HeapWord* ntams = hr->next_top_at_mark_start(); 1704 HeapWord* top = hr->top(); 1705 1706 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1707 1708 // Mark the allocated-since-marking portion... 1709 if (ntams < top) { 1710 // This definitely means the region has live objects. 1711 set_bit_for_region(hr); 1712 1713 // Now set the bits in the card bitmap for [ntams, top) 1714 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1715 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1716 1717 // Note: if we're looking at the last region in heap - top 1718 // could be actually just beyond the end of the heap; end_idx 1719 // will then correspond to a (non-existent) card that is also 1720 // just beyond the heap. 
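    // A sketch of the card index arithmetic the code below relies on,
    // assuming the usual 512-byte card size (CardTableModRefBS::card_size)
    // and a hypothetical heap_start parameter; the real mapping is
    // ConcurrentMark::card_bitmap_index_for() (illustrative only):
#if 0
    static BitMap::idx_t card_index_for(HeapWord* addr, HeapWord* heap_start) {
      // One bit per card; a card covers card_size_in_words heap words.
      return (BitMap::idx_t) (pointer_delta(addr, heap_start) /
                              CardTableModRefBS::card_size_in_words);
    }
    // For the half-open range [ntams, top), a 'top' that is not card
    // aligned still occupies part of its final card, hence the end index
    // bump performed below.
#endif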
1721 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1722 // end of object is not card aligned - increment to cover 1723 // all the cards spanned by the object 1724 end_idx += 1; 1725 } 1726 1727 assert(end_idx <= _card_bm->size(), 1728 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1729 end_idx, _card_bm->size())); 1730 assert(start_idx < _card_bm->size(), 1731 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1732 start_idx, _card_bm->size())); 1733 1734 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1735 } 1736 1737 // Set the bit for the region if it contains live data 1738 if (hr->next_marked_bytes() > 0) { 1739 set_bit_for_region(hr); 1740 } 1741 1742 return false; 1743 } 1744 }; 1745 1746 class G1ParFinalCountTask: public AbstractGangTask { 1747 protected: 1748 G1CollectedHeap* _g1h; 1749 ConcurrentMark* _cm; 1750 BitMap* _actual_region_bm; 1751 BitMap* _actual_card_bm; 1752 1753 uint _n_workers; 1754 HeapRegionClaimer _hrclaimer; 1755 1756 public: 1757 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1758 : AbstractGangTask("G1 final counting"), 1759 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1760 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1761 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1762 } 1763 1764 void work(uint worker_id) { 1765 assert(worker_id < _n_workers, "invariant"); 1766 1767 FinalCountDataUpdateClosure final_update_cl(_g1h, 1768 _actual_region_bm, 1769 _actual_card_bm); 1770 1771 _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer); 1772 } 1773 }; 1774 1775 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1776 G1CollectedHeap* _g1; 1777 size_t _freed_bytes; 1778 FreeRegionList* _local_cleanup_list; 1779 HeapRegionSetCount _old_regions_removed; 1780 HeapRegionSetCount _humongous_regions_removed; 1781 HRRSCleanupTask* _hrrs_cleanup_task; 1782 1783 public: 1784 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1785 FreeRegionList* local_cleanup_list, 1786 HRRSCleanupTask* hrrs_cleanup_task) : 1787 _g1(g1), 1788 _freed_bytes(0), 1789 _local_cleanup_list(local_cleanup_list), 1790 _old_regions_removed(), 1791 _humongous_regions_removed(), 1792 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1793 1794 size_t freed_bytes() { return _freed_bytes; } 1795 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } 1796 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } 1797 1798 bool doHeapRegion(HeapRegion *hr) { 1799 if (hr->is_continues_humongous() || hr->is_archive()) { 1800 return false; 1801 } 1802 // We use a claim value of zero here because all regions 1803 // were claimed with value 1 in the FinalCount task. 
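    // (A historical note on the comment above: explicit claim values predate
    // the HeapRegionClaimer used here; each parallel task now carries its
    // own claimer, so the FinalCount and NoteEnd tasks no longer need to
    // coordinate claim values at all.)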
1804 _g1->reset_gc_time_stamps(hr); 1805 hr->note_end_of_marking(); 1806 1807 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1808 _freed_bytes += hr->used(); 1809 hr->set_containing_set(NULL); 1810 if (hr->is_humongous()) { 1811 assert(hr->is_starts_humongous(), "we should only see starts humongous"); 1812 _humongous_regions_removed.increment(1u, hr->capacity()); 1813 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1814 } else { 1815 _old_regions_removed.increment(1u, hr->capacity()); 1816 _g1->free_region(hr, _local_cleanup_list, true); 1817 } 1818 } else { 1819 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1820 } 1821 1822 return false; 1823 } 1824 }; 1825 1826 class G1ParNoteEndTask: public AbstractGangTask { 1827 friend class G1NoteEndOfConcMarkClosure; 1828 1829 protected: 1830 G1CollectedHeap* _g1h; 1831 FreeRegionList* _cleanup_list; 1832 HeapRegionClaimer _hrclaimer; 1833 1834 public: 1835 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1836 AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { 1837 } 1838 1839 void work(uint worker_id) { 1840 FreeRegionList local_cleanup_list("Local Cleanup List"); 1841 HRRSCleanupTask hrrs_cleanup_task; 1842 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1843 &hrrs_cleanup_task); 1844 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); 1845 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1846 1847 // Now update the lists 1848 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1849 { 1850 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1851 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1852 1853 // If we iterate over the global cleanup list at the end of 1854 // cleanup to do this printing we will not guarantee to only 1855 // generate output for the newly-reclaimed regions (the list 1856 // might not be empty at the beginning of cleanup; we might 1857 // still be working on its previous contents). So we do the 1858 // printing here, before we append the new regions to the global 1859 // cleanup list. 
1860 1861 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1862 if (hr_printer->is_active()) { 1863 FreeRegionListIterator iter(&local_cleanup_list); 1864 while (iter.more_available()) { 1865 HeapRegion* hr = iter.get_next(); 1866 hr_printer->cleanup(hr); 1867 } 1868 } 1869 1870 _cleanup_list->add_ordered(&local_cleanup_list); 1871 assert(local_cleanup_list.is_empty(), "post-condition"); 1872 1873 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1874 } 1875 } 1876 }; 1877 1878 class G1ParScrubRemSetTask: public AbstractGangTask { 1879 protected: 1880 G1RemSet* _g1rs; 1881 BitMap* _region_bm; 1882 BitMap* _card_bm; 1883 HeapRegionClaimer _hrclaimer; 1884 1885 public: 1886 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1887 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1888 } 1889 1890 void work(uint worker_id) { 1891 _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer); 1892 } 1893 1894 }; 1895 1896 void ConcurrentMark::cleanup() { 1897 // world is stopped at this checkpoint 1898 assert(SafepointSynchronize::is_at_safepoint(), 1899 "world should be stopped"); 1900 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1901 1902 // If a full collection has happened, we shouldn't do this. 1903 if (has_aborted()) { 1904 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused 1905 return; 1906 } 1907 1908 g1h->verify_region_sets_optional(); 1909 1910 if (VerifyDuringGC) { 1911 HandleMark hm; // handle scope 1912 g1h->prepare_for_verify(); 1913 Universe::verify(VerifyOption_G1UsePrevMarking, 1914 " VerifyDuringGC:(before)"); 1915 } 1916 g1h->check_bitmaps("Cleanup Start"); 1917 1918 G1CollectorPolicy* g1p = g1h->g1_policy(); 1919 g1p->record_concurrent_mark_cleanup_start(); 1920 1921 double start = os::elapsedTime(); 1922 1923 HeapRegionRemSet::reset_for_cleanup_tasks(); 1924 1925 // Do counting once more with the world stopped for good measure. 1926 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 1927 1928 g1h->workers()->run_task(&g1_par_count_task); 1929 1930 if (VerifyDuringGC) { 1931 // Verify that the counting data accumulated during marking matches 1932 // that calculated by walking the marking bitmap. 1933 1934 // Bitmaps to hold expected values 1935 BitMap expected_region_bm(_region_bm.size(), true); 1936 BitMap expected_card_bm(_card_bm.size(), true); 1937 1938 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 1939 &_region_bm, 1940 &_card_bm, 1941 &expected_region_bm, 1942 &expected_card_bm); 1943 1944 g1h->workers()->run_task(&g1_par_verify_task); 1945 1946 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); 1947 } 1948 1949 size_t start_used_bytes = g1h->used(); 1950 g1h->collector_state()->set_mark_in_progress(false); 1951 1952 double count_end = os::elapsedTime(); 1953 double this_final_counting_time = (count_end - start); 1954 _total_counting_time += this_final_counting_time; 1955 1956 if (G1PrintRegionLivenessInfo) { 1957 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); 1958 _g1h->heap_region_iterate(&cl); 1959 } 1960 1961 // Install newly created mark bitMap as "prev". 1962 swapMarkBitMaps(); 1963 1964 g1h->reset_gc_time_stamp(); 1965 1966 uint n_workers = _g1h->workers()->active_workers(); 1967 1968 // Note end of marking in all heap regions. 
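  // The recurring pattern in this file, shown once in outline: wrap a
  // HeapRegionClosure in an AbstractGangTask whose work(worker_id) claims
  // regions through a shared HeapRegionClaimer, then hand the task to the
  // work gang. A minimal sketch with hypothetical names (illustrative only,
  // not part of the original file):
#if 0
  class ExampleHRClosure : public HeapRegionClosure {
   public:
    bool doHeapRegion(HeapRegion* hr) {
      // ... per-region work; return false to continue the iteration ...
      return false;
    }
  };

  class G1ExampleTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    HeapRegionClaimer _hrclaimer;
   public:
    G1ExampleTask(G1CollectedHeap* g1h, uint n_workers) :
      AbstractGangTask("G1 example"), _g1h(g1h), _hrclaimer(n_workers) { }
    void work(uint worker_id) {
      ExampleHRClosure cl;
      _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
    }
  };
  // usage: G1ExampleTask t(g1h, n_workers); g1h->workers()->run_task(&t);
#endif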
1969   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1970   g1h->workers()->run_task(&g1_par_note_end_task);
1971   g1h->check_gc_time_stamps();
1972
1973   if (!cleanup_list_is_empty()) {
1974     // The cleanup list is not empty, so we'll have to process it
1975     // concurrently. Notify anyone else that might be wanting free
1976     // regions that there will be more free regions coming soon.
1977     g1h->set_free_regions_coming();
1978   }
1979
1980   // Do the remembered set scrubbing before the record_concurrent_mark_cleanup_end()
1981   // call below, since it affects the metric by which we sort the heap regions.
1982   if (G1ScrubRemSets) {
1983     double rs_scrub_start = os::elapsedTime();
1984     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
1985     g1h->workers()->run_task(&g1_par_scrub_rs_task);
1986
1987     double rs_scrub_end = os::elapsedTime();
1988     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
1989     _total_rs_scrub_time += this_rs_scrub_time;
1990   }
1991
1992   // this will also free any regions totally full of garbage objects,
1993   // and sort the regions.
1994   g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1995
1996   // Statistics.
1997   double end = os::elapsedTime();
1998   _cleanup_times.add((end - start) * 1000.0);
1999
2000   if (G1Log::fine()) {
2001     g1h->g1_policy()->print_heap_transition(start_used_bytes);
2002   }
2003
2004   // Clean up will have freed any regions completely full of garbage.
2005   // Update the soft reference policy with the new heap occupancy.
2006   Universe::update_heap_info_at_gc();
2007
2008   if (VerifyDuringGC) {
2009     HandleMark hm;  // handle scope
2010     g1h->prepare_for_verify();
2011     Universe::verify(VerifyOption_G1UsePrevMarking,
2012                      " VerifyDuringGC:(after)");
2013   }
2014
2015   g1h->check_bitmaps("Cleanup End");
2016
2017   g1h->verify_region_sets_optional();
2018
2019   // We need to make this be a "collection" so any collection pause that
2020   // races with it goes around and waits for completeCleanup to finish.
2021   g1h->increment_total_collections();
2022
2023   // Clean out dead classes and update Metaspace sizes.
2024   if (ClassUnloadingWithConcurrentMark) {
2025     ClassLoaderDataGraph::purge();
2026   }
2027   MetaspaceGC::compute_new_size();
2028
2029   // We reclaimed old regions so we should calculate the sizes to make
2030   // sure we update the old gen/space data.
2031   g1h->g1mm()->update_sizes();
2032   g1h->allocation_context_stats().update_after_mark();
2033
2034   g1h->trace_heap_after_concurrent_cycle();
2035 }
2036
2037 void ConcurrentMark::completeCleanup() {
2038   if (has_aborted()) return;
2039
2040   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2041
2042   _cleanup_list.verify_optional();
2043   FreeRegionList tmp_free_list("Tmp Free List");
2044
2045   if (G1ConcRegionFreeingVerbose) {
2046     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2047                            "cleanup list has %u entries",
2048                            _cleanup_list.length());
2049   }
2050
2051   // No one else should be accessing the _cleanup_list at this point,
2052   // so it is not necessary to take any locks
2053   while (!_cleanup_list.is_empty()) {
2054     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
2055     assert(hr != NULL, "Got NULL from a non-empty list");
2056     hr->par_clear();
2057     tmp_free_list.add_ordered(hr);
2058
2059     // Instead of adding one region at a time to the secondary_free_list,
2060     // we accumulate them in the local list and move them a few at a
2061     // time. This also cuts down on the number of notify_all() calls
2062     // we do during this process. We'll also append the local list when
2063     // _cleanup_list is empty (which means we just removed the last
2064     // region from the _cleanup_list).
2065     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
2066         _cleanup_list.is_empty()) {
2067       if (G1ConcRegionFreeingVerbose) {
2068         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2069                                "appending %u entries to the secondary_free_list, "
2070                                "cleanup list still has %u entries",
2071                                tmp_free_list.length(),
2072                                _cleanup_list.length());
2073       }
2074
2075       {
2076         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
2077         g1h->secondary_free_list_add(&tmp_free_list);
2078         SecondaryFreeList_lock->notify_all();
2079       }
2080 #ifndef PRODUCT
2081       if (G1StressConcRegionFreeing) {
2082         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
2083           os::sleep(Thread::current(), (jlong) 1, false);
2084         }
2085       }
2086 #endif
2087     }
2088   }
2089   assert(tmp_free_list.is_empty(), "post-condition");
2090 }
2091
2092 // Supporting Object and Oop closures for reference discovery
2093 // and processing during marking
2094
2095 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2096   HeapWord* addr = (HeapWord*)obj;
2097   return addr != NULL &&
2098          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2099 }
2100
2101 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2102 // Uses the CMTask associated with a worker thread (for serial reference
2103 // processing the CMTask for worker 0 is used) to preserve (mark) and
2104 // trace referent objects.
2105 //
2106 // Using the CMTask and embedded local queues avoids having the worker
2107 // threads operating on the global mark stack. This reduces the risk
2108 // of overflowing the stack - which we would rather avoid at this late
2109 // stage. Also using the tasks' local queues removes the potential
2110 // of the workers interfering with each other that could occur if
2111 // operating on the global stack.
2112
2113 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2114   ConcurrentMark* _cm;
2115   CMTask*         _task;
2116   int             _ref_counter_limit;
2117   int             _ref_counter;
2118   bool            _is_serial;
2119  public:
2120   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2121     _cm(cm), _task(task), _is_serial(is_serial),
2122     _ref_counter_limit(G1RefProcDrainInterval) {
2123     assert(_ref_counter_limit > 0, "sanity");
2124     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2125     _ref_counter = _ref_counter_limit;
2126   }
2127
2128   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2129   virtual void do_oop(      oop* p) { do_oop_work(p); }
2130
2131   template <class T> void do_oop_work(T* p) {
2132     if (!_cm->has_overflown()) {
2133       oop obj = oopDesc::load_decode_heap_oop(p);
2134       if (_cm->verbose_high()) {
2135         gclog_or_tty->print_cr("\t[%u] we're looking at location "
2136                                "*"PTR_FORMAT" = "PTR_FORMAT,
2137                                _task->worker_id(), p2i(p), p2i((void*) obj));
2138       }
2139
2140       _task->deal_with_reference(obj);
2141       _ref_counter--;
2142
2143       if (_ref_counter == 0) {
2144         // We have dealt with _ref_counter_limit references, pushing them
2145         // and objects reachable from them on to the local stack (and
2146         // possibly the global stack). Call CMTask::do_marking_step() to
2147         // process these entries.
2148         //
2149         // We call CMTask::do_marking_step() in a loop, which we'll exit if
2150         // there's nothing more to do (i.e. we're done with the entries that
2151         // were pushed as a result of the CMTask::deal_with_reference() calls
2152         // above) or we overflow.
2153         //
2154         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2155         // flag while there may still be some work to do. (See the comment at
2156         // the beginning of CMTask::do_marking_step() for those conditions -
2157         // one of which is reaching the specified time target.) It is only
2158         // when CMTask::do_marking_step() returns without setting the
2159         // has_aborted() flag that the marking step has completed.
2160         do {
2161           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2162           _task->do_marking_step(mark_step_duration_ms,
2163                                  false      /* do_termination */,
2164                                  _is_serial);
2165         } while (_task->has_aborted() && !_cm->has_overflown());
2166         _ref_counter = _ref_counter_limit;
2167       }
2168     } else {
2169       if (_cm->verbose_high()) {
2170          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
2171       }
2172     }
2173   }
2174 };
2175
2176 // 'Drain' oop closure used by both serial and parallel reference processing.
2177 // Uses the CMTask associated with a given worker thread (for serial
2178 // reference processing the CMTask for worker 0 is used). Calls the
2179 // do_marking_step routine, with an unbelievably large timeout value,
2180 // to drain the marking data structures of the remaining entries
2181 // added by the 'keep alive' oop closure above.
2182
2183 class G1CMDrainMarkingStackClosure: public VoidClosure {
2184   ConcurrentMark* _cm;
2185   CMTask*         _task;
2186   bool            _is_serial;
2187  public:
2188   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
2189     _cm(cm), _task(task), _is_serial(is_serial) {
2190     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
2191   }
2192
2193   void do_void() {
2194     do {
2195       if (_cm->verbose_high()) {
2196         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
2197                                _task->worker_id(), BOOL_TO_STR(_is_serial));
2198       }
2199
2200       // We call CMTask::do_marking_step() to completely drain the local
2201       // and global marking stacks of entries pushed by the 'keep alive'
2202       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2203       //
2204       // CMTask::do_marking_step() is called in a loop, which we'll exit
2205       // if there's nothing more to do (i.e. we've completely drained the
2206       // entries that were pushed as a result of applying the 'keep alive'
2207       // closure to the entries on the discovered ref lists) or we overflow
2208       // the global marking stack.
2209       //
2210       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2211       // flag while there may still be some work to do. (See the comment at
2212       // the beginning of CMTask::do_marking_step() for those conditions -
2213       // one of which is reaching the specified time target.) It is only
2214       // when CMTask::do_marking_step() returns without setting the
2215       // has_aborted() flag that the marking step has completed.
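      // (The effectively infinite time target passed below disables the
      // time-quota abort path in CMTask::regular_clock_call(), so this
      // drain loop only terminates on completed work or on overflow.)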
2216 2217 _task->do_marking_step(1000000000.0 /* something very large */, 2218 true /* do_termination */, 2219 _is_serial); 2220 } while (_task->has_aborted() && !_cm->has_overflown()); 2221 } 2222 }; 2223 2224 // Implementation of AbstractRefProcTaskExecutor for parallel 2225 // reference processing at the end of G1 concurrent marking 2226 2227 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 2228 private: 2229 G1CollectedHeap* _g1h; 2230 ConcurrentMark* _cm; 2231 WorkGang* _workers; 2232 uint _active_workers; 2233 2234 public: 2235 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 2236 ConcurrentMark* cm, 2237 WorkGang* workers, 2238 uint n_workers) : 2239 _g1h(g1h), _cm(cm), 2240 _workers(workers), _active_workers(n_workers) { } 2241 2242 // Executes the given task using concurrent marking worker threads. 2243 virtual void execute(ProcessTask& task); 2244 virtual void execute(EnqueueTask& task); 2245 }; 2246 2247 class G1CMRefProcTaskProxy: public AbstractGangTask { 2248 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2249 ProcessTask& _proc_task; 2250 G1CollectedHeap* _g1h; 2251 ConcurrentMark* _cm; 2252 2253 public: 2254 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2255 G1CollectedHeap* g1h, 2256 ConcurrentMark* cm) : 2257 AbstractGangTask("Process reference objects in parallel"), 2258 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2259 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2260 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2261 } 2262 2263 virtual void work(uint worker_id) { 2264 ResourceMark rm; 2265 HandleMark hm; 2266 CMTask* task = _cm->task(worker_id); 2267 G1CMIsAliveClosure g1_is_alive(_g1h); 2268 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2269 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2270 2271 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2272 } 2273 }; 2274 2275 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2276 assert(_workers != NULL, "Need parallel worker threads."); 2277 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2278 2279 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2280 2281 // We need to reset the concurrency level before each 2282 // proxy task execution, so that the termination protocol 2283 // and overflow handling in CMTask::do_marking_step() knows 2284 // how many workers to wait for. 2285 _cm->set_concurrency(_active_workers); 2286 _workers->run_task(&proc_task_proxy); 2287 } 2288 2289 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2290 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2291 EnqueueTask& _enq_task; 2292 2293 public: 2294 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2295 AbstractGangTask("Enqueue reference objects in parallel"), 2296 _enq_task(enq_task) { } 2297 2298 virtual void work(uint worker_id) { 2299 _enq_task.work(worker_id); 2300 } 2301 }; 2302 2303 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2304 assert(_workers != NULL, "Need parallel worker threads."); 2305 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2306 2307 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2308 2309 // Not strictly necessary but... 2310 // 2311 // We need to reset the concurrency level before each 2312 // proxy task execution, so that the termination protocol 2313 // and overflow handling in CMTask::do_marking_step() knows 2314 // how many workers to wait for. 
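// A sketch of why the reset matters: the termination protocol inside
// CMTask::do_marking_step() waits until the configured number of tasks
// have offered termination, so the level must match the number of workers
// that will actually execute the proxy task (illustrative shape only):
#if 0
  _cm->set_concurrency(_active_workers); // expect this many participants
  _workers->run_task(&proxy_task);       // exactly that many run work()
#endif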
2315   _cm->set_concurrency(_active_workers);
2316   _workers->run_task(&enq_task_proxy);
2317 }
2318
2319 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2320   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2321 }
2322
2323 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2324   if (has_overflown()) {
2325     // Skip processing the discovered references if we have
2326     // overflown the global marking stack. Reference objects
2327     // only get discovered once so it is OK to not
2328     // de-populate the discovered reference lists. We could have,
2329     // but the only benefit would be that, when marking restarts,
2330     // fewer reference objects are discovered.
2331     return;
2332   }
2333
2334   ResourceMark rm;
2335   HandleMark   hm;
2336
2337   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2338
2339   // Is alive closure.
2340   G1CMIsAliveClosure g1_is_alive(g1h);
2341
2342   // Inner scope to exclude the cleaning of the string and symbol
2343   // tables from the displayed time.
2344   {
2345     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2346
2347     ReferenceProcessor* rp = g1h->ref_processor_cm();
2348
2349     // See the comment in G1CollectedHeap::ref_processing_init()
2350     // about how reference processing currently works in G1.
2351
2352     // Set the soft reference policy
2353     rp->setup_policy(clear_all_soft_refs);
2354     assert(_markStack.isEmpty(), "mark stack should be empty");
2355
2356     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2357     // in serial reference processing. Note these closures are also
2358     // used for serially processing (by the current thread) the
2359     // JNI references during parallel reference processing.
2360     //
2361     // These closures do not need to synchronize with the worker
2362     // threads involved in parallel reference processing as these
2363     // instances are executed serially by the current thread (e.g.
2364     // reference processing is not multi-threaded and is thus
2365     // performed by the current thread instead of a gang worker).
2366     //
2367     // The gang tasks involved in parallel reference processing create
2368     // their own instances of these closures, which do their own
2369     // synchronization among themselves.
2370     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2371     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2372
2373     // We need at least one active thread. If reference processing
2374     // is not multi-threaded we use the current (VMThread) thread,
2375     // otherwise we use the work gang from the G1CollectedHeap and
2376     // we utilize all the worker threads we can.
2377     bool processing_is_mt = rp->processing_is_mt();
2378     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2379     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2380
2381     // Parallel processing task executor.
2382     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2383                                               g1h->workers(), active_workers);
2384     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2385
2386     // Set the concurrency level. The phase was already set prior to
2387     // executing the remark task.
2388     set_concurrency(active_workers);
2389
2390     // Set the degree of MT processing here. If the discovery was done MT,
2391     // the number of threads involved during discovery could differ from
2392     // the number of active workers.
This is OK as long as the discovered 2393 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 2394 rp->set_active_mt_degree(active_workers); 2395 2396 // Process the weak references. 2397 const ReferenceProcessorStats& stats = 2398 rp->process_discovered_references(&g1_is_alive, 2399 &g1_keep_alive, 2400 &g1_drain_mark_stack, 2401 executor, 2402 g1h->gc_timer_cm(), 2403 concurrent_gc_id()); 2404 g1h->gc_tracer_cm()->report_gc_reference_stats(stats); 2405 2406 // The do_oop work routines of the keep_alive and drain_marking_stack 2407 // oop closures will set the has_overflown flag if we overflow the 2408 // global marking stack. 2409 2410 assert(_markStack.overflow() || _markStack.isEmpty(), 2411 "mark stack should be empty (unless it overflowed)"); 2412 2413 if (_markStack.overflow()) { 2414 // This should have been done already when we tried to push an 2415 // entry on to the global mark stack. But let's do it again. 2416 set_has_overflown(); 2417 } 2418 2419 assert(rp->num_q() == active_workers, "why not"); 2420 2421 rp->enqueue_discovered_references(executor); 2422 2423 rp->verify_no_references_recorded(); 2424 assert(!rp->discovery_enabled(), "Post condition"); 2425 } 2426 2427 if (has_overflown()) { 2428 // We can not trust g1_is_alive if the marking stack overflowed 2429 return; 2430 } 2431 2432 assert(_markStack.isEmpty(), "Marking should have completed"); 2433 2434 // Unload Klasses, String, Symbols, Code Cache, etc. 2435 { 2436 G1CMTraceTime trace("Unloading", G1Log::finer()); 2437 2438 if (ClassUnloadingWithConcurrentMark) { 2439 bool purged_classes; 2440 2441 { 2442 G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest()); 2443 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); 2444 } 2445 2446 { 2447 G1CMTraceTime trace("Parallel Unloading", G1Log::finest()); 2448 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2449 } 2450 } 2451 2452 if (G1StringDedup::is_enabled()) { 2453 G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest()); 2454 G1StringDedup::unlink(&g1_is_alive); 2455 } 2456 } 2457 } 2458 2459 void ConcurrentMark::swapMarkBitMaps() { 2460 CMBitMapRO* temp = _prevMarkBitMap; 2461 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2462 _nextMarkBitMap = (CMBitMap*) temp; 2463 } 2464 2465 // Closure for marking entries in SATB buffers. 2466 class CMSATBBufferClosure : public SATBBufferClosure { 2467 private: 2468 CMTask* _task; 2469 G1CollectedHeap* _g1h; 2470 2471 // This is very similar to CMTask::deal_with_reference, but with 2472 // more relaxed requirements for the argument, so this must be more 2473 // circumspect about treating the argument as an object. 2474 void do_entry(void* entry) const { 2475 _task->increment_refs_reached(); 2476 HeapRegion* hr = _g1h->heap_region_containing_raw(entry); 2477 if (entry < hr->next_top_at_mark_start()) { 2478 // Until we get here, we don't know whether entry refers to a valid 2479 // object; it could instead have been a stale reference. 
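      // A sketch of the invariant being used here: anything allocated
      // before the marking cycle started lies below NTAMS, so a SATB
      // entry pointing below NTAMS is a real (if possibly dead) object,
      // while entries at or above NTAMS may be stale and are filtered
      // out by the enclosing check (illustrative only):
#if 0
      static bool satb_entry_needs_marking(void* entry, HeapRegion* hr) {
        return (HeapWord*)entry < hr->next_top_at_mark_start();
      }
#endif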
2480       oop obj = static_cast<oop>(entry);
2481       assert(obj->is_oop(true /* ignore mark word */),
2482              err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
2483       _task->make_reference_grey(obj, hr);
2484     }
2485   }
2486
2487  public:
2488   CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2489     : _task(task), _g1h(g1h) { }
2490
2491   virtual void do_buffer(void** buffer, size_t size) {
2492     for (size_t i = 0; i < size; ++i) {
2493       do_entry(buffer[i]);
2494     }
2495   }
2496 };
2497
2498 class G1RemarkThreadsClosure : public ThreadClosure {
2499   CMSATBBufferClosure _cm_satb_cl;
2500   G1CMOopClosure _cm_cl;
2501   MarkingCodeBlobClosure _code_cl;
2502   int _thread_parity;
2503
2504  public:
2505   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
2506     _cm_satb_cl(task, g1h),
2507     _cm_cl(g1h, g1h->concurrent_mark(), task),
2508     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2509     _thread_parity(Threads::thread_claim_parity()) {}
2510
2511   void do_thread(Thread* thread) {
2512     if (thread->is_Java_thread()) {
2513       if (thread->claim_oops_do(true, _thread_parity)) {
2514         JavaThread* jt = (JavaThread*)thread;
2515
2516         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2517         // however, oops reachable from nmethods have very complex lifecycles:
2518         // * Alive if on the stack of an executing method
2519         // * Weakly reachable otherwise
2520         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2521         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2522         jt->nmethods_do(&_code_cl);
2523
2524         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
2525       }
2526     } else if (thread->is_VM_thread()) {
2527       if (thread->claim_oops_do(true, _thread_parity)) {
2528         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
2529       }
2530     }
2531   }
2532 };
2533
2534 class CMRemarkTask: public AbstractGangTask {
2535  private:
2536   ConcurrentMark* _cm;
2537  public:
2538   void work(uint worker_id) {
2539     // Since all available tasks are actually started, we should
2540     // only proceed if we're supposed to be active.
2541     if (worker_id < _cm->active_tasks()) {
2542       CMTask* task = _cm->task(worker_id);
2543       task->record_start_time();
2544       {
2545         ResourceMark rm;
2546         HandleMark hm;
2547
2548         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2549         Threads::threads_do(&threads_f);
2550       }
2551
2552       do {
2553         task->do_marking_step(1000000000.0 /* something very large */,
2554                               true         /* do_termination       */,
2555                               false        /* is_serial            */);
2556       } while (task->has_aborted() && !_cm->has_overflown());
2557       // If we overflow, then we do not want to restart. We instead
2558       // want to abort remark and do concurrent marking again.
2559       task->record_end_time();
2560     }
2561   }
2562
2563   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2564     AbstractGangTask("Par Remark"), _cm(cm) {
2565     _cm->terminator()->reset_for_reuse(active_workers);
2566   }
2567 };
2568
2569 void ConcurrentMark::checkpointRootsFinalWork() {
2570   ResourceMark rm;
2571   HandleMark   hm;
2572   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2573
2574   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2575
2576   g1h->ensure_parsability(false);
2577
2578   // this is remark, so we'll use up all active threads
2579   uint active_workers = g1h->workers()->active_workers();
2580   set_concurrency_and_phase(active_workers, false /* concurrent */);
2581   // Leave _parallel_marking_threads at its
2582   // value originally calculated in the ConcurrentMark
2583   // constructor and pass values of the active workers
2584   // through the gang in the task.
2585
2586   {
2587     StrongRootsScope srs(active_workers);
2588
2589     CMRemarkTask remarkTask(this, active_workers);
2590     // We will start all available threads, even if we decide that the
2591     // active_workers will be fewer. The extra ones will just bail out
2592     // immediately.
2593     g1h->workers()->run_task(&remarkTask);
2594   }
2595
2596   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2597   guarantee(has_overflown() ||
2598             satb_mq_set.completed_buffers_num() == 0,
2599             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2600                     BOOL_TO_STR(has_overflown()),
2601                     satb_mq_set.completed_buffers_num()));
2602
2603   print_stats();
2604 }
2605
2606 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2607   // Note we are overriding the read-only view of the prev map here, via
2608   // the cast.
2609   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2610 }
2611
2612 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2613   _nextMarkBitMap->clearRange(mr);
2614 }
2615
2616 HeapRegion*
2617 ConcurrentMark::claim_region(uint worker_id) {
2618   // "checkpoint" the finger
2619   HeapWord* finger = _finger;
2620
2621   // _heap_end will not change underneath our feet; it only changes at
2622   // yield points.
2623   while (finger < _heap_end) {
2624     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2625
2626     // Note on how this code handles humongous regions. In the
2627     // normal case the finger will reach the start of a "starts
2628     // humongous" (SH) region. Its end will either be the end of the
2629     // last "continues humongous" (CH) region in the sequence, or the
2630     // standard end of the SH region (if the SH is the only region in
2631     // the sequence). That way claim_region() will skip over the CH
2632     // regions. However, there is a subtle race between a CM thread
2633     // executing this method and a mutator thread doing a humongous
2634     // object allocation. The two are not mutually exclusive as the CM
2635     // thread does not need to hold the Heap_lock when it gets
2636     // here. So there is a chance that claim_region() will come across
2637     // a free region that's in the progress of becoming a SH or a CH
2638     // region. In the former case, it will either
2639     //   a) Miss the update to the region's end, in which case it will
2640     //      visit every subsequent CH region, will find their bitmaps
2641     //      empty, and do nothing, or
2642     //   b) Will observe the update of the region's end (in which case
2643     //      it will skip the subsequent CH regions).
2644     // If it comes across a region that suddenly becomes CH, the
2645     // scenario will be similar to b). So, the race between
2646     // claim_region() and a humongous object allocation might force us
2647     // to do a bit of unnecessary work (due to some unnecessary bitmap
2648     // iterations) but it should not introduce any correctness issues.
2649     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
2650
2651     // heap_region_containing_raw() above may return NULL, as we always scan
2652     // and claim until the end of the heap. In this case, just jump to the next region.
2653     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2654
2655     // Is the gap between reading the finger and doing the CAS too long?
2656     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2657     if (res == finger && curr_region != NULL) {
2658       // we succeeded
2659       HeapWord*   bottom        = curr_region->bottom();
2660       HeapWord*   limit         = curr_region->next_top_at_mark_start();
2661
2662       if (verbose_low()) {
2663         gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
2664                                "["PTR_FORMAT", "PTR_FORMAT"), "
2665                                "limit = "PTR_FORMAT,
2666                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
2667       }
2668
2669       // notice that _finger == end cannot be guaranteed here since
2670       // someone else might have moved the finger even further
2671       assert(_finger >= end, "the finger should have moved forward");
2672
2673       if (verbose_low()) {
2674         gclog_or_tty->print_cr("[%u] we were successful with region = "
2675                                PTR_FORMAT, worker_id, p2i(curr_region));
2676       }
2677
2678       if (limit > bottom) {
2679         if (verbose_low()) {
2680           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
2681                                  "returning it ", worker_id, p2i(curr_region));
2682         }
2683         return curr_region;
2684       } else {
2685         assert(limit == bottom,
2686                "the region limit should be at bottom");
2687         if (verbose_low()) {
2688           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
2689                                  "returning NULL", worker_id, p2i(curr_region));
2690         }
2691         // we return NULL and the caller should try calling
2692         // claim_region() again.
2693 return NULL; 2694 } 2695 } else { 2696 assert(_finger > finger, "the finger should have moved forward"); 2697 if (verbose_low()) { 2698 if (curr_region == NULL) { 2699 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 2700 "global finger = "PTR_FORMAT", " 2701 "our finger = "PTR_FORMAT, 2702 worker_id, p2i(_finger), p2i(finger)); 2703 } else { 2704 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2705 "global finger = "PTR_FORMAT", " 2706 "our finger = "PTR_FORMAT, 2707 worker_id, p2i(_finger), p2i(finger)); 2708 } 2709 } 2710 2711 // read it again 2712 finger = _finger; 2713 } 2714 } 2715 2716 return NULL; 2717 } 2718 2719 #ifndef PRODUCT 2720 enum VerifyNoCSetOopsPhase { 2721 VerifyNoCSetOopsStack, 2722 VerifyNoCSetOopsQueues 2723 }; 2724 2725 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2726 private: 2727 G1CollectedHeap* _g1h; 2728 VerifyNoCSetOopsPhase _phase; 2729 int _info; 2730 2731 const char* phase_str() { 2732 switch (_phase) { 2733 case VerifyNoCSetOopsStack: return "Stack"; 2734 case VerifyNoCSetOopsQueues: return "Queue"; 2735 default: ShouldNotReachHere(); 2736 } 2737 return NULL; 2738 } 2739 2740 void do_object_work(oop obj) { 2741 guarantee(!_g1h->obj_in_cs(obj), 2742 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2743 p2i((void*) obj), phase_str(), _info)); 2744 } 2745 2746 public: 2747 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2748 2749 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2750 _phase = phase; 2751 _info = info; 2752 } 2753 2754 virtual void do_oop(oop* p) { 2755 oop obj = oopDesc::load_decode_heap_oop(p); 2756 do_object_work(obj); 2757 } 2758 2759 virtual void do_oop(narrowOop* p) { 2760 // We should not come across narrow oops while scanning marking 2761 // stacks 2762 ShouldNotReachHere(); 2763 } 2764 2765 virtual void do_object(oop obj) { 2766 do_object_work(obj); 2767 } 2768 }; 2769 2770 void ConcurrentMark::verify_no_cset_oops() { 2771 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2772 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 2773 return; 2774 } 2775 2776 VerifyNoCSetOopsClosure cl; 2777 2778 // Verify entries on the global mark stack 2779 cl.set_phase(VerifyNoCSetOopsStack); 2780 _markStack.oops_do(&cl); 2781 2782 // Verify entries on the task queues 2783 for (uint i = 0; i < _max_worker_id; i += 1) { 2784 cl.set_phase(VerifyNoCSetOopsQueues, i); 2785 CMTaskQueue* queue = _task_queues->queue(i); 2786 queue->oops_do(&cl); 2787 } 2788 2789 // Verify the global finger 2790 HeapWord* global_finger = finger(); 2791 if (global_finger != NULL && global_finger < _heap_end) { 2792 // The global finger always points to a heap region boundary. We 2793 // use heap_region_containing_raw() to get the containing region 2794 // given that the global finger could be pointing to a free region 2795 // which subsequently becomes continues humongous. If that 2796 // happens, heap_region_containing() will return the bottom of the 2797 // corresponding starts humongous region and the check below will 2798 // not hold any more. 2799 // Since we always iterate over all regions, we might get a NULL HeapRegion 2800 // here. 
2801     HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
2802     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2803               err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
2804                       p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
2805   }
2806
2807   // Verify the task fingers
2808   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2809   for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
2810     CMTask* task = _tasks[i];
2811     HeapWord* task_finger = task->finger();
2812     if (task_finger != NULL && task_finger < _heap_end) {
2813       // See above note on the global finger verification.
2814       HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
2815       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2816                 !task_hr->in_collection_set(),
2817                 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
2818                         p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
2819     }
2820   }
2821 }
2822 #endif // PRODUCT
2823
2824 // Aggregate the counting data that was constructed concurrently
2825 // with marking.
2826 class AggregateCountDataHRClosure: public HeapRegionClosure {
2827   G1CollectedHeap* _g1h;
2828   ConcurrentMark* _cm;
2829   CardTableModRefBS* _ct_bs;
2830   BitMap* _cm_card_bm;
2831   uint _max_worker_id;
2832
2833  public:
2834   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2835                               BitMap* cm_card_bm,
2836                               uint max_worker_id) :
2837     _g1h(g1h), _cm(g1h->concurrent_mark()),
2838     _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2839     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2840
2841   bool doHeapRegion(HeapRegion* hr) {
2842     if (hr->is_continues_humongous()) {
2843       // We will ignore these here and process them when their
2844       // associated "starts humongous" region is processed.
2845       // Note that we cannot rely on their associated
2846       // "starts humongous" region to have their bit set to 1
2847       // since, due to the region chunking in the parallel region
2848       // iteration, a "continues humongous" region might be visited
2849       // before its associated "starts humongous".
2850       return false;
2851     }
2852
2853     HeapWord* start = hr->bottom();
2854     HeapWord* limit = hr->next_top_at_mark_start();
2855     HeapWord* end = hr->end();
2856
2857     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2858            err_msg("Preconditions not met - "
2859                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
2860                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
2861                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
2862
2863     assert(hr->next_marked_bytes() == 0, "Precondition");
2864
2865     if (start == limit) {
2866       // NTAMS of this region has not been set so nothing to do.
2867       return false;
2868     }
2869
2870     // 'start' should be in the heap.
2871     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2872     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2873     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2874
2875     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2876     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2877     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2878
2879     // If ntams is not card aligned then we bump the card bitmap index
2880     // for limit so that we get all the cards spanned by
2881     // the object ending at ntams.
2882     // Note: if this is the last region in the heap then ntams
2883     // could actually be just beyond the end of the heap;
2884     // limit_idx will then correspond to a (non-existent) card
2885     // that is also outside the heap.
2886     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
2887       limit_idx += 1;
2888     }
2889
2890     assert(limit_idx <= end_idx, "or else use atomics");
2891
2892     // Aggregate the "stripe" in the count data associated with hr.
2893     uint hrm_index = hr->hrm_index();
2894     size_t marked_bytes = 0;
2895
2896     for (uint i = 0; i < _max_worker_id; i += 1) {
2897       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
2898       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
2899
2900       // Fetch the marked_bytes in this region for task i and
2901       // add it to the running total for this region.
2902       marked_bytes += marked_bytes_array[hrm_index];
2903
2904       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
2905       // into the global card bitmap.
2906       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
2907
2908       while (scan_idx < limit_idx) {
2909         assert(task_card_bm->at(scan_idx) == true, "should be");
2910         _cm_card_bm->set_bit(scan_idx);
2911         assert(_cm_card_bm->at(scan_idx) == true, "should be");
2912
2913         // BitMap::get_next_one_offset() can handle the case when
2914         // its left_offset parameter is greater than its right_offset
2915         // parameter. It does, however, have an early exit if
2916         // left_offset == right_offset. So let's limit the value
2917         // passed in for left offset here.
2918         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
2919         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
2920       }
2921     }
2922
2923     // Update the marked bytes for this region.
2924     hr->add_to_marked_bytes(marked_bytes);
2925
2926     // Next heap region
2927     return false;
2928   }
2929 };
2930
2931 class G1AggregateCountDataTask: public AbstractGangTask {
2932  protected:
2933   G1CollectedHeap* _g1h;
2934   ConcurrentMark* _cm;
2935   BitMap* _cm_card_bm;
2936   uint _max_worker_id;
2937   uint _active_workers;
2938   HeapRegionClaimer _hrclaimer;
2939
2940  public:
2941   G1AggregateCountDataTask(G1CollectedHeap* g1h,
2942                            ConcurrentMark* cm,
2943                            BitMap* cm_card_bm,
2944                            uint max_worker_id,
2945                            uint n_workers) :
2946     AbstractGangTask("Count Aggregation"),
2947     _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
2948     _max_worker_id(max_worker_id),
2949     _active_workers(n_workers),
2950     _hrclaimer(_active_workers) {
2951   }
2952
2953   void work(uint worker_id) {
2954     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
2955
2956     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
2957   }
2958 };
2959
2960
2961 void ConcurrentMark::aggregate_count_data() {
2962   uint n_workers = _g1h->workers()->active_workers();
2963
2964   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
2965                                            _max_worker_id, n_workers);
2966
2967   _g1h->workers()->run_task(&g1_par_agg_task);
2968 }
2969
2970 // Clear the per-worker arrays used to store the per-region counting data
2971 void ConcurrentMark::clear_all_count_data() {
2972   // Clear the global card bitmap - it will be filled during
2973   // liveness count aggregation (during remark) and the
2974   // final counting task.
2975   _card_bm.clear();
2976
2977   // Clear the global region bitmap - it will be filled as part
2978   // of the final counting task.
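  // The per-worker counting data cleared below, in outline: each of the
  // _max_worker_id workers owns one marked-bytes array (indexed by region)
  // and one private card bitmap, which aggregate_count_data() folds into
  // the global card bitmap and the regions' marked-bytes totals.
  // Conceptually (illustrative only, not the actual declarations):
  //
  //   size_t _count_marked_bytes[max_worker_id][max_regions];
  //   BitMap _count_card_bitmaps[max_worker_id]; // one bit per card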
2979 _region_bm.clear(); 2980 2981 uint max_regions = _g1h->max_regions(); 2982 assert(_max_worker_id > 0, "uninitialized"); 2983 2984 for (uint i = 0; i < _max_worker_id; i += 1) { 2985 BitMap* task_card_bm = count_card_bitmap_for(i); 2986 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 2987 2988 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 2989 assert(marked_bytes_array != NULL, "uninitialized"); 2990 2991 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 2992 task_card_bm->clear(); 2993 } 2994 } 2995 2996 void ConcurrentMark::print_stats() { 2997 if (verbose_stats()) { 2998 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 2999 for (size_t i = 0; i < _active_tasks; ++i) { 3000 _tasks[i]->print_stats(); 3001 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3002 } 3003 } 3004 } 3005 3006 // abandon current marking iteration due to a Full GC 3007 void ConcurrentMark::abort() { 3008 if (!cmThread()->during_cycle() || _has_aborted) { 3009 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 3010 return; 3011 } 3012 3013 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3014 // concurrent bitmap clearing. 3015 _nextMarkBitMap->clearAll(); 3016 3017 // Note we cannot clear the previous marking bitmap here 3018 // since VerifyDuringGC verifies the objects marked during 3019 // a full GC against the previous bitmap. 3020 3021 // Clear the liveness counting data 3022 clear_all_count_data(); 3023 // Empty mark stack 3024 reset_marking_state(); 3025 for (uint i = 0; i < _max_worker_id; ++i) { 3026 _tasks[i]->clear_region_fields(); 3027 } 3028 _first_overflow_barrier_sync.abort(); 3029 _second_overflow_barrier_sync.abort(); 3030 _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id(); 3031 assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?"); 3032 _has_aborted = true; 3033 3034 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3035 satb_mq_set.abandon_partial_marking(); 3036 // This can be called either during or outside marking, we'll read 3037 // the expected_active value from the SATB queue set. 3038 satb_mq_set.set_active_all_threads( 3039 false, /* new active value */ 3040 satb_mq_set.is_active() /* expected_active */); 3041 3042 _g1h->trace_heap_after_concurrent_cycle(); 3043 _g1h->register_concurrent_cycle_end(); 3044 } 3045 3046 const GCId& ConcurrentMark::concurrent_gc_id() { 3047 if (has_aborted()) { 3048 return _aborted_gc_id; 3049 } 3050 return _g1h->gc_tracer_cm()->gc_id(); 3051 } 3052 3053 static void print_ms_time_info(const char* prefix, const char* name, 3054 NumberSeq& ns) { 3055 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3056 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3057 if (ns.num() > 0) { 3058 gclog_or_tty->print_cr("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 3059 prefix, ns.sd(), ns.maximum()); 3060 } 3061 } 3062 3063 void ConcurrentMark::print_summary_info() { 3064 gclog_or_tty->print_cr(" Concurrent marking:"); 3065 print_ms_time_info(" ", "init marks", _init_times); 3066 print_ms_time_info(" ", "remarks", _remark_times); 3067 { 3068 print_ms_time_info(" ", "final marks", _remark_mark_times); 3069 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3070 3071 } 3072 print_ms_time_info(" ", "cleanups", _cleanup_times); 3073 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3074 _total_counting_time, 3075 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3076 (double)_cleanup_times.num() 3077 : 0.0)); 3078 if (G1ScrubRemSets) { 3079 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3080 _total_rs_scrub_time, 3081 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3082 (double)_cleanup_times.num() 3083 : 0.0)); 3084 } 3085 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3086 (_init_times.sum() + _remark_times.sum() + 3087 _cleanup_times.sum())/1000.0); 3088 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3089 "(%8.2f s marking).", 3090 cmThread()->vtime_accum(), 3091 cmThread()->vtime_mark_accum()); 3092 } 3093 3094 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3095 _parallel_workers->print_worker_threads_on(st); 3096 } 3097 3098 void ConcurrentMark::print_on_error(outputStream* st) const { 3099 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3100 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3101 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3102 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3103 } 3104 3105 // We take a break if someone is trying to stop the world. 
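// A sketch of how the concurrent phases are expected to use this hook,
// with a hypothetical work loop (illustrative only, not part of the
// original file):
#if 0
  while (has_more_work()) {
    do_bounded_chunk_of_work();
    if (cm->do_yield_check(worker_id)) {
      // We yielded to a safepoint; worker 0 recorded the pause for policy.
    }
  }
#endif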
3106 bool ConcurrentMark::do_yield_check(uint worker_id) { 3107 if (SuspendibleThreadSet::should_yield()) { 3108 if (worker_id == 0) { 3109 _g1h->g1_policy()->record_concurrent_pause(); 3110 } 3111 SuspendibleThreadSet::yield(); 3112 return true; 3113 } else { 3114 return false; 3115 } 3116 } 3117 3118 #ifndef PRODUCT 3119 // for debugging purposes 3120 void ConcurrentMark::print_finger() { 3121 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3122 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3123 for (uint i = 0; i < _max_worker_id; ++i) { 3124 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3125 } 3126 gclog_or_tty->cr(); 3127 } 3128 #endif 3129 3130 template<bool scan> 3131 inline void CMTask::process_grey_object(oop obj) { 3132 assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray"); 3133 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3134 3135 if (_cm->verbose_high()) { 3136 gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT, 3137 _worker_id, p2i((void*) obj)); 3138 } 3139 3140 size_t obj_size = obj->size(); 3141 _words_scanned += obj_size; 3142 3143 if (scan) { 3144 obj->oop_iterate(_cm_oop_closure); 3145 } 3146 statsOnly( ++_objs_scanned ); 3147 check_limits(); 3148 } 3149 3150 template void CMTask::process_grey_object<true>(oop); 3151 template void CMTask::process_grey_object<false>(oop); 3152 3153 // Closure for iteration over bitmaps 3154 class CMBitMapClosure : public BitMapClosure { 3155 private: 3156 // the bitmap that is being iterated over 3157 CMBitMap* _nextMarkBitMap; 3158 ConcurrentMark* _cm; 3159 CMTask* _task; 3160 3161 public: 3162 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3163 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3164 3165 bool do_bit(size_t offset) { 3166 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3167 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3168 assert( addr < _cm->finger(), "invariant"); 3169 3170 statsOnly( _task->increase_objs_found_on_bitmap() ); 3171 assert(addr >= _task->finger(), "invariant"); 3172 3173 // We move that task's local finger along. 
3174 _task->move_finger_to(addr); 3175 3176 _task->scan_object(oop(addr)); 3177 // we only partially drain the local queue and global stack 3178 _task->drain_local_queue(true); 3179 _task->drain_global_stack(true); 3180 3181 // if the has_aborted flag has been raised, we need to bail out of 3182 // the iteration 3183 return !_task->has_aborted(); 3184 } 3185 }; 3186 3187 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3188 ConcurrentMark* cm, 3189 CMTask* task) 3190 : _g1h(g1h), _cm(cm), _task(task) { 3191 assert(_ref_processor == NULL, "should be initialized to NULL"); 3192 3193 if (G1UseConcMarkReferenceProcessing) { 3194 _ref_processor = g1h->ref_processor_cm(); 3195 assert(_ref_processor != NULL, "should not be NULL"); 3196 } 3197 } 3198 3199 void CMTask::setup_for_region(HeapRegion* hr) { 3200 assert(hr != NULL, 3201 "claim_region() should have filtered out NULL regions"); 3202 assert(!hr->is_continues_humongous(), 3203 "claim_region() should have filtered out continues humongous regions"); 3204 3205 if (_cm->verbose_low()) { 3206 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3207 _worker_id, p2i(hr)); 3208 } 3209 3210 _curr_region = hr; 3211 _finger = hr->bottom(); 3212 update_region_limit(); 3213 } 3214 3215 void CMTask::update_region_limit() { 3216 HeapRegion* hr = _curr_region; 3217 HeapWord* bottom = hr->bottom(); 3218 HeapWord* limit = hr->next_top_at_mark_start(); 3219 3220 if (limit == bottom) { 3221 if (_cm->verbose_low()) { 3222 gclog_or_tty->print_cr("[%u] found an empty region " 3223 "["PTR_FORMAT", "PTR_FORMAT")", 3224 _worker_id, p2i(bottom), p2i(limit)); 3225 } 3226 // The region was collected underneath our feet. 3227 // We set the finger to bottom to ensure that the bitmap 3228 // iteration that will follow this will not do anything. 3229 // (this is not a condition that holds when we set the region up, 3230 // as the region is not supposed to be empty in the first place) 3231 _finger = bottom; 3232 } else if (limit >= _region_limit) { 3233 assert(limit >= _finger, "peace of mind"); 3234 } else { 3235 assert(limit < _region_limit, "only way to get here"); 3236 // This can happen under some pretty unusual circumstances. An 3237 // evacuation pause empties the region underneath our feet (NTAMS 3238 // at bottom). We then do some allocation in the region (NTAMS 3239 // stays at bottom), followed by the region being used as a GC 3240 // alloc region (NTAMS will move to top() and the objects 3241 // originally below it will be grayed). All objects now marked in 3242 // the region are explicitly grayed, if below the global finger, 3243 // and we do not need in fact to scan anything else. So, we simply 3244 // set _finger to be limit to ensure that the bitmap iteration 3245 // doesn't do anything. 3246 _finger = limit; 3247 } 3248 3249 _region_limit = limit; 3250 } 3251 3252 void CMTask::giveup_current_region() { 3253 assert(_curr_region != NULL, "invariant"); 3254 if (_cm->verbose_low()) { 3255 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, 3256 _worker_id, p2i(_curr_region)); 3257 } 3258 clear_region_fields(); 3259 } 3260 3261 void CMTask::clear_region_fields() { 3262 // Values for these three fields that indicate that we're not 3263 // holding on to a region. 
void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  if (cm_oop_closure == NULL) {
    assert(_cm_oop_closure != NULL, "invariant");
  } else {
    assert(_cm_oop_closure == NULL, "invariant");
  }
  _cm_oop_closure = cm_oop_closure;
}

void CMTask::reset(CMBitMap* nextMarkBitMap) {
  guarantee(nextMarkBitMap != NULL, "invariant");

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] resetting", _worker_id);
  }

  _nextMarkBitMap = nextMarkBitMap;
  clear_region_fields();

  _calls                     = 0;
  _elapsed_time_ms           = 0.0;
  _termination_time_ms       = 0.0;
  _termination_start_time_ms = 0.0;

#if _MARKING_STATS_
  _aborted                = 0;
  _aborted_overflow       = 0;
  _aborted_cm_aborted     = 0;
  _aborted_yield          = 0;
  _aborted_timed_out      = 0;
  _aborted_satb           = 0;
  _aborted_termination    = 0;
  _steal_attempts         = 0;
  _steals                 = 0;
  _local_pushes           = 0;
  _local_pops             = 0;
  _local_max_size         = 0;
  _objs_scanned           = 0;
  _global_pushes          = 0;
  _global_pops            = 0;
  _global_max_size        = 0;
  _global_transfers_to    = 0;
  _global_transfers_from  = 0;
  _regions_claimed        = 0;
  _objs_found_on_bitmap   = 0;
  _satb_buffers_processed = 0;
#endif // _MARKING_STATS_
}

bool CMTask::should_exit_termination() {
  regular_clock_call();
  // This is called when we are in the termination protocol. We should
  // quit if, for some reason, this task wants to abort or the global
  // stack is not empty (this means that we can get work from it).
  return !_cm->mark_stack_empty() || has_aborted();
}

void CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
         "shouldn't have been called otherwise");
  regular_clock_call();
}
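// Illustrative note (exposition only): the "clock" below is not timer-driven.
// Work counters trigger it; conceptually the hot paths do the equivalent of
//
//   if (_words_scanned >= _words_scanned_limit ||
//       _refs_reached  >= _refs_reached_limit) {
//     reached_limit();   // which calls regular_clock_call()
//   }
//
// so the abort conditions are only checked once per "period" of work.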
void CMTask::regular_clock_call() {
  if (has_aborted()) return;

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    set_has_aborted();
    return;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!concurrent()) return;

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    set_has_aborted();
    statsOnly( ++_aborted_cm_aborted );
    return;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (3) If marking stats are enabled, then we update the clock-call
  // statistics and intervals.
#if _MARKING_STATS_
  if (_words_scanned >= _words_scanned_limit) {
    ++_clock_due_to_scanning;
  }
  if (_refs_reached >= _refs_reached_limit) {
    ++_clock_due_to_marking;
  }

  double last_interval_ms = curr_time_ms - _interval_start_time_ms;
  _interval_start_time_ms = curr_time_ms;
  _all_clock_intervals_ms.add(last_interval_ms);

  if (_cm->verbose_medium()) {
    gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
                           "scanned = " SIZE_FORMAT "%s, refs reached = " SIZE_FORMAT "%s",
                           _worker_id, last_interval_ms,
                           _words_scanned,
                           (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
                           _refs_reached,
                           (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
  }
#endif // _MARKING_STATS_

  // (4) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    statsOnly( ++_aborted_yield );
    return;
  }

  // (5) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    statsOnly( ++_aborted_timed_out );
    return;
  }

  // (6) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
                             _worker_id);
    }
    // we do need to process SATB buffers, so we'll abort and restart
    // the marking task to do so
    set_has_aborted();
    statsOnly( ++_aborted_satb );
    return;
  }
}

void CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit      = _real_words_scanned_limit;

  _real_refs_reached_limit  = _refs_reached + refs_reached_period;
  _refs_reached_limit       = _real_refs_reached_limit;
}
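// Worked example (illustrative; the period constants are defined elsewhere
// in this class): if words_scanned_period is P, then after decrease_limits()
// below the effective limit becomes real_limit - 3P/4. A task whose limits
// were just recalculated will therefore trigger the next clock call after
// only ~P/4 of the normal scanning work.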
void CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.

  if (_cm->verbose_medium()) {
    gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
  }

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit  = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}

void CMTask::move_entries_to_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the local queue
  oop buffer[global_stack_transfer_size];

  int n = 0;
  oop obj;
  while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
    buffer[n] = obj;
    ++n;
  }

  if (n > 0) {
    // we popped at least one entry from the local queue

    statsOnly( ++_global_transfers_to; _local_pops += n );

    if (!_cm->mark_stack_push(buffer, n)) {
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
                               _worker_id);
      }
      set_has_aborted();
    } else {
      // the transfer was successful

      if (_cm->verbose_medium()) {
        gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
                               _worker_id, n);
      }
      statsOnly( size_t tmp_size = _cm->mark_stack_size();
                 if (tmp_size > _global_max_size) {
                   _global_max_size = tmp_size;
                 }
                 _global_pushes += n );
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void CMTask::get_entries_from_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the global stack.
  oop buffer[global_stack_transfer_size];
  int n;
  _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  assert(n <= global_stack_transfer_size,
         "we should not pop more than the given limit");
  if (n > 0) {
    // yes, we did actually pop at least one entry

    statsOnly( ++_global_transfers_from; _global_pops += n );
    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
                             _worker_id, n);
    }
    for (int i = 0; i < n; ++i) {
      bool success = _task_queue->push(buffer[i]);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }

    statsOnly( size_t tmp_size = (size_t)_task_queue->size();
               if (tmp_size > _local_max_size) {
                 _local_max_size = tmp_size;
               }
               _local_pushes += n );
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}
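// Design note with an illustrative caller-side sketch (exposition only):
// entries move between a task's local queue and the global mark stack in
// chunks of global_stack_transfer_size, never one at a time, to amortize
// the cost of synchronizing on the shared stack:
//
//   if (local_queue_too_long) move_entries_to_global_stack();
//   if (local_queue_empty)    get_entries_from_global_stack();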
void CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) return;

  // Decide what the target size is, depending on whether we're going
  // to drain it partially (so that other tasks can steal if they run
  // out of things to do) or totally (at the very end).
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
                             _worker_id, target_size);
    }

    oop obj;
    bool ret = _task_queue->pop_local(obj);
    while (ret) {
      statsOnly( ++_local_pops );

      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("[%u] popped " PTR_FORMAT, _worker_id,
                               p2i((void*) obj));
      }

      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
      assert(!_g1h->is_on_master_free_list(
                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");

      scan_object(obj);

      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(obj);
      }
    }

    if (_cm->verbose_high()) {
      gclog_or_tty->print_cr("[%u] drained local queue, size = %u",
                             _worker_id, _task_queue->size());
    }
  }
}

void CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going
  // to drain it partially (so that other tasks can steal if they run
  // out of things to do) or totally (at the very end). Notice that,
  // because we move entries from the global stack in chunks, or
  // because another task might be doing the same, we might in fact
  // drop below the target. But, this is not a problem.
  size_t target_size;
  if (partially) {
    target_size = _cm->partial_mark_stack_size_target();
  } else {
    target_size = 0;
  }

  if (_cm->mark_stack_size() > target_size) {
    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
                             _worker_id, target_size);
    }

    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      get_entries_from_global_stack();
      drain_local_queue(partially);
    }

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
                             _worker_id, _cm->mark_stack_size());
    }
  }
}
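// Illustrative contrast of the two drain modes (exposition only; the target
// values are computed as shown above):
//
//   drain_local_queue(true);    // partial: stop at ~min(max_elems/3,
//                               // GCDrainStackTargetSize), leaving work to steal
//   drain_local_queue(false);   // total: stop only when the queue is empty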
// The SATB queue set has several assumptions about whether to call the
// par or non-par versions of its methods. This is why some of the code
// is replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
void CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
    }
    statsOnly( ++_satb_buffers_processed );
    regular_clock_call();
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}

void CMTask::print_stats() {
  gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
                         _worker_id, _calls);
  gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                         _elapsed_time_ms, _termination_time_ms);
  gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                         _step_times_ms.num(), _step_times_ms.avg(),
                         _step_times_ms.sd());
  gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
                         _step_times_ms.maximum(), _step_times_ms.sum());

#if _MARKING_STATS_
  gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                         _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
                         _all_clock_intervals_ms.sd());
  gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
                         _all_clock_intervals_ms.maximum(),
                         _all_clock_intervals_ms.sum());
  gclog_or_tty->print_cr("  Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT,
                         _clock_due_to_scanning, _clock_due_to_marking);
  gclog_or_tty->print_cr("  Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT,
                         _objs_scanned, _objs_found_on_bitmap);
  gclog_or_tty->print_cr("  Local Queue:  pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
                         _local_pushes, _local_pops, _local_max_size);
  gclog_or_tty->print_cr("  Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT,
                         _global_pushes, _global_pops, _global_max_size);
  gclog_or_tty->print_cr("                transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT,
                         _global_transfers_to, _global_transfers_from);
  gclog_or_tty->print_cr("  Regions: claimed = " SIZE_FORMAT, _regions_claimed);
  gclog_or_tty->print_cr("  SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed);
  gclog_or_tty->print_cr("  Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT,
                         _steal_attempts, _steals);
  gclog_or_tty->print_cr("  Aborted: " SIZE_FORMAT ", due to", _aborted);
  gclog_or_tty->print_cr("    overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT,
                         _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
  gclog_or_tty->print_cr("    time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT,
                         _aborted_timed_out, _aborted_satb, _aborted_termination);
#endif // _MARKING_STATS_
}

bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
  return _task_queues->steal(worker_id, hash_seed, obj);
}
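// Illustrative use of try_stealing() (exposition; the real loop is in
// do_marking_step() below): a worker whose own queue is empty spins over
// the other queues via the shared CMTaskQueueSet:
//
//   oop obj;
//   while (cm->try_stealing(worker_id, &hash_seed, obj)) {
//     task->scan_object(obj);   // stolen entries are already marked
//   }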
/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework. It can be called in parallel
    with other invocations of do_marking_step() on different tasks
    (but only one per task, obviously) and concurrently with the
    mutator threads, or during remark, hence it eliminates the need
    for two versions of the code. When called during remark, it will
    pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
    it needs to yield.

    The data structures that it uses to do marking work are the
    following:

      (1) Marking Bitmap. If there are gray objects that appear only
      on the bitmap (this happens either when dealing with an overflow
      or when the initial marking phase has simply marked the roots
      and didn't push them on the stack), then tasks claim heap
      regions whose bitmap they then scan to find gray objects. A
      global finger indicates where the end of the last claimed region
      is. A local finger indicates how far into the region a task has
      scanned. The two fingers are used to determine how to gray an
      object (i.e. whether simply marking it is OK, as it will be
      visited by a task in the future, or whether it needs to also be
      pushed on a stack).

      (2) Local Queue. The local queue of the task which is accessed
      reasonably efficiently by the task. Other tasks can steal from
      it when they run out of work. Throughout the marking phase, a
      task attempts to keep its local queue short but not totally
      empty, so that entries are available for stealing by other
      tasks. Only when there is no more work will a task totally
      drain its local queue.

      (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex, and finer-grained
      interaction with it might cause contention. If it
      overflows, then the marking phase should restart and iterate
      over the bitmap to identify gray objects. Throughout the marking
      phase, tasks attempt to keep the global mark stack at a small
      length but not totally empty, so that entries are available for
      popping by other tasks. Only when there is no more work will
      tasks totally drain the global mark stack.

      (4) SATB Buffer Queue. This is where completed SATB buffers are
      made available. Buffers are regularly removed from this queue
      and scanned for roots, so that the queue doesn't get too
      long. During remark, all completed buffers are processed, as
      well as the filled-in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

      (1) When the marking phase has been aborted (after a Full GC).

      (2) When a global overflow (on the global stack) has been
      triggered. Before the task aborts, it will actually sync up with
      the other tasks to ensure that all the marking data structures
      (local queues, stacks, fingers etc.) are re-initialized so that
      when do_marking_step() completes, the marking phase can
      immediately restart.
      (3) When enough completed SATB buffers are available. The
      do_marking_step() method only tries to drain SATB buffers right
      at the beginning. So, if enough buffers are available, the
      marking step aborts and the SATB buffers are processed at
      the beginning of the next invocation.

      (4) To yield. When we have to yield, we abort and do the yield
      right at the end of do_marking_step(). This saves us from a lot
      of hassle as, by yielding, we might allow a Full GC. If this
      happens then objects will be compacted underneath our feet, the
      heap might shrink, etc. We save checking for this by just
      aborting and doing the yield right at the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (in
    sub-ms intervals) throughout marking. It is this clock method that
    checks all the abort conditions which were mentioned above and
    decides when the task should abort. A work-based scheme is used to
    trigger this clock method: when the number of object words the
    marking phase has scanned or the number of references the marking
    phase has visited reaches a given limit. Additional invocations of
    the clock method have been planted in a few other strategic places
    too. The initial reason for the clock method was to avoid querying
    the virtual time too frequently, as it is quite expensive. So, once
    it was in place, it was natural to piggy-back all the other
    conditions on it too and not constantly check them throughout the
    code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.

 *****************************************************************************/
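// A minimal driver sketch for the contract described above (illustrative
// only; the real callers are the marking task classes elsewhere in this
// file, and the 10.0ms target is an arbitrary example value):
//
//   do {
//     task->do_marking_step(10.0  /* time_target_ms */,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//     // on abort: yield, sync up on overflow, etc., then loop again
//   } while (task->has_aborted() && !cm->has_aborted());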
void CMTask::do_marking_step(double time_target_ms,
                             bool do_termination,
                             bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it is
  // possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;
  statsOnly( _interval_start_time_ms = _start_time_ms );

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms =
    g1_policy->get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached  = 0;
  recalculate_limits();

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  if (_cm->verbose_low()) {
    gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
                           "target = %1.2lfms >>>>>>>>>>",
                           _worker_id, _calls, _time_target_ms);
  }

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] we're scanning part "
                               "[" PTR_FORMAT ", " PTR_FORMAT ") "
                               "of region " HR_FORMAT,
                               _worker_id, p2i(_finger), p2i(_region_limit),
                               HR_FORMAT_PARAMS(_curr_region));
      }

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region  == NULL, "invariant");
      assert(_finger       == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
      }
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        statsOnly( ++_regions_claimed );

        if (_cm->verbose_low()) {
          gclog_or_tty->print_cr("[%u] we successfully claimed "
                                 "region " PTR_FORMAT,
                                 _worker_id, p2i(claimed_region));
        }

        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
    }

    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
    }

    while (!has_aborted()) {
      oop obj;
      statsOnly( ++_steal_attempts );

      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        if (_cm->verbose_medium()) {
          gclog_or_tty->print_cr("[%u] stolen " PTR_FORMAT " successfully",
                                 _worker_id, p2i((void*) obj));
        }

        statsOnly( ++_steals );

        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // If we are about to wrap up and go into termination, check if we
  // should raise the overflow flag.
  if (do_termination && !has_aborted()) {
    if (_cm->force_overflow()->should_force()) {
      _cm->set_has_overflown();
      regular_clock_call();
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");

    if (_cm->verbose_low()) {
      gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
    }

    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
      }
    } else {
      // Apparently there's more work to do. Let's abort this task. The
      // marking framework will restart it, and we can hopefully find
      // more things to do.

      if (_cm->verbose_low()) {
        gclog_or_tty->print_cr("[%u] apparently there is more work to do",
                               _worker_id);
      }

      set_has_aborted();
      statsOnly( ++_aborted_termination );
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.

    statsOnly( ++_aborted );

    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
This means we have to restart the 4171 // marking phase and start iterating over regions. However, in 4172 // order to do this we have to make sure that all tasks stop 4173 // what they are doing and re-initialize in a safe manner. We 4174 // will achieve this with the use of two barrier sync points. 4175 4176 if (_cm->verbose_low()) { 4177 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4178 } 4179 4180 if (!is_serial) { 4181 // We only need to enter the sync barrier if being called 4182 // from a parallel context 4183 _cm->enter_first_sync_barrier(_worker_id); 4184 4185 // When we exit this sync barrier we know that all tasks have 4186 // stopped doing marking work. So, it's now safe to 4187 // re-initialize our data structures. At the end of this method, 4188 // task 0 will clear the global data structures. 4189 } 4190 4191 statsOnly( ++_aborted_overflow ); 4192 4193 // We clear the local state of this task... 4194 clear_region_fields(); 4195 4196 if (!is_serial) { 4197 // ...and enter the second barrier. 4198 _cm->enter_second_sync_barrier(_worker_id); 4199 } 4200 // At this point, if we're during the concurrent phase of 4201 // marking, everything has been re-initialized and we're 4202 // ready to restart. 4203 } 4204 4205 if (_cm->verbose_low()) { 4206 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4207 "elapsed = %1.2lfms <<<<<<<<<<", 4208 _worker_id, _time_target_ms, elapsed_time_ms); 4209 if (_cm->has_aborted()) { 4210 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4211 _worker_id); 4212 } 4213 } 4214 } else { 4215 if (_cm->verbose_low()) { 4216 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4217 "elapsed = %1.2lfms <<<<<<<<<<", 4218 _worker_id, _time_target_ms, elapsed_time_ms); 4219 } 4220 } 4221 4222 _claimed = false; 4223 } 4224 4225 CMTask::CMTask(uint worker_id, 4226 ConcurrentMark* cm, 4227 size_t* marked_bytes, 4228 BitMap* card_bm, 4229 CMTaskQueue* task_queue, 4230 CMTaskQueueSet* task_queues) 4231 : _g1h(G1CollectedHeap::heap()), 4232 _worker_id(worker_id), _cm(cm), 4233 _claimed(false), 4234 _nextMarkBitMap(NULL), _hash_seed(17), 4235 _task_queue(task_queue), 4236 _task_queues(task_queues), 4237 _cm_oop_closure(NULL), 4238 _marked_bytes_array(marked_bytes), 4239 _card_bm(card_bm) { 4240 guarantee(task_queue != NULL, "invariant"); 4241 guarantee(task_queues != NULL, "invariant"); 4242 4243 statsOnly( _clock_due_to_scanning = 0; 4244 _clock_due_to_marking = 0 ); 4245 4246 _marking_step_diffs_ms.add(0.5); 4247 } 4248 4249 // These are formatting macros that are used below to ensure 4250 // consistent formatting. The *_H_* versions are used to format the 4251 // header for a particular value and they should be kept consistent 4252 // with the corresponding macro. Also note that most of the macros add 4253 // the necessary white space (as a prefix) which makes them a bit 4254 // easier to compose. 4255 4256 // All the output lines are prefixed with this string to be able to 4257 // identify them easily in a large log file. 
// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX             "###"

#define G1PPRL_ADDR_BASE_FORMAT        " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT             " %-4s"
#define G1PPRL_TYPE_H_FORMAT           " %4s"
#define G1PPRL_BYTE_FORMAT             " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT           " %9s"
#define G1PPRL_DOUBLE_FORMAT           " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT         " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
  : _out(out),
    _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _hum_used_bytes(0), _hum_capacity_bytes(0),
    _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  _out->cr();
  _out->print_cr(G1PPRL_LINE_PREFIX " PHASE %s @ %1.3f", phase_name, now);
  _out->print_cr(G1PPRL_LINE_PREFIX " HEAP"
                 G1PPRL_SUM_ADDR_FORMAT("reserved")
                 G1PPRL_SUM_BYTE_FORMAT("region-size"),
                 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                 HeapRegion::GrainBytes);
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "type", "address-range",
                 "used", "prev-live", "next-live", "gc-eff",
                 "remset", "code-roots");
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_H_FORMAT
                 G1PPRL_ADDR_BASE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_DOUBLE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT
                 G1PPRL_BYTE_H_FORMAT,
                 "", "",
                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                 "(bytes)", "(bytes)");
}
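// For orientation, the header printed above looks roughly like this
// (illustrative output; exact column widths depend on the format macros):
//
//   ### PHASE Post-Marking @ 12.345
//   ### HEAP reserved: 0x...-0x... region-size: 1048576
//   ###
//   ###  type address-range  used prev-live next-live gc-eff remset code-roots
//   ###                   (bytes)  (bytes)   (bytes) (bytes/ms) (bytes) (bytes)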
// Given a pointer to one of the _hum_* fields, this method deduces the
// corresponding value for a region in a humongous region series (either
// the region size, or what's left if the _hum_* field is < the region
// size), and updates the _hum_* field accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  size_t bytes = 0;
  // The > 0 check is to deal with the prev and next live bytes which
  // could be 0.
  if (*hum_bytes > 0) {
    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
    *hum_bytes -= bytes;
  }
  return bytes;
}

// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and that we visit the regions in address order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
                                                     size_t* capacity_bytes,
                                                     size_t* prev_live_bytes,
                                                     size_t* next_live_bytes) {
  assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  *used_bytes      = get_hum_bytes(&_hum_used_bytes);
  *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
  *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type       = r->get_type_str();
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  if (r->is_starts_humongous()) {
    assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
           _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
           "they should have been zeroed after the last time we used them");
    // Set up the _hum_* fields.
    _hum_capacity_bytes  = capacity_bytes;
    _hum_used_bytes      = used_bytes;
    _hum_prev_live_bytes = prev_live_bytes;
    _hum_next_live_bytes = next_live_bytes;
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    end = bottom + HeapRegion::GrainWords;
  } else if (r->is_continues_humongous()) {
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    assert(end == bottom + HeapRegion::GrainWords, "invariant");
  }

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  _out->print_cr(G1PPRL_LINE_PREFIX
                 G1PPRL_TYPE_FORMAT
                 G1PPRL_ADDR_BASE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_DOUBLE_FORMAT
                 G1PPRL_BYTE_FORMAT
                 G1PPRL_BYTE_FORMAT,
                 type, p2i(bottom), p2i(end),
                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                 remset_bytes, strong_code_roots_bytes);

  return false;
}
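// Worked example (illustrative numbers, assuming the series totals are
// carried by the "starts humongous" region as the code above implies): with
// 1MB regions, a 2.5MB humongous object spans one "starts" region plus two
// "continues" regions. doHeapRegion() stashes the full 2.5MB in the _hum_*
// fields at the starts region, and each of the three visits then drains
// min(GrainBytes, remainder): 1MB, 1MB, and 0.5MB, so the per-region lines
// and the running totals stay accurate.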
G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // add static memory usages to remembered set sizes
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  _out->print_cr(G1PPRL_LINE_PREFIX);
  _out->print_cr(G1PPRL_LINE_PREFIX
                 " SUMMARY"
                 G1PPRL_SUM_MB_FORMAT("capacity")
                 G1PPRL_SUM_MB_PERC_FORMAT("used")
                 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                 G1PPRL_SUM_MB_FORMAT("remset")
                 G1PPRL_SUM_MB_FORMAT("code-roots"),
                 bytes_to_mb(_total_capacity_bytes),
                 bytes_to_mb(_total_used_bytes),
                 perc(_total_used_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_prev_live_bytes),
                 perc(_total_prev_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_next_live_bytes),
                 perc(_total_next_live_bytes, _total_capacity_bytes),
                 bytes_to_mb(_total_remset_bytes),
                 bytes_to_mb(_total_strong_code_roots_bytes));
  _out->cr();
}
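// Typical use of the closure (a sketch mirroring how the G1 cleanup path
// drives it; treat the phase label as an example value):
//
//   if (G1PrintRegionLivenessInfo) {
//     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//     G1CollectedHeap::heap()->heap_region_iterate(&cl);
//   }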