/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize  = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}
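
// A quick worked example of the mapping above (assuming the usual 8-byte
// minimum object alignment, i.e. MinObjAlignmentInBytes == 8): each bitmap
// bit covers one 8-byte-aligned heap word, so one bitmap byte covers
// 8 * 8 == 64 heap bytes -- exactly mark_distance(). compute_size() thus
// reserves heap_size / 64 bytes of backing store, e.g. roughly 16 MB per
// mark bitmap for a 1 GB heap.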

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
}

void CMBitMap::markRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflowed the marking stack while marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}
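
// An editorial note on the two parallel operations below: both serialize on
// ParGCRareEvent_lock, which is acceptable because bulk spills to and
// refills from the global stack are rare events. A push that would exceed
// _capacity does not grow the stack in place; it only sets _overflow, and
// the marking cycle reacts to that flag by restarting and, where possible,
// expanding the stack at the next remark (see set_should_expand() and
// expand() above).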

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}
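
// claim_next() below hands out survivor regions with a double-checked
// pattern: _next_survivor is first read optimistically without the lock and
// then re-read under RootRegionScan_lock before being advanced, so that
// concurrent callers each claim a distinct region and a NULL result
// reliably means the list is exhausted (or the scan should abort).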

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor             = sleep_factor;
    _marking_task_overhead    = marking_task_overhead;
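
    // For illustration (hypothetical numbers, not defaults): with
    // G1MarkingOverheadPercent=10, MaxGCPauseMillis=200,
    // GCPauseIntervalMillis=1000 and 8 processors, overall_cm_overhead is
    // 200 * 0.1 / 1000 = 0.02 and cpu_ratio is 1/8 = 0.125, so a single
    // marking thread is started (ceil(0.02 / 0.125) = 1) with a task
    // overhead of 0.02 * 8 = 0.16 and a sleep factor of
    // (1 - 0.16) / 0.16 = 5.25, i.e. it sleeps over five times as long as
    // it marks.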
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
                   (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Marker",
       _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}
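
// Liveness counting uses one card bitmap and one marked-bytes array per
// worker (allocated in the constructor above). Tasks only ever update their
// own copies while marking runs, and the per-worker data is folded into the
// global counting structures by aggregate_count_data() during the remark
// pause.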

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other threads to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_second_overflow_barrier_sync.enter();
  }

  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check(worker_id);

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};
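
// To see how the sleep throttling in CMConcurrentMarkingTask::work() plays
// out, continuing the hypothetical numbers from the constructor above: with
// a sleep factor of 5.25, a marking step that consumed 10 ms of vtime is
// followed by a sleep of roughly 10 * 5.25 = 52 ms, which keeps the marking
// thread's CPU usage near the requested overhead target.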

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}
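
// Note that root region scanning is not optional bookkeeping: an evacuation
// pause must not start while it is in progress (callers block in
// CMRootRegions::wait_until_scan_finished()), since the survivor regions
// being scanned in place here would otherwise be moved out from under the
// scan.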

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots()
  // and the decisions on that MT processing are made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.
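
// The verification below is deliberately one-sided: the closure recomputes
// "expected" region and card bitmaps from the next mark bitmap and reports
// a failure only where an expected bit is set but the actual bit is not, or
// where the expected marked bytes exceed the actual marked bytes; the
// actual data being a superset of the expected data is tolerated.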

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting for some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
1579 1580 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1581 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1582 1583 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1584 expected = _exp_card_bm->at(i); 1585 actual = _card_bm->at(i); 1586 1587 if (expected && !actual) { 1588 if (_verbose) { 1589 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 1590 "expected: %s, actual: %s", 1591 hr->hrm_index(), i, 1592 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 1593 } 1594 failures += 1; 1595 } 1596 } 1597 1598 if (failures > 0 && _verbose) { 1599 gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", " 1600 "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT, 1601 HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()), 1602 _calc_cl.region_marked_bytes(), hr->next_marked_bytes()); 1603 } 1604 1605 _failures += failures; 1606 1607 // We could stop iteration over the heap when we 1608 // find the first violating region by returning true. 1609 return false; 1610 } 1611 }; 1612 1613 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1614 protected: 1615 G1CollectedHeap* _g1h; 1616 ConcurrentMark* _cm; 1617 BitMap* _actual_region_bm; 1618 BitMap* _actual_card_bm; 1619 1620 uint _n_workers; 1621 1622 BitMap* _expected_region_bm; 1623 BitMap* _expected_card_bm; 1624 1625 int _failures; 1626 bool _verbose; 1627 1628 HeapRegionClaimer _hrclaimer; 1629 1630 public: 1631 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1632 BitMap* region_bm, BitMap* card_bm, 1633 BitMap* expected_region_bm, BitMap* expected_card_bm) 1634 : AbstractGangTask("G1 verify final counting"), 1635 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1636 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1637 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1638 _failures(0), _verbose(false), 1639 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1640 assert(VerifyDuringGC, "don't call this otherwise"); 1641 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1642 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1643 1644 _verbose = _cm->verbose_medium(); 1645 } 1646 1647 void work(uint worker_id) { 1648 assert(worker_id < _n_workers, "invariant"); 1649 1650 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1651 _actual_region_bm, _actual_card_bm, 1652 _expected_region_bm, 1653 _expected_card_bm, 1654 _verbose); 1655 1656 _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer); 1657 1658 Atomic::add(verify_cl.failures(), &_failures); 1659 } 1660 1661 int failures() const { return _failures; } 1662 }; 1663 1664 // Closure that finalizes the liveness counting data. 1665 // Used during the cleanup pause. 1666 // Sets the bits corresponding to the interval [NTAMS, top] 1667 // (which contains the implicitly live objects) in the 1668 // card liveness bitmap. Also sets the bit for each region, 1669 // containing live data, in the region liveness bitmap. 1670 1671 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1672 public: 1673 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1674 BitMap* region_bm, 1675 BitMap* card_bm) : 1676 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1677 1678 bool doHeapRegion(HeapRegion* hr) { 1679 1680 if (hr->is_continues_humongous()) { 1681 // We will ignore these here and process them when their 1682 // associated "starts humongous" region is processed (see 1683 // set_bit_for_heap_region()). 
Note that we cannot rely on their 1684 // associated "starts humongous" region to have their bit set to 1685 // 1 since, due to the region chunking in the parallel region 1686 // iteration, a "continues humongous" region might be visited 1687 // before its associated "starts humongous". 1688 return false; 1689 } 1690 1691 HeapWord* ntams = hr->next_top_at_mark_start(); 1692 HeapWord* top = hr->top(); 1693 1694 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1695 1696 // Mark the allocated-since-marking portion... 1697 if (ntams < top) { 1698 // This definitely means the region has live objects. 1699 set_bit_for_region(hr); 1700 1701 // Now set the bits in the card bitmap for [ntams, top) 1702 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1703 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1704 1705 // Note: if we're looking at the last region in heap - top 1706 // could be actually just beyond the end of the heap; end_idx 1707 // will then correspond to a (non-existent) card that is also 1708 // just beyond the heap. 1709 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1710 // end of object is not card aligned - increment to cover 1711 // all the cards spanned by the object 1712 end_idx += 1; 1713 } 1714 1715 assert(end_idx <= _card_bm->size(), 1716 err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1717 end_idx, _card_bm->size())); 1718 assert(start_idx < _card_bm->size(), 1719 err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT, 1720 start_idx, _card_bm->size())); 1721 1722 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1723 } 1724 1725 // Set the bit for the region if it contains live data 1726 if (hr->next_marked_bytes() > 0) { 1727 set_bit_for_region(hr); 1728 } 1729 1730 return false; 1731 } 1732 }; 1733 1734 class G1ParFinalCountTask: public AbstractGangTask { 1735 protected: 1736 G1CollectedHeap* _g1h; 1737 ConcurrentMark* _cm; 1738 BitMap* _actual_region_bm; 1739 BitMap* _actual_card_bm; 1740 1741 uint _n_workers; 1742 HeapRegionClaimer _hrclaimer; 1743 1744 public: 1745 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1746 : AbstractGangTask("G1 final counting"), 1747 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1748 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1749 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1750 } 1751 1752 void work(uint worker_id) { 1753 assert(worker_id < _n_workers, "invariant"); 1754 1755 FinalCountDataUpdateClosure final_update_cl(_g1h, 1756 _actual_region_bm, 1757 _actual_card_bm); 1758 1759 _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer); 1760 } 1761 }; 1762 1763 class G1ParNoteEndTask; 1764 1765 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1766 G1CollectedHeap* _g1; 1767 size_t _max_live_bytes; 1768 uint _regions_claimed; 1769 size_t _freed_bytes; 1770 FreeRegionList* _local_cleanup_list; 1771 HeapRegionSetCount _old_regions_removed; 1772 HeapRegionSetCount _humongous_regions_removed; 1773 HRRSCleanupTask* _hrrs_cleanup_task; 1774 double _claimed_region_time; 1775 double _max_region_time; 1776 1777 public: 1778 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1779 FreeRegionList* local_cleanup_list, 1780 HRRSCleanupTask* hrrs_cleanup_task) : 1781 _g1(g1), 1782 _max_live_bytes(0), _regions_claimed(0), 1783 _freed_bytes(0), 1784 _claimed_region_time(0.0), _max_region_time(0.0), 1785 _local_cleanup_list(local_cleanup_list), 1786 
_old_regions_removed(), 1787 _humongous_regions_removed(), 1788 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1789 1790 size_t freed_bytes() { return _freed_bytes; } 1791 const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } 1792 const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } 1793 1794 bool doHeapRegion(HeapRegion *hr) { 1795 if (hr->is_continues_humongous()) { 1796 return false; 1797 } 1798 // We use a claim value of zero here because all regions 1799 // were claimed with value 1 in the FinalCount task. 1800 _g1->reset_gc_time_stamps(hr); 1801 double start = os::elapsedTime(); 1802 _regions_claimed++; 1803 hr->note_end_of_marking(); 1804 _max_live_bytes += hr->max_live_bytes(); 1805 1806 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1807 _freed_bytes += hr->used(); 1808 hr->set_containing_set(NULL); 1809 if (hr->is_humongous()) { 1810 assert(hr->is_starts_humongous(), "we should only see starts humongous"); 1811 _humongous_regions_removed.increment(1u, hr->capacity()); 1812 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1813 } else { 1814 _old_regions_removed.increment(1u, hr->capacity()); 1815 _g1->free_region(hr, _local_cleanup_list, true); 1816 } 1817 } else { 1818 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1819 } 1820 1821 double region_time = (os::elapsedTime() - start); 1822 _claimed_region_time += region_time; 1823 if (region_time > _max_region_time) { 1824 _max_region_time = region_time; 1825 } 1826 return false; 1827 } 1828 1829 size_t max_live_bytes() { return _max_live_bytes; } 1830 uint regions_claimed() { return _regions_claimed; } 1831 double claimed_region_time_sec() { return _claimed_region_time; } 1832 double max_region_time_sec() { return _max_region_time; } 1833 }; 1834 1835 class G1ParNoteEndTask: public AbstractGangTask { 1836 friend class G1NoteEndOfConcMarkClosure; 1837 1838 protected: 1839 G1CollectedHeap* _g1h; 1840 size_t _max_live_bytes; 1841 size_t _freed_bytes; 1842 FreeRegionList* _cleanup_list; 1843 HeapRegionClaimer _hrclaimer; 1844 1845 public: 1846 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1847 AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { 1848 } 1849 1850 void work(uint worker_id) { 1851 FreeRegionList local_cleanup_list("Local Cleanup List"); 1852 HRRSCleanupTask hrrs_cleanup_task; 1853 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1854 &hrrs_cleanup_task); 1855 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); 1856 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1857 1858 // Now update the lists 1859 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1860 { 1861 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1862 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1863 _max_live_bytes += g1_note_end.max_live_bytes(); 1864 _freed_bytes += g1_note_end.freed_bytes(); 1865 1866 // If we iterate over the global cleanup list at the end of 1867 // cleanup to do this printing we will not guarantee to only 1868 // generate output for the newly-reclaimed regions (the list 1869 // might not be empty at the beginning of cleanup; we might 1870 // still be working on its previous contents). So we do the 1871 // printing here, before we append the new regions to the global 1872 // cleanup list. 
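// Note also that add_ordered() below transfers every region out of
// local_cleanup_list (the post-condition assert checks that the list is
// empty afterwards), so this is the last point at which the newly-reclaimed
// regions are still identifiable as a group.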
1873 1874 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1875 if (hr_printer->is_active()) { 1876 FreeRegionListIterator iter(&local_cleanup_list); 1877 while (iter.more_available()) { 1878 HeapRegion* hr = iter.get_next(); 1879 hr_printer->cleanup(hr); 1880 } 1881 } 1882 1883 _cleanup_list->add_ordered(&local_cleanup_list); 1884 assert(local_cleanup_list.is_empty(), "post-condition"); 1885 1886 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1887 } 1888 } 1889 size_t max_live_bytes() { return _max_live_bytes; } 1890 size_t freed_bytes() { return _freed_bytes; } 1891 }; 1892 1893 class G1ParScrubRemSetTask: public AbstractGangTask { 1894 protected: 1895 G1RemSet* _g1rs; 1896 BitMap* _region_bm; 1897 BitMap* _card_bm; 1898 HeapRegionClaimer _hrclaimer; 1899 1900 public: 1901 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1902 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1903 } 1904 1905 void work(uint worker_id) { 1906 _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer); 1907 } 1908 1909 }; 1910 1911 void ConcurrentMark::cleanup() { 1912 // world is stopped at this checkpoint 1913 assert(SafepointSynchronize::is_at_safepoint(), 1914 "world should be stopped"); 1915 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1916 1917 // If a full collection has happened, we shouldn't do this. 1918 if (has_aborted()) { 1919 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1920 return; 1921 } 1922 1923 g1h->verify_region_sets_optional(); 1924 1925 if (VerifyDuringGC) { 1926 HandleMark hm; // handle scope 1927 g1h->prepare_for_verify(); 1928 Universe::verify(VerifyOption_G1UsePrevMarking, 1929 " VerifyDuringGC:(before)"); 1930 } 1931 g1h->check_bitmaps("Cleanup Start"); 1932 1933 G1CollectorPolicy* g1p = g1h->g1_policy(); 1934 g1p->record_concurrent_mark_cleanup_start(); 1935 1936 double start = os::elapsedTime(); 1937 1938 HeapRegionRemSet::reset_for_cleanup_tasks(); 1939 1940 // Do counting once more with the world stopped for good measure. 1941 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 1942 1943 uint n_workers = _g1h->workers()->active_workers(); 1944 1945 g1h->workers()->run_task(&g1_par_count_task); 1946 1947 if (VerifyDuringGC) { 1948 // Verify that the counting data accumulated during marking matches 1949 // that calculated by walking the marking bitmap. 1950 1951 // Bitmaps to hold expected values 1952 BitMap expected_region_bm(_region_bm.size(), true); 1953 BitMap expected_card_bm(_card_bm.size(), true); 1954 1955 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 1956 &_region_bm, 1957 &_card_bm, 1958 &expected_region_bm, 1959 &expected_card_bm); 1960 1961 g1h->workers()->run_task(&g1_par_verify_task); 1962 1963 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); 1964 } 1965 1966 size_t start_used_bytes = g1h->used(); 1967 g1h->set_marking_complete(); 1968 1969 double count_end = os::elapsedTime(); 1970 double this_final_counting_time = (count_end - start); 1971 _total_counting_time += this_final_counting_time; 1972 1973 if (G1PrintRegionLivenessInfo) { 1974 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); 1975 _g1h->heap_region_iterate(&cl); 1976 } 1977 1978 // Install newly created mark bitMap as "prev". 1979 swapMarkBitMaps(); 1980 1981 g1h->reset_gc_time_stamp(); 1982 1983 // Note end of marking in all heap regions. 
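// This task applies G1NoteEndOfConcMarkClosure (above) to each region: it
// records per-region liveness, moves completely-dead non-young regions
// (used() > 0 but max_live_bytes() == 0) onto per-worker local free lists,
// and gathers remembered set cleanup work for the remaining regions.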
1984 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers); 1985 g1h->workers()->run_task(&g1_par_note_end_task); 1986 g1h->check_gc_time_stamps(); 1987 1988 if (!cleanup_list_is_empty()) { 1989 // The cleanup list is not empty, so we'll have to process it 1990 // concurrently. Notify anyone else that might be wanting free 1991 // regions that there will be more free regions coming soon. 1992 g1h->set_free_regions_coming(); 1993 } 1994 1995 // Scrub the remembered sets before the record_concurrent_mark_cleanup_end() 1996 // call below, since scrubbing affects the metric by which we sort the heap regions. 1997 if (G1ScrubRemSets) { 1998 double rs_scrub_start = os::elapsedTime(); 1999 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers); 2000 g1h->workers()->run_task(&g1_par_scrub_rs_task); 2001 2002 double rs_scrub_end = os::elapsedTime(); 2003 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); 2004 _total_rs_scrub_time += this_rs_scrub_time; 2005 } 2006 2007 // this will also free any regions totally full of garbage objects, 2008 // and sort the regions. 2009 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers); 2010 2011 // Statistics. 2012 double end = os::elapsedTime(); 2013 _cleanup_times.add((end - start) * 1000.0); 2014 2015 if (G1Log::fine()) { 2016 g1h->g1_policy()->print_heap_transition(start_used_bytes); 2017 } 2018 2019 // Cleanup will have freed any regions completely full of garbage. 2020 // Update the soft reference policy with the new heap occupancy. 2021 Universe::update_heap_info_at_gc(); 2022 2023 if (VerifyDuringGC) { 2024 HandleMark hm; // handle scope 2025 g1h->prepare_for_verify(); 2026 Universe::verify(VerifyOption_G1UsePrevMarking, 2027 " VerifyDuringGC:(after)"); 2028 } 2029 2030 g1h->check_bitmaps("Cleanup End"); 2031 2032 g1h->verify_region_sets_optional(); 2033 2034 // We need to make this a "collection" so any collection pause that 2035 // races with it goes around and waits for completeCleanup to finish. 2036 g1h->increment_total_collections(); 2037 2038 // Clean out dead classes and update Metaspace sizes. 2039 if (ClassUnloadingWithConcurrentMark) { 2040 ClassLoaderDataGraph::purge(); 2041 } 2042 MetaspaceGC::compute_new_size(); 2043 2044 // We reclaimed old regions so we should calculate the sizes to make 2045 // sure we update the old gen/space data. 2046 g1h->g1mm()->update_sizes(); 2047 g1h->allocation_context_stats().update_after_mark(); 2048 2049 g1h->trace_heap_after_concurrent_cycle(); 2050 } 2051 2052 void ConcurrentMark::completeCleanup() { 2053 if (has_aborted()) return; 2054 2055 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2056 2057 _cleanup_list.verify_optional(); 2058 FreeRegionList tmp_free_list("Tmp Free List"); 2059 2060 if (G1ConcRegionFreeingVerbose) { 2061 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 2062 "cleanup list has %u entries", 2063 _cleanup_list.length()); 2064 } 2065 2066 // No one else should be accessing the _cleanup_list at this point, 2067 // so it is not necessary to take any locks. 2068 while (!_cleanup_list.is_empty()) { 2069 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */); 2070 assert(hr != NULL, "Got NULL from a non-empty list"); 2071 hr->par_clear(); 2072 tmp_free_list.add_ordered(hr); 2073 2074 // Instead of adding one region at a time to the secondary_free_list, 2075 // we accumulate them in the local list and move them a few at a 2076 // time. This also cuts down on the number of notify_all() calls 2077 // we do during this process.
We'll also append the local list when 2078 // _cleanup_list is empty (which means we just removed the last 2079 // region from the _cleanup_list). 2080 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || 2081 _cleanup_list.is_empty()) { 2082 if (G1ConcRegionFreeingVerbose) { 2083 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 2084 "appending %u entries to the secondary_free_list, " 2085 "cleanup list still has %u entries", 2086 tmp_free_list.length(), 2087 _cleanup_list.length()); 2088 } 2089 2090 { 2091 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 2092 g1h->secondary_free_list_add(&tmp_free_list); 2093 SecondaryFreeList_lock->notify_all(); 2094 } 2095 #ifndef PRODUCT 2096 if (G1StressConcRegionFreeing) { 2097 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { 2098 os::sleep(Thread::current(), (jlong) 1, false); 2099 } 2100 } 2101 #endif 2102 } 2103 } 2104 assert(tmp_free_list.is_empty(), "post-condition"); 2105 } 2106 2107 // Supporting Object and Oop closures for reference discovery 2108 // and processing during marking 2109 2110 bool G1CMIsAliveClosure::do_object_b(oop obj) { 2111 HeapWord* addr = (HeapWord*)obj; 2112 return addr != NULL && 2113 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); 2114 } 2115 2116 // 'Keep Alive' oop closure used by both serial and parallel reference processing. 2117 // Uses the CMTask associated with a worker thread (for serial reference 2118 // processing the CMTask for worker 0 is used) to preserve (mark) and 2119 // trace referent objects. 2120 // 2121 // Using the CMTask and embedded local queues avoids having the worker 2122 // threads operating on the global mark stack. This reduces the risk 2123 // of overflowing the stack - which we would rather avoid at this late 2124 // stage. Also using the tasks' local queues removes the potential 2125 // of the workers interfering with each other that could occur if 2126 // operating on the global stack. 2127 2128 class G1CMKeepAliveAndDrainClosure: public OopClosure { 2129 ConcurrentMark* _cm; 2130 CMTask* _task; 2131 int _ref_counter_limit; 2132 int _ref_counter; 2133 bool _is_serial; 2134 public: 2135 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2136 _cm(cm), _task(task), _is_serial(is_serial), 2137 _ref_counter_limit(G1RefProcDrainInterval) { 2138 assert(_ref_counter_limit > 0, "sanity"); 2139 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2140 _ref_counter = _ref_counter_limit; 2141 } 2142 2143 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2144 virtual void do_oop( oop* p) { do_oop_work(p); } 2145 2146 template <class T> void do_oop_work(T* p) { 2147 if (!_cm->has_overflown()) { 2148 oop obj = oopDesc::load_decode_heap_oop(p); 2149 if (_cm->verbose_high()) { 2150 gclog_or_tty->print_cr("\t[%u] we're looking at location " 2151 "*"PTR_FORMAT" = "PTR_FORMAT, 2152 _task->worker_id(), p2i(p), p2i((void*) obj)); 2153 } 2154 2155 _task->deal_with_reference(obj); 2156 _ref_counter--; 2157 2158 if (_ref_counter == 0) { 2159 // We have dealt with _ref_counter_limit references, pushing them 2160 // and objects reachable from them on to the local stack (and 2161 // possibly the global stack). Call CMTask::do_marking_step() to 2162 // process these entries. 2163 // 2164 // We call CMTask::do_marking_step() in a loop, which we'll exit if 2165 // there's nothing more to do (i.e. we're done with the entries that 2166 // were pushed as a result of the CMTask::deal_with_reference() calls 2167 // above) or we overflow. 2168 // 2169 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2170 // flag while there may still be some work to do. (See the comment at 2171 // the beginning of CMTask::do_marking_step() for those conditions - 2172 // one of which is reaching the specified time target.) It is only 2173 // when CMTask::do_marking_step() returns without setting the 2174 // has_aborted() flag that the marking step has completed. 2175 do { 2176 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2177 _task->do_marking_step(mark_step_duration_ms, 2178 false /* do_termination */, 2179 _is_serial); 2180 } while (_task->has_aborted() && !_cm->has_overflown()); 2181 _ref_counter = _ref_counter_limit; 2182 } 2183 } else { 2184 if (_cm->verbose_high()) { 2185 gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id()); 2186 } 2187 } 2188 } 2189 }; 2190 2191 // 'Drain' oop closure used by both serial and parallel reference processing. 2192 // Uses the CMTask associated with a given worker thread (for serial 2193 // reference processing the CMTask for worker 0 is used). Calls the 2194 // do_marking_step routine, with an unbelievably large timeout value, 2195 // to drain the marking data structures of the remaining entries 2196 // added by the 'keep alive' oop closure above. 2197 2198 class G1CMDrainMarkingStackClosure: public VoidClosure { 2199 ConcurrentMark* _cm; 2200 CMTask* _task; 2201 bool _is_serial; 2202 public: 2203 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 2204 _cm(cm), _task(task), _is_serial(is_serial) { 2205 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 2206 } 2207 2208 void do_void() { 2209 do { 2210 if (_cm->verbose_high()) { 2211 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s", 2212 _task->worker_id(), BOOL_TO_STR(_is_serial)); 2213 } 2214 2215 // We call CMTask::do_marking_step() to completely drain the local 2216 // and global marking stacks of entries pushed by the 'keep alive' 2217 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). 2218 // 2219 // CMTask::do_marking_step() is called in a loop, which we'll exit 2220 // if there's nothing more to do (i.e. we've completely drained the 2221 // entries that were pushed as a result of applying the 'keep alive' 2222 // closure to the entries on the discovered ref lists) or we overflow 2223 // the global marking stack. 2224 // 2225 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2226 // flag while there may still be some work to do. (See the comment at 2227 // the beginning of CMTask::do_marking_step() for those conditions - 2228 // one of which is reaching the specified time target.) It is only 2229 // when CMTask::do_marking_step() returns without setting the 2230 // has_aborted() flag that the marking step has completed.
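// A note on the timeout below: 1000000000.0 ms is simply a value so large
// that the time-based abort inside do_marking_step() effectively cannot
// trigger, so the loop only repeats after an abort caused by something
// else, and gives up entirely on overflow.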
2231 2232 _task->do_marking_step(1000000000.0 /* something very large */, 2233 true /* do_termination */, 2234 _is_serial); 2235 } while (_task->has_aborted() && !_cm->has_overflown()); 2236 } 2237 }; 2238 2239 // Implementation of AbstractRefProcTaskExecutor for parallel 2240 // reference processing at the end of G1 concurrent marking 2241 2242 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 2243 private: 2244 G1CollectedHeap* _g1h; 2245 ConcurrentMark* _cm; 2246 WorkGang* _workers; 2247 uint _active_workers; 2248 2249 public: 2250 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 2251 ConcurrentMark* cm, 2252 WorkGang* workers, 2253 uint n_workers) : 2254 _g1h(g1h), _cm(cm), 2255 _workers(workers), _active_workers(n_workers) { } 2256 2257 // Executes the given task using concurrent marking worker threads. 2258 virtual void execute(ProcessTask& task); 2259 virtual void execute(EnqueueTask& task); 2260 }; 2261 2262 class G1CMRefProcTaskProxy: public AbstractGangTask { 2263 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2264 ProcessTask& _proc_task; 2265 G1CollectedHeap* _g1h; 2266 ConcurrentMark* _cm; 2267 2268 public: 2269 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2270 G1CollectedHeap* g1h, 2271 ConcurrentMark* cm) : 2272 AbstractGangTask("Process reference objects in parallel"), 2273 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 2274 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 2275 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 2276 } 2277 2278 virtual void work(uint worker_id) { 2279 ResourceMark rm; 2280 HandleMark hm; 2281 CMTask* task = _cm->task(worker_id); 2282 G1CMIsAliveClosure g1_is_alive(_g1h); 2283 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 2284 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 2285 2286 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2287 } 2288 }; 2289 2290 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2291 assert(_workers != NULL, "Need parallel worker threads."); 2292 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2293 2294 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2295 2296 // We need to reset the concurrency level before each 2297 // proxy task execution, so that the termination protocol 2298 // and overflow handling in CMTask::do_marking_step() knows 2299 // how many workers to wait for. 2300 _cm->set_concurrency(_active_workers); 2301 _workers->run_task(&proc_task_proxy); 2302 } 2303 2304 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2305 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2306 EnqueueTask& _enq_task; 2307 2308 public: 2309 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2310 AbstractGangTask("Enqueue reference objects in parallel"), 2311 _enq_task(enq_task) { } 2312 2313 virtual void work(uint worker_id) { 2314 _enq_task.work(worker_id); 2315 } 2316 }; 2317 2318 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2319 assert(_workers != NULL, "Need parallel worker threads."); 2320 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2321 2322 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2323 2324 // Not strictly necessary but... 2325 // 2326 // We need to reset the concurrency level before each 2327 // proxy task execution, so that the termination protocol 2328 // and overflow handling in CMTask::do_marking_step() knows 2329 // how many workers to wait for. 
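// Enqueueing is simpler than processing: G1CMRefEnqueueTaskProxy::work()
// just runs the underlying EnqueueTask for its worker id and does not touch
// the marking stacks, hence the "not strictly necessary" caveat above.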
2330 _cm->set_concurrency(_active_workers); 2331 _workers->run_task(&enq_task_proxy); 2332 } 2333 2334 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2335 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2336 } 2337 2338 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2339 if (has_overflown()) { 2340 // Skip processing the discovered references if we have 2341 // overflown the global marking stack. Reference objects 2342 // only get discovered once so it is OK not to 2343 // de-populate the discovered reference lists. We could have, 2344 // but the only benefit would be that, when marking restarts, 2345 // fewer reference objects are discovered. 2346 return; 2347 } 2348 2349 ResourceMark rm; 2350 HandleMark hm; 2351 2352 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2353 2354 // Is alive closure. 2355 G1CMIsAliveClosure g1_is_alive(g1h); 2356 2357 // Inner scope to exclude the cleaning of the string and symbol 2358 // tables from the displayed time. 2359 { 2360 G1CMTraceTime t("GC ref-proc", G1Log::finer()); 2361 2362 ReferenceProcessor* rp = g1h->ref_processor_cm(); 2363 2364 // See the comment in G1CollectedHeap::ref_processing_init() 2365 // about how reference processing currently works in G1. 2366 2367 // Set the soft reference policy 2368 rp->setup_policy(clear_all_soft_refs); 2369 assert(_markStack.isEmpty(), "mark stack should be empty"); 2370 2371 // Instances of the 'Keep Alive' and 'Complete GC' closures used 2372 // in serial reference processing. Note these closures are also 2373 // used for serially processing (by the current thread) the 2374 // JNI references during parallel reference processing. 2375 // 2376 // These closures do not need to synchronize with the worker 2377 // threads involved in parallel reference processing as these 2378 // instances are executed serially by the current thread (i.e. 2379 // reference processing is not multi-threaded and is thus 2380 // performed by the current thread instead of a gang worker). 2381 // 2382 // The gang tasks involved in parallel reference processing create 2383 // their own instances of these closures, which do their own 2384 // synchronization among themselves. 2385 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 2386 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 2387 2388 // We need at least one active thread. If reference processing 2389 // is not multi-threaded we use the current (VMThread) thread, 2390 // otherwise we use the work gang from the G1CollectedHeap and 2391 // we utilize all the worker threads we can. 2392 bool processing_is_mt = rp->processing_is_mt(); 2393 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U); 2394 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); 2395 2396 // Parallel processing task executor. 2397 G1CMRefProcTaskExecutor par_task_executor(g1h, this, 2398 g1h->workers(), active_workers); 2399 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 2400 2401 // Set the concurrency level. The phase was already set prior to 2402 // executing the remark task. 2403 set_concurrency(active_workers); 2404 2405 // Set the degree of MT processing here. If the discovery was done MT, 2406 // the number of threads involved during discovery could differ from 2407 // the number of active workers.
This is OK as long as the discovered 2408 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 2409 rp->set_active_mt_degree(active_workers); 2410 2411 // Process the weak references. 2412 const ReferenceProcessorStats& stats = 2413 rp->process_discovered_references(&g1_is_alive, 2414 &g1_keep_alive, 2415 &g1_drain_mark_stack, 2416 executor, 2417 g1h->gc_timer_cm(), 2418 concurrent_gc_id()); 2419 g1h->gc_tracer_cm()->report_gc_reference_stats(stats); 2420 2421 // The do_oop work routines of the keep_alive and drain_marking_stack 2422 // oop closures will set the has_overflown flag if we overflow the 2423 // global marking stack. 2424 2425 assert(_markStack.overflow() || _markStack.isEmpty(), 2426 "mark stack should be empty (unless it overflowed)"); 2427 2428 if (_markStack.overflow()) { 2429 // This should have been done already when we tried to push an 2430 // entry on to the global mark stack. But let's do it again. 2431 set_has_overflown(); 2432 } 2433 2434 assert(rp->num_q() == active_workers, "why not"); 2435 2436 rp->enqueue_discovered_references(executor); 2437 2438 rp->verify_no_references_recorded(); 2439 assert(!rp->discovery_enabled(), "Post condition"); 2440 } 2441 2442 if (has_overflown()) { 2443 // We can not trust g1_is_alive if the marking stack overflowed 2444 return; 2445 } 2446 2447 assert(_markStack.isEmpty(), "Marking should have completed"); 2448 2449 // Unload Klasses, String, Symbols, Code Cache, etc. 2450 { 2451 G1CMTraceTime trace("Unloading", G1Log::finer()); 2452 2453 if (ClassUnloadingWithConcurrentMark) { 2454 bool purged_classes; 2455 2456 { 2457 G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest()); 2458 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); 2459 } 2460 2461 { 2462 G1CMTraceTime trace("Parallel Unloading", G1Log::finest()); 2463 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2464 } 2465 } 2466 2467 if (G1StringDedup::is_enabled()) { 2468 G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest()); 2469 G1StringDedup::unlink(&g1_is_alive); 2470 } 2471 } 2472 } 2473 2474 void ConcurrentMark::swapMarkBitMaps() { 2475 CMBitMapRO* temp = _prevMarkBitMap; 2476 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2477 _nextMarkBitMap = (CMBitMap*) temp; 2478 } 2479 2480 // Closure for marking entries in SATB buffers. 2481 class CMSATBBufferClosure : public SATBBufferClosure { 2482 private: 2483 CMTask* _task; 2484 G1CollectedHeap* _g1h; 2485 2486 // This is very similar to CMTask::deal_with_reference, but with 2487 // more relaxed requirements for the argument, so this must be more 2488 // circumspect about treating the argument as an object. 2489 void do_entry(void* entry) const { 2490 _task->increment_refs_reached(); 2491 HeapRegion* hr = _g1h->heap_region_containing_raw(entry); 2492 if (entry < hr->next_top_at_mark_start()) { 2493 // Until we get here, we don't know whether entry refers to a valid 2494 // object; it could instead have been a stale reference. 
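// (Entries at or above NTAMS are skipped by the check above: objects
// allocated since marking started are implicitly live and need no
// explicit marking.)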
2495 oop obj = static_cast<oop>(entry); 2496 assert(obj->is_oop(true /* ignore mark word */), 2497 err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj))); 2498 _task->make_reference_grey(obj, hr); 2499 } 2500 } 2501 2502 public: 2503 CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h) 2504 : _task(task), _g1h(g1h) { } 2505 2506 virtual void do_buffer(void** buffer, size_t size) { 2507 for (size_t i = 0; i < size; ++i) { 2508 do_entry(buffer[i]); 2509 } 2510 } 2511 }; 2512 2513 class G1RemarkThreadsClosure : public ThreadClosure { 2514 CMSATBBufferClosure _cm_satb_cl; 2515 G1CMOopClosure _cm_cl; 2516 MarkingCodeBlobClosure _code_cl; 2517 int _thread_parity; 2518 2519 public: 2520 G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) : 2521 _cm_satb_cl(task, g1h), 2522 _cm_cl(g1h, g1h->concurrent_mark(), task), 2523 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), 2524 _thread_parity(Threads::thread_claim_parity()) {} 2525 2526 void do_thread(Thread* thread) { 2527 if (thread->is_Java_thread()) { 2528 if (thread->claim_oops_do(true, _thread_parity)) { 2529 JavaThread* jt = (JavaThread*)thread; 2530 2531 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking; 2532 // however, oops reachable from nmethods have very complex lifecycles: 2533 // * Alive if on the stack of an executing method 2534 // * Weakly reachable otherwise 2535 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be 2536 // live by the SATB invariant, but other oops recorded in nmethods may behave differently. 2537 jt->nmethods_do(&_code_cl); 2538 2539 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl); 2540 } 2541 } else if (thread->is_VM_thread()) { 2542 if (thread->claim_oops_do(true, _thread_parity)) { 2543 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl); 2544 } 2545 } 2546 } 2547 }; 2548 2549 class CMRemarkTask: public AbstractGangTask { 2550 private: 2551 ConcurrentMark* _cm; 2552 public: 2553 void work(uint worker_id) { 2554 // Since all available tasks are actually started, we should 2555 // only proceed if we're supposed to be active. 2556 if (worker_id < _cm->active_tasks()) { 2557 CMTask* task = _cm->task(worker_id); 2558 task->record_start_time(); 2559 { 2560 ResourceMark rm; 2561 HandleMark hm; 2562 2563 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task); 2564 Threads::threads_do(&threads_f); 2565 } 2566 2567 do { 2568 task->do_marking_step(1000000000.0 /* something very large */, 2569 true /* do_termination */, 2570 false /* is_serial */); 2571 } while (task->has_aborted() && !_cm->has_overflown()); 2572 // If we overflow, then we do not want to restart. We instead 2573 // want to abort remark and do concurrent marking again. 2574 task->record_end_time(); 2575 } 2576 } 2577 2578 CMRemarkTask(ConcurrentMark* cm, uint active_workers) : 2579 AbstractGangTask("Par Remark"), _cm(cm) { 2580 _cm->terminator()->reset_for_reuse(active_workers); 2581 } 2582 }; 2583 2584 void ConcurrentMark::checkpointRootsFinalWork() { 2585 ResourceMark rm; 2586 HandleMark hm; 2587 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2588 2589 G1CMTraceTime trace("Finalize Marking", G1Log::finer()); 2590 2591 g1h->ensure_parsability(false); 2592 2593 // this is remark, so we'll use up all active threads 2594 uint active_workers = g1h->workers()->active_workers(); 2595 if (active_workers == 0) { 2596 assert(active_workers > 0, "Should have been set earlier"); 2597 active_workers = (uint) ParallelGCThreads; 2598 g1h->workers()->set_active_workers(active_workers); 2599 } 2600 set_concurrency_and_phase(active_workers, false /* concurrent */); 2601 // Leave _parallel_marking_threads at its 2602 // value originally calculated in the ConcurrentMark 2603 // constructor and pass values of the active workers 2604 // through the gang in the task. 2605 2606 { 2607 StrongRootsScope srs(active_workers); 2608 2609 CMRemarkTask remarkTask(this, active_workers); 2610 // We will start all available threads, even if we decide that the 2611 // active_workers will be fewer. The extra ones will just bail out 2612 // immediately. 2613 g1h->workers()->run_task(&remarkTask); 2614 } 2615 2616 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2617 guarantee(has_overflown() || 2618 satb_mq_set.completed_buffers_num() == 0, 2619 err_msg("Invariant: has_overflown = %s, num buffers = %d", 2620 BOOL_TO_STR(has_overflown()), 2621 satb_mq_set.completed_buffers_num())); 2622 2623 print_stats(); 2624 } 2625 2626 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2627 // Note we are overriding the read-only view of the prev map here, via 2628 // the cast. 2629 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2630 } 2631 2632 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) { 2633 _nextMarkBitMap->clearRange(mr); 2634 } 2635 2636 HeapRegion* 2637 ConcurrentMark::claim_region(uint worker_id) { 2638 // "checkpoint" the finger 2639 HeapWord* finger = _finger; 2640 2641 // _heap_end will not change underneath our feet; it only changes at 2642 // yield points. 2643 while (finger < _heap_end) { 2644 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2645 2646 // Note on how this code handles humongous regions. In the 2647 // normal case the finger will reach the start of a "starts 2648 // humongous" (SH) region. Its end will either be the end of the 2649 // last "continues humongous" (CH) region in the sequence, or the 2650 // standard end of the SH region (if the SH is the only region in 2651 // the sequence). That way claim_region() will skip over the CH 2652 // regions. However, there is a subtle race between a CM thread 2653 // executing this method and a mutator thread doing a humongous 2654 // object allocation. The two are not mutually exclusive as the CM 2655 // thread does not need to hold the Heap_lock when it gets 2656 // here. So there is a chance that claim_region() will come across 2657 // a free region that's in the process of becoming a SH or a CH 2658 // region. In the former case, it will either 2659 // a) Miss the update to the region's end, in which case it will 2660 // visit every subsequent CH region, find their bitmaps 2661 // empty, and do nothing, or 2662 // b) Observe the update of the region's end (in which case 2663 // it will skip the subsequent CH regions). 2664 // If it comes across a region that suddenly becomes CH, the 2665 // scenario will be similar to b). So, the race between 2666 // claim_region() and a humongous object allocation might force us 2667 // to do a bit of unnecessary work (due to some unnecessary bitmap 2668 // iterations) but it should not introduce any correctness issues. 2669 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); 2670 2671 // heap_region_containing_raw above may return NULL as we always claim 2672 // regions up to the end of the heap. In this case, just jump to the next region. 2673 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; 2674 2675 // Is the gap between reading the finger and doing the CAS too long? 2676 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); 2677 if (res == finger && curr_region != NULL) { 2678 // we succeeded 2679 HeapWord* bottom = curr_region->bottom(); 2680 HeapWord* limit = curr_region->next_top_at_mark_start(); 2681 2682 if (verbose_low()) { 2683 gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " 2684 "["PTR_FORMAT", "PTR_FORMAT"), " 2685 "limit = "PTR_FORMAT, 2686 worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit)); 2687 } 2688 2689 // notice that _finger == end cannot be guaranteed here since 2690 // someone else might have moved the finger even further 2691 assert(_finger >= end, "the finger should have moved forward"); 2692 2693 if (verbose_low()) { 2694 gclog_or_tty->print_cr("[%u] we were successful with region = " 2695 PTR_FORMAT, worker_id, p2i(curr_region)); 2696 } 2697 2698 if (limit > bottom) { 2699 if (verbose_low()) { 2700 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, " 2701 "returning it ", worker_id, p2i(curr_region)); 2702 } 2703 return curr_region; 2704 } else { 2705 assert(limit == bottom, 2706 "the region limit should be at bottom"); 2707 if (verbose_low()) { 2708 gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, " 2709 "returning NULL", worker_id, p2i(curr_region)); 2710 } 2711 // we return NULL and the caller should try calling 2712 // claim_region() again.
2713 return NULL; 2714 } 2715 } else { 2716 assert(_finger > finger, "the finger should have moved forward"); 2717 if (verbose_low()) { 2718 if (curr_region == NULL) { 2719 gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " 2720 "global finger = "PTR_FORMAT", " 2721 "our finger = "PTR_FORMAT, 2722 worker_id, p2i(_finger), p2i(finger)); 2723 } else { 2724 gclog_or_tty->print_cr("[%u] somebody else moved the finger, " 2725 "global finger = "PTR_FORMAT", " 2726 "our finger = "PTR_FORMAT, 2727 worker_id, p2i(_finger), p2i(finger)); 2728 } 2729 } 2730 2731 // read it again 2732 finger = _finger; 2733 } 2734 } 2735 2736 return NULL; 2737 } 2738 2739 #ifndef PRODUCT 2740 enum VerifyNoCSetOopsPhase { 2741 VerifyNoCSetOopsStack, 2742 VerifyNoCSetOopsQueues 2743 }; 2744 2745 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure { 2746 private: 2747 G1CollectedHeap* _g1h; 2748 VerifyNoCSetOopsPhase _phase; 2749 int _info; 2750 2751 const char* phase_str() { 2752 switch (_phase) { 2753 case VerifyNoCSetOopsStack: return "Stack"; 2754 case VerifyNoCSetOopsQueues: return "Queue"; 2755 default: ShouldNotReachHere(); 2756 } 2757 return NULL; 2758 } 2759 2760 void do_object_work(oop obj) { 2761 guarantee(!_g1h->obj_in_cs(obj), 2762 err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d", 2763 p2i((void*) obj), phase_str(), _info)); 2764 } 2765 2766 public: 2767 VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { } 2768 2769 void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) { 2770 _phase = phase; 2771 _info = info; 2772 } 2773 2774 virtual void do_oop(oop* p) { 2775 oop obj = oopDesc::load_decode_heap_oop(p); 2776 do_object_work(obj); 2777 } 2778 2779 virtual void do_oop(narrowOop* p) { 2780 // We should not come across narrow oops while scanning marking 2781 // stacks 2782 ShouldNotReachHere(); 2783 } 2784 2785 virtual void do_object(oop obj) { 2786 do_object_work(obj); 2787 } 2788 }; 2789 2790 void ConcurrentMark::verify_no_cset_oops() { 2791 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2792 if (!G1CollectedHeap::heap()->mark_in_progress()) { 2793 return; 2794 } 2795 2796 VerifyNoCSetOopsClosure cl; 2797 2798 // Verify entries on the global mark stack 2799 cl.set_phase(VerifyNoCSetOopsStack); 2800 _markStack.oops_do(&cl); 2801 2802 // Verify entries on the task queues 2803 for (uint i = 0; i < _max_worker_id; i += 1) { 2804 cl.set_phase(VerifyNoCSetOopsQueues, i); 2805 CMTaskQueue* queue = _task_queues->queue(i); 2806 queue->oops_do(&cl); 2807 } 2808 2809 // Verify the global finger 2810 HeapWord* global_finger = finger(); 2811 if (global_finger != NULL && global_finger < _heap_end) { 2812 // The global finger always points to a heap region boundary. We 2813 // use heap_region_containing_raw() to get the containing region 2814 // given that the global finger could be pointing to a free region 2815 // which subsequently becomes continues humongous. If that 2816 // happens, heap_region_containing() will return the bottom of the 2817 // corresponding starts humongous region and the check below will 2818 // not hold any more. 2819 // Since we always iterate over all regions, we might get a NULL HeapRegion 2820 // here. 
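// The bottom-of-region invariant checked below follows from claim_region():
// the finger only ever advances in whole-region steps, either to
// curr_region->end() or by HeapRegion::GrainWords over uncommitted space,
// so a non-NULL containing region must start exactly at the finger.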
2821 HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); 2822 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 2823 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, 2824 p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); 2825 } 2826 2827 // Verify the task fingers 2828 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2829 for (int i = 0; i < (int) parallel_marking_threads(); i += 1) { 2830 CMTask* task = _tasks[i]; 2831 HeapWord* task_finger = task->finger(); 2832 if (task_finger != NULL && task_finger < _heap_end) { 2833 // See above note on the global finger verification. 2834 HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); 2835 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 2836 !task_hr->in_collection_set(), 2837 err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, 2838 p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); 2839 } 2840 } 2841 } 2842 #endif // PRODUCT 2843 2844 // Aggregate the counting data that was constructed concurrently 2845 // with marking. 2846 class AggregateCountDataHRClosure: public HeapRegionClosure { 2847 G1CollectedHeap* _g1h; 2848 ConcurrentMark* _cm; 2849 CardTableModRefBS* _ct_bs; 2850 BitMap* _cm_card_bm; 2851 uint _max_worker_id; 2852 2853 public: 2854 AggregateCountDataHRClosure(G1CollectedHeap* g1h, 2855 BitMap* cm_card_bm, 2856 uint max_worker_id) : 2857 _g1h(g1h), _cm(g1h->concurrent_mark()), 2858 _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())), 2859 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { } 2860 2861 bool doHeapRegion(HeapRegion* hr) { 2862 if (hr->is_continues_humongous()) { 2863 // We will ignore these here and process them when their 2864 // associated "starts humongous" region is processed. 2865 // Note that we cannot rely on their associated 2866 // "starts humongous" region to have their bit set to 1 2867 // since, due to the region chunking in the parallel region 2868 // iteration, a "continues humongous" region might be visited 2869 // before its associated "starts humongous". 2870 return false; 2871 } 2872 2873 HeapWord* start = hr->bottom(); 2874 HeapWord* limit = hr->next_top_at_mark_start(); 2875 HeapWord* end = hr->end(); 2876 2877 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(), 2878 err_msg("Preconditions not met - " 2879 "start: "PTR_FORMAT", limit: "PTR_FORMAT", " 2880 "top: "PTR_FORMAT", end: "PTR_FORMAT, 2881 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()))); 2882 2883 assert(hr->next_marked_bytes() == 0, "Precondition"); 2884 2885 if (start == limit) { 2886 // NTAMS of this region has not been set so nothing to do. 2887 return false; 2888 } 2889 2890 // 'start' should be in the heap. 2891 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity"); 2892 // 'end' *may* be just beyond the end of the heap (if hr is the last region) 2893 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity"); 2894 2895 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); 2896 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit); 2897 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end); 2898 2899 // If ntams is not card aligned then we bump card bitmap index 2900 // for limit so that we get all the cards spanned by 2901 // the object ending at ntams. 2902 // Note: if this is the last region in the heap then ntams 2903 // could be actually just beyond the end of the heap; 2904 // limit_idx will then correspond to a (non-existent) card 2905 // that is also outside the heap. 2906 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) { 2907 limit_idx += 1; 2908 } 2909 2910 assert(limit_idx <= end_idx, "or else use atomics"); 2911 2912 // Aggregate the "stripe" in the count data associated with hr. 2913 uint hrm_index = hr->hrm_index(); 2914 size_t marked_bytes = 0; 2915 2916 for (uint i = 0; i < _max_worker_id; i += 1) { 2917 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i); 2918 BitMap* task_card_bm = _cm->count_card_bitmap_for(i); 2919 2920 // Fetch the marked_bytes in this region for task i and 2921 // add it to the running total for this region. 2922 marked_bytes += marked_bytes_array[hrm_index]; 2923 2924 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx) 2925 // into the global card bitmap. 2926 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 2927 2928 while (scan_idx < limit_idx) { 2929 assert(task_card_bm->at(scan_idx) == true, "should be"); 2930 _cm_card_bm->set_bit(scan_idx); 2931 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 2932 2933 // BitMap::get_next_one_offset() can handle the case when 2934 // its left_offset parameter is greater than its right_offset 2935 // parameter. It does, however, have an early exit if 2936 // left_offset == right_offset. So let's limit the value 2937 // passed in for left offset here. 2938 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 2939 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 2940 } 2941 } 2942 2943 // Update the marked bytes for this region. 2944 hr->add_to_marked_bytes(marked_bytes); 2945 2946 // Next heap region 2947 return false; 2948 } 2949 }; 2950 2951 class G1AggregateCountDataTask: public AbstractGangTask { 2952 protected: 2953 G1CollectedHeap* _g1h; 2954 ConcurrentMark* _cm; 2955 BitMap* _cm_card_bm; 2956 uint _max_worker_id; 2957 uint _active_workers; 2958 HeapRegionClaimer _hrclaimer; 2959 2960 public: 2961 G1AggregateCountDataTask(G1CollectedHeap* g1h, 2962 ConcurrentMark* cm, 2963 BitMap* cm_card_bm, 2964 uint max_worker_id, 2965 uint n_workers) : 2966 AbstractGangTask("Count Aggregation"), 2967 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 2968 _max_worker_id(max_worker_id), 2969 _active_workers(n_workers), 2970 _hrclaimer(_active_workers) { 2971 } 2972 2973 void work(uint worker_id) { 2974 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 2975 2976 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 2977 } 2978 }; 2979 2980 2981 void ConcurrentMark::aggregate_count_data() { 2982 uint n_workers = _g1h->workers()->active_workers(); 2983 2984 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 2985 _max_worker_id, n_workers); 2986 2987 _g1h->workers()->run_task(&g1_par_agg_task); 2988 } 2989 2990 // Clear the per-worker arrays used to store the per-region counting data 2991 void ConcurrentMark::clear_all_count_data() { 2992 // Clear the global card bitmap - it will be filled during 2993 // liveness count aggregation (during remark) and the 2994 // final counting task. 2995 _card_bm.clear(); 2996 2997 // Clear the global region bitmap - it will be filled as part 2998 // of the final counting task.
2999 _region_bm.clear(); 3000 3001 uint max_regions = _g1h->max_regions(); 3002 assert(_max_worker_id > 0, "uninitialized"); 3003 3004 for (uint i = 0; i < _max_worker_id; i += 1) { 3005 BitMap* task_card_bm = count_card_bitmap_for(i); 3006 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 3007 3008 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 3009 assert(marked_bytes_array != NULL, "uninitialized"); 3010 3011 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 3012 task_card_bm->clear(); 3013 } 3014 } 3015 3016 void ConcurrentMark::print_stats() { 3017 if (verbose_stats()) { 3018 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3019 for (size_t i = 0; i < _active_tasks; ++i) { 3020 _tasks[i]->print_stats(); 3021 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 3022 } 3023 } 3024 } 3025 3026 // abandon current marking iteration due to a Full GC 3027 void ConcurrentMark::abort() { 3028 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 3029 // concurrent bitmap clearing. 3030 _nextMarkBitMap->clearAll(); 3031 3032 // Note we cannot clear the previous marking bitmap here 3033 // since VerifyDuringGC verifies the objects marked during 3034 // a full GC against the previous bitmap. 3035 3036 // Clear the liveness counting data 3037 clear_all_count_data(); 3038 // Empty mark stack 3039 reset_marking_state(); 3040 for (uint i = 0; i < _max_worker_id; ++i) { 3041 _tasks[i]->clear_region_fields(); 3042 } 3043 _first_overflow_barrier_sync.abort(); 3044 _second_overflow_barrier_sync.abort(); 3045 const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); 3046 if (!gc_id.is_undefined()) { 3047 // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance 3048 // to detect that it was aborted. Only keep track of the first GC id that we aborted. 3049 _aborted_gc_id = gc_id; 3050 } 3051 _has_aborted = true; 3052 3053 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3054 satb_mq_set.abandon_partial_marking(); 3055 // This can be called either during or outside marking, we'll read 3056 // the expected_active value from the SATB queue set. 3057 satb_mq_set.set_active_all_threads( 3058 false, /* new active value */ 3059 satb_mq_set.is_active() /* expected_active */); 3060 3061 _g1h->trace_heap_after_concurrent_cycle(); 3062 _g1h->register_concurrent_cycle_end(); 3063 } 3064 3065 const GCId& ConcurrentMark::concurrent_gc_id() { 3066 if (has_aborted()) { 3067 return _aborted_gc_id; 3068 } 3069 return _g1h->gc_tracer_cm()->gc_id(); 3070 } 3071 3072 static void print_ms_time_info(const char* prefix, const char* name, 3073 NumberSeq& ns) { 3074 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3075 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3076 if (ns.num() > 0) { 3077 gclog_or_tty->print_cr("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 3078 prefix, ns.sd(), ns.maximum()); 3079 } 3080 } 3081 3082 void ConcurrentMark::print_summary_info() { 3083 gclog_or_tty->print_cr(" Concurrent marking:"); 3084 print_ms_time_info(" ", "init marks", _init_times); 3085 print_ms_time_info(" ", "remarks", _remark_times); 3086 { 3087 print_ms_time_info(" ", "final marks", _remark_mark_times); 3088 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3089 3090 } 3091 print_ms_time_info(" ", "cleanups", _cleanup_times); 3092 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3093 _total_counting_time, 3094 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3095 (double)_cleanup_times.num() 3096 : 0.0)); 3097 if (G1ScrubRemSets) { 3098 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3099 _total_rs_scrub_time, 3100 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3101 (double)_cleanup_times.num() 3102 : 0.0)); 3103 } 3104 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3105 (_init_times.sum() + _remark_times.sum() + 3106 _cleanup_times.sum())/1000.0); 3107 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3108 "(%8.2f s marking).", 3109 cmThread()->vtime_accum(), 3110 cmThread()->vtime_mark_accum()); 3111 } 3112 3113 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3114 _parallel_workers->print_worker_threads_on(st); 3115 } 3116 3117 void ConcurrentMark::print_on_error(outputStream* st) const { 3118 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 3119 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 3120 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 3121 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 3122 } 3123 3124 // We take a break if someone is trying to stop the world. 
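// Returns true iff we actually yielded, letting the caller know that a
// safepoint may have intervened; worker 0 additionally records the
// concurrent pause with the policy.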
3125 bool ConcurrentMark::do_yield_check(uint worker_id) { 3126 if (SuspendibleThreadSet::should_yield()) { 3127 if (worker_id == 0) { 3128 _g1h->g1_policy()->record_concurrent_pause(); 3129 } 3130 SuspendibleThreadSet::yield(); 3131 return true; 3132 } else { 3133 return false; 3134 } 3135 } 3136 3137 #ifndef PRODUCT 3138 // for debugging purposes 3139 void ConcurrentMark::print_finger() { 3140 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3141 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3142 for (uint i = 0; i < _max_worker_id; ++i) { 3143 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3144 } 3145 gclog_or_tty->cr(); 3146 } 3147 #endif 3148 3149 template<bool scan> 3150 inline void CMTask::process_grey_object(oop obj) { 3151 assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray"); 3152 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3153 3154 if (_cm->verbose_high()) { 3155 gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT, 3156 _worker_id, p2i((void*) obj)); 3157 } 3158 3159 size_t obj_size = obj->size(); 3160 _words_scanned += obj_size; 3161 3162 if (scan) { 3163 obj->oop_iterate(_cm_oop_closure); 3164 } 3165 statsOnly( ++_objs_scanned ); 3166 check_limits(); 3167 } 3168 3169 template void CMTask::process_grey_object<true>(oop); 3170 template void CMTask::process_grey_object<false>(oop); 3171 3172 // Closure for iteration over bitmaps 3173 class CMBitMapClosure : public BitMapClosure { 3174 private: 3175 // the bitmap that is being iterated over 3176 CMBitMap* _nextMarkBitMap; 3177 ConcurrentMark* _cm; 3178 CMTask* _task; 3179 3180 public: 3181 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3182 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3183 3184 bool do_bit(size_t offset) { 3185 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3186 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3187 assert( addr < _cm->finger(), "invariant"); 3188 3189 statsOnly( _task->increase_objs_found_on_bitmap() ); 3190 assert(addr >= _task->finger(), "invariant"); 3191 3192 // We move that task's local finger along. 
3193 _task->move_finger_to(addr); 3194 3195 _task->scan_object(oop(addr)); 3196 // we only partially drain the local queue and global stack 3197 _task->drain_local_queue(true); 3198 _task->drain_global_stack(true); 3199 3200 // if the has_aborted flag has been raised, we need to bail out of 3201 // the iteration 3202 return !_task->has_aborted(); 3203 } 3204 }; 3205 3206 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3207 ConcurrentMark* cm, 3208 CMTask* task) 3209 : _g1h(g1h), _cm(cm), _task(task) { 3210 assert(_ref_processor == NULL, "should be initialized to NULL"); 3211 3212 if (G1UseConcMarkReferenceProcessing) { 3213 _ref_processor = g1h->ref_processor_cm(); 3214 assert(_ref_processor != NULL, "should not be NULL"); 3215 } 3216 } 3217 3218 void CMTask::setup_for_region(HeapRegion* hr) { 3219 assert(hr != NULL, 3220 "claim_region() should have filtered out NULL regions"); 3221 assert(!hr->is_continues_humongous(), 3222 "claim_region() should have filtered out continues humongous regions"); 3223 3224 if (_cm->verbose_low()) { 3225 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3226 _worker_id, p2i(hr)); 3227 } 3228 3229 _curr_region = hr; 3230 _finger = hr->bottom(); 3231 update_region_limit(); 3232 } 3233 3234 void CMTask::update_region_limit() { 3235 HeapRegion* hr = _curr_region; 3236 HeapWord* bottom = hr->bottom(); 3237 HeapWord* limit = hr->next_top_at_mark_start(); 3238 3239 if (limit == bottom) { 3240 if (_cm->verbose_low()) { 3241 gclog_or_tty->print_cr("[%u] found an empty region " 3242 "["PTR_FORMAT", "PTR_FORMAT")", 3243 _worker_id, p2i(bottom), p2i(limit)); 3244 } 3245 // The region was collected underneath our feet. 3246 // We set the finger to bottom to ensure that the bitmap 3247 // iteration that will follow this will not do anything. 3248 // (this is not a condition that holds when we set the region up, 3249 // as the region is not supposed to be empty in the first place) 3250 _finger = bottom; 3251 } else if (limit >= _region_limit) { 3252 assert(limit >= _finger, "peace of mind"); 3253 } else { 3254 assert(limit < _region_limit, "only way to get here"); 3255 // This can happen under some pretty unusual circumstances. An 3256 // evacuation pause empties the region underneath our feet (NTAMS 3257 // at bottom). We then do some allocation in the region (NTAMS 3258 // stays at bottom), followed by the region being used as a GC 3259 // alloc region (NTAMS will move to top() and the objects 3260 // originally below it will be grayed). All objects now marked in 3261 // the region are explicitly grayed, if below the global finger, 3262 // and in fact we do not need to scan anything else. So, we simply 3263 // set _finger to be limit to ensure that the bitmap iteration 3264 // doesn't do anything. 3265 _finger = limit; 3266 } 3267 3268 _region_limit = limit; 3269 } 3270 3271 void CMTask::giveup_current_region() { 3272 assert(_curr_region != NULL, "invariant"); 3273 if (_cm->verbose_low()) { 3274 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, 3275 _worker_id, p2i(_curr_region)); 3276 } 3277 clear_region_fields(); 3278 } 3279 3280 void CMTask::clear_region_fields() { 3281 // Values for these three fields that indicate that we're not 3282 // holding on to a region.
3283 _curr_region = NULL; 3284 _finger = NULL; 3285 _region_limit = NULL; 3286 } 3287 3288 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3289 if (cm_oop_closure == NULL) { 3290 assert(_cm_oop_closure != NULL, "invariant"); 3291 } else { 3292 assert(_cm_oop_closure == NULL, "invariant"); 3293 } 3294 _cm_oop_closure = cm_oop_closure; 3295 } 3296 3297 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3298 guarantee(nextMarkBitMap != NULL, "invariant"); 3299 3300 if (_cm->verbose_low()) { 3301 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3302 } 3303 3304 _nextMarkBitMap = nextMarkBitMap; 3305 clear_region_fields(); 3306 3307 _calls = 0; 3308 _elapsed_time_ms = 0.0; 3309 _termination_time_ms = 0.0; 3310 _termination_start_time_ms = 0.0; 3311 3312 #if _MARKING_STATS_ 3313 _aborted = 0; 3314 _aborted_overflow = 0; 3315 _aborted_cm_aborted = 0; 3316 _aborted_yield = 0; 3317 _aborted_timed_out = 0; 3318 _aborted_satb = 0; 3319 _aborted_termination = 0; 3320 _steal_attempts = 0; 3321 _steals = 0; 3322 _local_pushes = 0; 3323 _local_pops = 0; 3324 _local_max_size = 0; 3325 _objs_scanned = 0; 3326 _global_pushes = 0; 3327 _global_pops = 0; 3328 _global_max_size = 0; 3329 _global_transfers_to = 0; 3330 _global_transfers_from = 0; 3331 _regions_claimed = 0; 3332 _objs_found_on_bitmap = 0; 3333 _satb_buffers_processed = 0; 3334 #endif // _MARKING_STATS_ 3335 } 3336 3337 bool CMTask::should_exit_termination() { 3338 regular_clock_call(); 3339 // This is called when we are in the termination protocol. We should 3340 // quit if, for some reason, this task wants to abort or the global 3341 // stack is not empty (this means that we can get work from it). 3342 return !_cm->mark_stack_empty() || has_aborted(); 3343 } 3344 3345 void CMTask::reached_limit() { 3346 assert(_words_scanned >= _words_scanned_limit || 3347 _refs_reached >= _refs_reached_limit , 3348 "shouldn't have been called otherwise"); 3349 regular_clock_call(); 3350 } 3351 3352 void CMTask::regular_clock_call() { 3353 if (has_aborted()) return; 3354 3355 // First, we need to recalculate the words scanned and refs reached 3356 // limits for the next clock call. 3357 recalculate_limits(); 3358 3359 // During the regular clock call we do the following 3360 3361 // (1) If an overflow has been flagged, then we abort. 3362 if (_cm->has_overflown()) { 3363 set_has_aborted(); 3364 return; 3365 } 3366 3367 // If we are not concurrent (i.e. we're doing remark) we don't need 3368 // to check anything else. The other steps are only needed during 3369 // the concurrent marking phase. 3370 if (!concurrent()) return; 3371 3372 // (2) If marking has been aborted for Full GC, then we also abort. 3373 if (_cm->has_aborted()) { 3374 set_has_aborted(); 3375 statsOnly( ++_aborted_cm_aborted ); 3376 return; 3377 } 3378 3379 double curr_time_ms = os::elapsedVTime() * 1000.0; 3380 3381 // (3) If marking stats are enabled, then we update the step history. 
3382 #if _MARKING_STATS_ 3383 if (_words_scanned >= _words_scanned_limit) { 3384 ++_clock_due_to_scanning; 3385 } 3386 if (_refs_reached >= _refs_reached_limit) { 3387 ++_clock_due_to_marking; 3388 } 3389 3390 double last_interval_ms = curr_time_ms - _interval_start_time_ms; 3391 _interval_start_time_ms = curr_time_ms; 3392 _all_clock_intervals_ms.add(last_interval_ms); 3393 3394 if (_cm->verbose_medium()) { 3395 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, " 3396 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s", 3397 _worker_id, last_interval_ms, 3398 _words_scanned, 3399 (_words_scanned >= _words_scanned_limit) ? " (*)" : "", 3400 _refs_reached, 3401 (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); 3402 } 3403 #endif // _MARKING_STATS_ 3404 3405 // (4) We check whether we should yield. If we have to, then we abort. 3406 if (SuspendibleThreadSet::should_yield()) { 3407 // We should yield. To do this we abort the task. The caller is 3408 // responsible for yielding. 3409 set_has_aborted(); 3410 statsOnly( ++_aborted_yield ); 3411 return; 3412 } 3413 3414 // (5) We check whether we've reached our time quota. If we have, 3415 // then we abort. 3416 double elapsed_time_ms = curr_time_ms - _start_time_ms; 3417 if (elapsed_time_ms > _time_target_ms) { 3418 set_has_aborted(); 3419 _has_timed_out = true; 3420 statsOnly( ++_aborted_timed_out ); 3421 return; 3422 } 3423 3424 // (6) Finally, we check whether there are enough completed SATB 3425 // buffers available for processing. If there are, we abort. 3426 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3427 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 3428 if (_cm->verbose_low()) { 3429 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers", 3430 _worker_id); 3431 } 3432 // we do need to process SATB buffers, so we'll abort and restart 3433 // the marking task to do so 3434 set_has_aborted(); 3435 statsOnly( ++_aborted_satb ); 3436 return; 3437 } 3438 } 3439 3440 void CMTask::recalculate_limits() { 3441 _real_words_scanned_limit = _words_scanned + words_scanned_period; 3442 _words_scanned_limit = _real_words_scanned_limit; 3443 3444 _real_refs_reached_limit = _refs_reached + refs_reached_period; 3445 _refs_reached_limit = _real_refs_reached_limit; 3446 } 3447 3448 void CMTask::decrease_limits() { 3449 // This is called when we believe that we're going to do an infrequent 3450 // operation which will increase the per byte scanned cost (i.e. move 3451 // entries to/from the global stack). It basically tries to decrease the 3452 // scanning limit so that the clock is called earlier.
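// A worked example of the arithmetic below (P stands for the relevant
// *_period constant; the concrete values live in CMTask and are not
// repeated here): recalculate_limits() sets the real limit to
// current + P, so subtracting 3 * P / 4 leaves the effective limit at
// current + P / 4. In other words, after an expensive operation the
// clock fires after only a quarter of the usual amount of work.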
3453 3454 if (_cm->verbose_medium()) { 3455 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id); 3456 } 3457 3458 _words_scanned_limit = _real_words_scanned_limit - 3459 3 * words_scanned_period / 4; 3460 _refs_reached_limit = _real_refs_reached_limit - 3461 3 * refs_reached_period / 4; 3462 } 3463 3464 void CMTask::move_entries_to_global_stack() { 3465 // local array where we'll store the entries that will be popped 3466 // from the local queue 3467 oop buffer[global_stack_transfer_size]; 3468 3469 int n = 0; 3470 oop obj; 3471 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 3472 buffer[n] = obj; 3473 ++n; 3474 } 3475 3476 if (n > 0) { 3477 // we popped at least one entry from the local queue 3478 3479 statsOnly( ++_global_transfers_to; _local_pops += n ); 3480 3481 if (!_cm->mark_stack_push(buffer, n)) { 3482 if (_cm->verbose_low()) { 3483 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow", 3484 _worker_id); 3485 } 3486 set_has_aborted(); 3487 } else { 3488 // the transfer was successful 3489 3490 if (_cm->verbose_medium()) { 3491 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack", 3492 _worker_id, n); 3493 } 3494 statsOnly( size_t tmp_size = _cm->mark_stack_size(); 3495 if (tmp_size > _global_max_size) { 3496 _global_max_size = tmp_size; 3497 } 3498 _global_pushes += n ); 3499 } 3500 } 3501 3502 // this operation was quite expensive, so decrease the limits 3503 decrease_limits(); 3504 } 3505 3506 void CMTask::get_entries_from_global_stack() { 3507 // local array where we'll store the entries that will be popped 3508 // from the global stack. 3509 oop buffer[global_stack_transfer_size]; 3510 int n; 3511 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 3512 assert(n <= global_stack_transfer_size, 3513 "we should not pop more than the given limit"); 3514 if (n > 0) { 3515 // yes, we did actually pop at least one entry 3516 3517 statsOnly( ++_global_transfers_from; _global_pops += n ); 3518 if (_cm->verbose_medium()) { 3519 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack", 3520 _worker_id, n); 3521 } 3522 for (int i = 0; i < n; ++i) { 3523 bool success = _task_queue->push(buffer[i]); 3524 // We only call this when the local queue is empty or under a 3525 // given target limit. So, we do not expect this push to fail. 3526 assert(success, "invariant"); 3527 } 3528 3529 statsOnly( size_t tmp_size = (size_t)_task_queue->size(); 3530 if (tmp_size > _local_max_size) { 3531 _local_max_size = tmp_size; 3532 } 3533 _local_pushes += n ); 3534 } 3535 3536 // this operation was quite expensive, so decrease the limits 3537 decrease_limits(); 3538 } 3539 3540 void CMTask::drain_local_queue(bool partially) { 3541 if (has_aborted()) return; 3542 3543 // Decide what the target size is, depending on whether we're going to 3544 // drain it partially (so that other tasks can steal if they run out 3545 // of things to do) or totally (at the very end).
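// A minimal illustration of the computation that follows, using
// hypothetical values (the real ones depend on the task queue capacity
// and the GCDrainStackTargetSize flag): with max_elems() == 16384 and
// GCDrainStackTargetSize == 64, a partial drain stops once the queue is
// down to MIN2(16384 / 3, 64) == 64 entries, leaving entries available
// for stealing; a total drain uses a target of 0.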
3546 size_t target_size; 3547 if (partially) { 3548 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 3549 } else { 3550 target_size = 0; 3551 } 3552 3553 if (_task_queue->size() > target_size) { 3554 if (_cm->verbose_high()) { 3555 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT, 3556 _worker_id, target_size); 3557 } 3558 3559 oop obj; 3560 bool ret = _task_queue->pop_local(obj); 3561 while (ret) { 3562 statsOnly( ++_local_pops ); 3563 3564 if (_cm->verbose_high()) { 3565 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id, 3566 p2i((void*) obj)); 3567 } 3568 3569 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 3570 assert(!_g1h->is_on_master_free_list( 3571 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 3572 3573 scan_object(obj); 3574 3575 if (_task_queue->size() <= target_size || has_aborted()) { 3576 ret = false; 3577 } else { 3578 ret = _task_queue->pop_local(obj); 3579 } 3580 } 3581 3582 if (_cm->verbose_high()) { 3583 gclog_or_tty->print_cr("[%u] drained local queue, size = %u", 3584 _worker_id, _task_queue->size()); 3585 } 3586 } 3587 } 3588 3589 void CMTask::drain_global_stack(bool partially) { 3590 if (has_aborted()) return; 3591 3592 // We have a policy to drain the local queue before we attempt to 3593 // drain the global stack. 3594 assert(partially || _task_queue->size() == 0, "invariant"); 3595 3596 // Decide what the target size is, depending on whether we're going to 3597 // drain it partially (so that other tasks can steal if they run out 3598 // of things to do) or totally (at the very end). Notice that, 3599 // because we move entries from the global stack in chunks or 3600 // because another task might be doing the same, we might in fact 3601 // drop below the target. But this is not a problem. 3602 size_t target_size; 3603 if (partially) { 3604 target_size = _cm->partial_mark_stack_size_target(); 3605 } else { 3606 target_size = 0; 3607 } 3608 3609 if (_cm->mark_stack_size() > target_size) { 3610 if (_cm->verbose_low()) { 3611 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT, 3612 _worker_id, target_size); 3613 } 3614 3615 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 3616 get_entries_from_global_stack(); 3617 drain_local_queue(partially); 3618 } 3619 3620 if (_cm->verbose_low()) { 3621 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT, 3622 _worker_id, _cm->mark_stack_size()); 3623 } 3624 } 3625 } 3626 3627 // SATB Queue has several assumptions on whether to call the par or 3628 // non-par versions of the methods. This is why some of the code is 3629 // replicated. We should really get rid of the single-threaded version 3630 // of the code to simplify things. 3631 void CMTask::drain_satb_buffers() { 3632 if (has_aborted()) return; 3633 3634 // We set this so that the regular clock knows that we're in the 3635 // middle of draining buffers and doesn't set the abort flag when it 3636 // notices that SATB buffers are available for draining. It'd be 3637 // very counterproductive if it did that. :-) 3638 _draining_satb_buffers = true; 3639 3640 CMSATBBufferClosure satb_cl(this, _g1h); 3641 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3642 3643 // This keeps claiming and applying the closure to completed buffers 3644 // until we run out of buffers or we need to abort.
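// Note that regular_clock_call() runs once per claimed buffer, so the
// task can abort between buffers (e.g. on overflow, yield or time-out)
// instead of having to work through the whole backlog in one go.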
3645 while (!has_aborted() && 3646 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 3647 if (_cm->verbose_medium()) { 3648 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3649 } 3650 statsOnly( ++_satb_buffers_processed ); 3651 regular_clock_call(); 3652 } 3653 3654 _draining_satb_buffers = false; 3655 3656 assert(has_aborted() || 3657 concurrent() || 3658 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3659 3660 // again, this was a potentially expensive operation, so decrease the 3661 // limits to get the regular clock call early 3662 decrease_limits(); 3663 } 3664 3665 void CMTask::print_stats() { 3666 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3667 _worker_id, _calls); 3668 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3669 _elapsed_time_ms, _termination_time_ms); 3670 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3671 _step_times_ms.num(), _step_times_ms.avg(), 3672 _step_times_ms.sd()); 3673 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3674 _step_times_ms.maximum(), _step_times_ms.sum()); 3675 3676 #if _MARKING_STATS_ 3677 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3678 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3679 _all_clock_intervals_ms.sd()); 3680 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3681 _all_clock_intervals_ms.maximum(), 3682 _all_clock_intervals_ms.sum()); 3683 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT, 3684 _clock_due_to_scanning, _clock_due_to_marking); 3685 gclog_or_tty->print_cr(" Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT, 3686 _objs_scanned, _objs_found_on_bitmap); 3687 gclog_or_tty->print_cr(" Local Queue: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3688 _local_pushes, _local_pops, _local_max_size); 3689 gclog_or_tty->print_cr(" Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3690 _global_pushes, _global_pops, _global_max_size); 3691 gclog_or_tty->print_cr(" transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT, 3692 _global_transfers_to, _global_transfers_from); 3693 gclog_or_tty->print_cr(" Regions: claimed = " SIZE_FORMAT, _regions_claimed); 3694 gclog_or_tty->print_cr(" SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed); 3695 gclog_or_tty->print_cr(" Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT, 3696 _steal_attempts, _steals); 3697 gclog_or_tty->print_cr(" Aborted: " SIZE_FORMAT ", due to", _aborted); 3698 gclog_or_tty->print_cr(" overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT, 3699 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3700 gclog_or_tty->print_cr(" time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT, 3701 _aborted_timed_out, _aborted_satb, _aborted_termination); 3702 #endif // _MARKING_STATS_ 3703 } 3704 3705 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) { 3706 return _task_queues->steal(worker_id, hash_seed, obj); 3707 } 3708 3709 /***************************************************************************** 3710 3711 The do_marking_step(time_target_ms, ...) method is the building 3712 block of the parallel marking framework.
It can be called in parallel 3713 with other invocations of do_marking_step() on different tasks 3714 (but only one per task, obviously) and concurrently with the 3715 mutator threads, or during remark, hence it eliminates the need 3716 for two versions of the code. When called during remark, it will 3717 pick up from where the task left off during the concurrent marking 3718 phase. Interestingly, tasks are also claimable during evacuation 3719 pauses, since do_marking_step() ensures that it aborts before 3720 it needs to yield. 3721 3722 The data structures that it uses to do marking work are the 3723 following: 3724 3725 (1) Marking Bitmap. If there are gray objects that appear only 3726 on the bitmap (this happens either when dealing with an overflow 3727 or when the initial marking phase has simply marked the roots 3728 and didn't push them on the stack), then tasks claim heap 3729 regions whose bitmap they then scan to find gray objects. A 3730 global finger indicates where the end of the last claimed region 3731 is. A local finger indicates how far into the region a task has 3732 scanned. The two fingers are used to determine how to gray an 3733 object (i.e. whether simply marking it is OK, as it will be 3734 visited by a task in the future, or whether it also needs to be 3735 pushed on a stack). 3736 3737 (2) Local Queue. The task's local queue, which it can access 3738 reasonably efficiently. Other tasks can steal from 3739 it when they run out of work. Throughout the marking phase, a 3740 task attempts to keep its local queue short but not totally 3741 empty, so that entries are available for stealing by other 3742 tasks. Only when there is no more work will a task totally 3743 drain its local queue. 3744 3745 (3) Global Mark Stack. This handles local queue overflow. During 3746 marking only sets of entries are moved between it and the local 3747 queues, as access to it requires a mutex, and finer-grained 3748 interaction with it might cause contention. If it 3749 overflows, then the marking phase should restart and iterate 3750 over the bitmap to identify gray objects. Throughout the marking 3751 phase, tasks attempt to keep the global mark stack at a small 3752 length but not totally empty, so that entries are available for 3753 popping by other tasks. Only when there is no more work will 3754 tasks totally drain the global mark stack. 3755 3756 (4) SATB Buffer Queue. This is where completed SATB buffers are 3757 made available. Buffers are regularly removed from this queue 3758 and scanned for roots, so that the queue doesn't get too 3759 long. During remark, all completed buffers are processed, as 3760 well as the filled-in parts of any uncompleted buffers. 3761 3762 The do_marking_step() method tries to abort when the time target 3763 has been reached. There are a few other cases when the 3764 do_marking_step() method also aborts: 3765 3766 (1) When the marking phase has been aborted (after a Full GC). 3767 3768 (2) When a global overflow (on the global stack) has been 3769 triggered. Before the task aborts, it will actually sync up with 3770 the other tasks to ensure that all the marking data structures 3771 (local queues, stacks, fingers etc.) are re-initialized so that 3772 when do_marking_step() completes, the marking phase can 3773 immediately restart. 3774 3775 (3) When enough completed SATB buffers are available. The 3776 do_marking_step() method only tries to drain SATB buffers right 3777 at the beginning.
So, if enough buffers are available, the 3778 marking step aborts and the SATB buffers are processed at 3779 the beginning of the next invocation. 3780 3781 (4) To yield. When we have to yield, we abort and yield 3782 right at the end of do_marking_step(). This saves us a lot 3783 of hassle as, by yielding, we might allow a Full GC. If this 3784 happens then objects will be compacted underneath our feet, the 3785 heap might shrink, etc. We save checking for this by just 3786 aborting and doing the yield right at the end. 3787 3788 From the above it follows that the do_marking_step() method should 3789 be called in a loop (or, otherwise, regularly) until it completes. 3790 3791 If a marking step completes without its has_aborted() flag being 3792 true, it means it has completed the current marking phase (and 3793 also all other marking tasks have done so and have all synced up). 3794 3795 A method called regular_clock_call() is invoked "regularly" (at 3796 sub-millisecond intervals) throughout marking. It is this clock method that 3797 checks all the abort conditions which were mentioned above and 3798 decides when the task should abort. A work-based scheme is used to 3799 trigger this clock method: when the number of object words the 3800 marking phase has scanned or the number of references the marking 3801 phase has visited reaches a given limit. Additional invocations of 3802 the clock method have been planted in a few other strategic places 3803 too. The initial reason for the clock method was to avoid calling 3804 vtime too regularly, as it is quite expensive. So, once it was in 3805 place, it was natural to piggy-back all the other conditions on it 3806 too and not constantly check them throughout the code. 3807 3808 If do_termination is true then do_marking_step will enter its 3809 termination protocol. 3810 3811 The value of is_serial must be true when do_marking_step is being 3812 called serially (i.e. by the VMThread) and do_marking_step should 3813 skip any synchronization in the termination and overflow code. 3814 Examples include the serial remark code and the serial reference 3815 processing closures. 3816 3817 The value of is_serial must be false when do_marking_step is 3818 being called by any of the worker threads in a work gang. 3819 Examples include the concurrent marking code (CMMarkingTask), 3820 the MT remark code, and the MT reference processing closures. 3821 3822 *****************************************************************************/ 3823 3824 void CMTask::do_marking_step(double time_target_ms, 3825 bool do_termination, 3826 bool is_serial) { 3827 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 3828 assert(concurrent() == _cm->concurrent(), "they should be the same"); 3829 3830 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); 3831 assert(_task_queues != NULL, "invariant"); 3832 assert(_task_queue != NULL, "invariant"); 3833 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); 3834 3835 assert(!_claimed, 3836 "only one thread should claim this task at any one time"); 3837 3838 // OK, this doesn't safeguard against all possible scenarios, as it is 3839 // possible for two threads to set the _claimed flag at the same 3840 // time. But it is only for debugging purposes anyway and it will 3841 // catch most problems.
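// For example, two threads could both read _claimed == false and then
// both set it to true without either assert firing. We tolerate this
// because the flag is only a debugging aid, not a lock.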
3842 _claimed = true; 3843 3844 _start_time_ms = os::elapsedVTime() * 1000.0; 3845 statsOnly( _interval_start_time_ms = _start_time_ms ); 3846 3847 // If do_stealing is true then do_marking_step will attempt to 3848 // steal work from the other CMTasks. It only makes sense to 3849 // enable stealing when the termination protocol is enabled 3850 // and do_marking_step() is not being called serially. 3851 bool do_stealing = do_termination && !is_serial; 3852 3853 double diff_prediction_ms = 3854 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 3855 _time_target_ms = time_target_ms - diff_prediction_ms; 3856 3857 // set up the variables that are used in the work-based scheme to 3858 // call the regular clock method 3859 _words_scanned = 0; 3860 _refs_reached = 0; 3861 recalculate_limits(); 3862 3863 // clear all flags 3864 clear_has_aborted(); 3865 _has_timed_out = false; 3866 _draining_satb_buffers = false; 3867 3868 ++_calls; 3869 3870 if (_cm->verbose_low()) { 3871 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 3872 "target = %1.2lfms >>>>>>>>>>", 3873 _worker_id, _calls, _time_target_ms); 3874 } 3875 3876 // Set up the bitmap and oop closures. Anything that uses them is 3877 // eventually called from this method, so it is OK to allocate these 3878 // statically. 3879 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 3880 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 3881 set_cm_oop_closure(&cm_oop_closure); 3882 3883 if (_cm->has_overflown()) { 3884 // This can happen if the mark stack overflows during a GC pause 3885 // and this task, after a yield point, restarts. We have to abort 3886 // as we need to get into the overflow protocol which happens 3887 // right at the end of this task. 3888 set_has_aborted(); 3889 } 3890 3891 // First drain any available SATB buffers. After this, we will not 3892 // look at SATB buffers before the next invocation of this method. 3893 // If enough completed SATB buffers are queued up, the regular clock 3894 // will abort this task so that it restarts. 3895 drain_satb_buffers(); 3896 // ...then partially drain the local queue and the global stack 3897 drain_local_queue(true); 3898 drain_global_stack(true); 3899 3900 do { 3901 if (!has_aborted() && _curr_region != NULL) { 3902 // This means that we're already holding on to a region. 3903 assert(_finger != NULL, "if region is not NULL, then the finger " 3904 "should not be NULL either"); 3905 3906 // We might have restarted this task after an evacuation pause 3907 // which might have evacuated the region we're holding on to 3908 // underneath our feet. Let's read its limit again to make sure 3909 // that we do not iterate over a region of the heap that 3910 // contains garbage (update_region_limit() will also move 3911 // _finger to the start of the region if it is found empty). 3912 update_region_limit(); 3913 // We will start from _finger not from the start of the region, 3914 // as we might be restarting this task after aborting half-way 3915 // through scanning this region. In this case, _finger points to 3916 // the address where we last found a marked object. If this is a 3917 // fresh region, _finger points to start(). 
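// Illustrative example (addresses made up): if the region spans
// [0x1000, 0x2000) and NTAMS is 0x1c00, a fresh task scans
// mr == [0x1000, 0x1c00), whereas a task that aborted at 0x1800
// resumes with mr == [0x1800, 0x1c00).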
3918 MemRegion mr = MemRegion(_finger, _region_limit); 3919 3920 if (_cm->verbose_low()) { 3921 gclog_or_tty->print_cr("[%u] we're scanning part " 3922 "["PTR_FORMAT", "PTR_FORMAT") " 3923 "of region "HR_FORMAT, 3924 _worker_id, p2i(_finger), p2i(_region_limit), 3925 HR_FORMAT_PARAMS(_curr_region)); 3926 } 3927 3928 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 3929 "humongous regions should go around loop once only"); 3930 3931 // Some special cases: 3932 // If the memory region is empty, we can just give up the region. 3933 // If the current region is humongous then we only need to check 3934 // the bitmap for the bit associated with the start of the object, 3935 // scan the object if it's live, and give up the region. 3936 // Otherwise, let's iterate over the bitmap of the part of the region 3937 // that is left. 3938 // If the iteration is successful, give up the region. 3939 if (mr.is_empty()) { 3940 giveup_current_region(); 3941 regular_clock_call(); 3942 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 3943 if (_nextMarkBitMap->isMarked(mr.start())) { 3944 // The object is marked - apply the closure 3945 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 3946 bitmap_closure.do_bit(offset); 3947 } 3948 // Even if this task aborted while scanning the humongous object 3949 // we can (and should) give up the current region. 3950 giveup_current_region(); 3951 regular_clock_call(); 3952 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 3953 giveup_current_region(); 3954 regular_clock_call(); 3955 } else { 3956 assert(has_aborted(), "currently the only way to do so"); 3957 // The only way to abort the bitmap iteration is to return 3958 // false from the do_bit() method. However, inside the 3959 // do_bit() method we move the _finger to point to the 3960 // object currently being looked at. So, if we bail out, we 3961 // have definitely set _finger to something non-null. 3962 assert(_finger != NULL, "invariant"); 3963 3964 // Region iteration was actually aborted. So now _finger 3965 // points to the address of the object we last scanned. If we 3966 // leave it there, when we restart this task, we will rescan 3967 // the object. It is easy to avoid this. We move the finger by 3968 // enough to point to the next possible object header (the 3969 // bitmap knows by how much we need to move it as it knows its 3970 // granularity). 3971 assert(_finger < _region_limit, "invariant"); 3972 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 3973 // Check if bitmap iteration was aborted while scanning the last object 3974 if (new_finger >= _region_limit) { 3975 giveup_current_region(); 3976 } else { 3977 move_finger_to(new_finger); 3978 } 3979 } 3980 } 3981 // At this point we have either completed iterating over the 3982 // region we were holding on to, or we have aborted. 3983 3984 // We then partially drain the local queue and the global stack. 3985 // (Do we really need this?) 3986 drain_local_queue(true); 3987 drain_global_stack(true); 3988 3989 // Read the note on the claim_region() method on why it might 3990 // return NULL with potentially more regions available for 3991 // claiming and why we have to check out_of_regions() to determine 3992 // whether we're done or not. 3993 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 3994 // We are going to try to claim a new region. We should have 3995 // given up on the previous one. 
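// Keep in mind that several workers may be racing on the global
// finger, so a NULL result from claim_region() does not by itself
// mean that the heap is out of regions; that is why the loop
// condition re-checks _cm->out_of_regions() as well.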
3996 // Separated the asserts so that we know which one fires. 3997 assert(_curr_region == NULL, "invariant"); 3998 assert(_finger == NULL, "invariant"); 3999 assert(_region_limit == NULL, "invariant"); 4000 if (_cm->verbose_low()) { 4001 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id); 4002 } 4003 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 4004 if (claimed_region != NULL) { 4005 // Yes, we managed to claim one 4006 statsOnly( ++_regions_claimed ); 4007 4008 if (_cm->verbose_low()) { 4009 gclog_or_tty->print_cr("[%u] we successfully claimed " 4010 "region "PTR_FORMAT, 4011 _worker_id, p2i(claimed_region)); 4012 } 4013 4014 setup_for_region(claimed_region); 4015 assert(_curr_region == claimed_region, "invariant"); 4016 } 4017 // It is important to call the regular clock here. It might take 4018 // a while to claim a region if, for example, we hit a large 4019 // block of empty regions. So we need to call the regular clock 4020 // method once round the loop to make sure it's called 4021 // frequently enough. 4022 regular_clock_call(); 4023 } 4024 4025 if (!has_aborted() && _curr_region == NULL) { 4026 assert(_cm->out_of_regions(), 4027 "at this point we should be out of regions"); 4028 } 4029 } while ( _curr_region != NULL && !has_aborted()); 4030 4031 if (!has_aborted()) { 4032 // We cannot check whether the global stack is empty, since other 4033 // tasks might be pushing objects to it concurrently. 4034 assert(_cm->out_of_regions(), 4035 "at this point we should be out of regions"); 4036 4037 if (_cm->verbose_low()) { 4038 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id); 4039 } 4040 4041 // Try to reduce the number of available SATB buffers so that 4042 // remark has less work to do. 4043 drain_satb_buffers(); 4044 } 4045 4046 // Since we've done everything else, we can now totally drain the 4047 // local queue and global stack. 4048 drain_local_queue(false); 4049 drain_global_stack(false); 4050 4051 // Attempt work stealing from other tasks' queues. 4052 if (do_stealing && !has_aborted()) { 4053 // We have not aborted. This means that we have finished all that 4054 // we could. Let's try to do some stealing... 4055 4056 // We cannot check whether the global stack is empty, since other 4057 // tasks might be pushing objects to it concurrently. 4058 assert(_cm->out_of_regions() && _task_queue->size() == 0, 4059 "only way to reach here"); 4060 4061 if (_cm->verbose_low()) { 4062 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id); 4063 } 4064 4065 while (!has_aborted()) { 4066 oop obj; 4067 statsOnly( ++_steal_attempts ); 4068 4069 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 4070 if (_cm->verbose_medium()) { 4071 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully", 4072 _worker_id, p2i((void*) obj)); 4073 } 4074 4075 statsOnly( ++_steals ); 4076 4077 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 4078 "any stolen object should be marked"); 4079 scan_object(obj); 4080 4081 // And since we're towards the end, let's totally drain the 4082 // local queue and global stack. 4083 drain_local_queue(false); 4084 drain_global_stack(false); 4085 } else { 4086 break; 4087 } 4088 } 4089 } 4090 4091 // If we are about to wrap up and go into termination, check if we 4092 // should raise the overflow flag. 4093 if (do_termination && !has_aborted()) { 4094 if (_cm->force_overflow()->should_force()) { 4095 _cm->set_has_overflown(); 4096 regular_clock_call(); 4097 } 4098 } 4099 4100 // We still haven't aborted.
Now, let's try to get into the 4101 // termination protocol. 4102 if (do_termination && !has_aborted()) { 4103 // We cannot check whether the global stack is empty, since other 4104 // tasks might be concurrently pushing objects on it. 4105 // Separated the asserts so that we know which one fires. 4106 assert(_cm->out_of_regions(), "only way to reach here"); 4107 assert(_task_queue->size() == 0, "only way to reach here"); 4108 4109 if (_cm->verbose_low()) { 4110 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); 4111 } 4112 4113 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 4114 4115 // The CMTask class also extends the TerminatorTerminator class, 4116 // hence its should_exit_termination() method will also decide 4117 // whether to exit the termination protocol or not. 4118 bool finished = (is_serial || 4119 _cm->terminator()->offer_termination(this)); 4120 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 4121 _termination_time_ms += 4122 termination_end_time_ms - _termination_start_time_ms; 4123 4124 if (finished) { 4125 // We're all done. 4126 4127 if (_worker_id == 0) { 4128 // let's allow task 0 to do this 4129 if (concurrent()) { 4130 assert(_cm->concurrent_marking_in_progress(), "invariant"); 4131 // we need to set this to false before the next 4132 // safepoint. This way we ensure that the marking phase 4133 // doesn't observe any more heap expansions. 4134 _cm->clear_concurrent_marking_in_progress(); 4135 } 4136 } 4137 4138 // We can now guarantee that the global stack is empty, since 4139 // all other tasks have finished. We separated the guarantees so 4140 // that, if a condition is false, we can immediately find out 4141 // which one. 4142 guarantee(_cm->out_of_regions(), "only way to reach here"); 4143 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 4144 guarantee(_task_queue->size() == 0, "only way to reach here"); 4145 guarantee(!_cm->has_overflown(), "only way to reach here"); 4146 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 4147 4148 if (_cm->verbose_low()) { 4149 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); 4150 } 4151 } else { 4152 // Apparently there's more work to do. Let's abort this task. The 4153 // caller will restart it and we can hopefully find more things to do. 4154 4155 if (_cm->verbose_low()) { 4156 gclog_or_tty->print_cr("[%u] apparently there is more work to do", 4157 _worker_id); 4158 } 4159 4160 set_has_aborted(); 4161 statsOnly( ++_aborted_termination ); 4162 } 4163 } 4164 4165 // Mainly for debugging purposes to make sure that a pointer to the 4166 // closure which was statically allocated in this frame doesn't 4167 // escape it by accident. 4168 set_cm_oop_closure(NULL); 4169 double end_time_ms = os::elapsedVTime() * 1000.0; 4170 double elapsed_time_ms = end_time_ms - _start_time_ms; 4171 // Update the step history. 4172 _step_times_ms.add(elapsed_time_ms); 4173 4174 if (has_aborted()) { 4175 // The task was aborted for some reason. 4176 4177 statsOnly( ++_aborted ); 4178 4179 if (_has_timed_out) { 4180 double diff_ms = elapsed_time_ms - _time_target_ms; 4181 // Keep statistics of how well we did with respect to hitting 4182 // our target only if we actually timed out (if we aborted for 4183 // other reasons, then the results might get skewed). 4184 _marking_step_diffs_ms.add(diff_ms); 4185 } 4186 4187 if (_cm->has_overflown()) { 4188 // This is the interesting one. We aborted because a global 4189 // overflow was raised.
This means we have to restart the 4190 // marking phase and start iterating over regions. However, in 4191 // order to do this we have to make sure that all tasks stop 4192 // what they are doing and re-initialize in a safe manner. We 4193 // will achieve this with the use of two barrier sync points. 4194 4195 if (_cm->verbose_low()) { 4196 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4197 } 4198 4199 if (!is_serial) { 4200 // We only need to enter the sync barrier if being called 4201 // from a parallel context 4202 _cm->enter_first_sync_barrier(_worker_id); 4203 4204 // When we exit this sync barrier we know that all tasks have 4205 // stopped doing marking work. So, it's now safe to 4206 // re-initialize our data structures. At the end of this method, 4207 // task 0 will clear the global data structures. 4208 } 4209 4210 statsOnly( ++_aborted_overflow ); 4211 4212 // We clear the local state of this task... 4213 clear_region_fields(); 4214 4215 if (!is_serial) { 4216 // ...and enter the second barrier. 4217 _cm->enter_second_sync_barrier(_worker_id); 4218 } 4219 // At this point, if we're during the concurrent phase of 4220 // marking, everything has been re-initialized and we're 4221 // ready to restart. 4222 } 4223 4224 if (_cm->verbose_low()) { 4225 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4226 "elapsed = %1.2lfms <<<<<<<<<<", 4227 _worker_id, _time_target_ms, elapsed_time_ms); 4228 if (_cm->has_aborted()) { 4229 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4230 _worker_id); 4231 } 4232 } 4233 } else { 4234 if (_cm->verbose_low()) { 4235 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4236 "elapsed = %1.2lfms <<<<<<<<<<", 4237 _worker_id, _time_target_ms, elapsed_time_ms); 4238 } 4239 } 4240 4241 _claimed = false; 4242 } 4243 4244 CMTask::CMTask(uint worker_id, 4245 ConcurrentMark* cm, 4246 size_t* marked_bytes, 4247 BitMap* card_bm, 4248 CMTaskQueue* task_queue, 4249 CMTaskQueueSet* task_queues) 4250 : _g1h(G1CollectedHeap::heap()), 4251 _worker_id(worker_id), _cm(cm), 4252 _claimed(false), 4253 _nextMarkBitMap(NULL), _hash_seed(17), 4254 _task_queue(task_queue), 4255 _task_queues(task_queues), 4256 _cm_oop_closure(NULL), 4257 _marked_bytes_array(marked_bytes), 4258 _card_bm(card_bm) { 4259 guarantee(task_queue != NULL, "invariant"); 4260 guarantee(task_queues != NULL, "invariant"); 4261 4262 statsOnly( _clock_due_to_scanning = 0; 4263 _clock_due_to_marking = 0 ); 4264 4265 _marking_step_diffs_ms.add(0.5); 4266 } 4267 4268 // These are formatting macros that are used below to ensure 4269 // consistent formatting. The *_H_* versions are used to format the 4270 // header for a particular value and they should be kept consistent 4271 // with the corresponding macro. Also note that most of the macros add 4272 // the necessary white space (as a prefix) which makes them a bit 4273 // easier to compose. 4274 4275 // All the output lines are prefixed with this string to be able to 4276 // identify them easily in a large log file. 
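// For instance, a per-region line produced with these macros might
// look like the following (the values are purely illustrative):
//   ### EDEN 0x00000000f0000000-0x00000000f0100000    524288 ...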
4277 #define G1PPRL_LINE_PREFIX "###" 4278 4279 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT 4280 #ifdef _LP64 4281 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 4282 #else // _LP64 4283 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 4284 #endif // _LP64 4285 4286 // For per-region info 4287 #define G1PPRL_TYPE_FORMAT " %-4s" 4288 #define G1PPRL_TYPE_H_FORMAT " %4s" 4289 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) 4290 #define G1PPRL_BYTE_H_FORMAT " %9s" 4291 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 4292 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 4293 4294 // For summary info 4295 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT 4296 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT 4297 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" 4298 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" 4299 4300 G1PrintRegionLivenessInfoClosure:: 4301 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) 4302 : _out(out), 4303 _total_used_bytes(0), _total_capacity_bytes(0), 4304 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4305 _hum_used_bytes(0), _hum_capacity_bytes(0), 4306 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 4307 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 4308 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4309 MemRegion g1_reserved = g1h->g1_reserved(); 4310 double now = os::elapsedTime(); 4311 4312 // Print the header of the output. 4313 _out->cr(); 4314 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 4315 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" 4316 G1PPRL_SUM_ADDR_FORMAT("reserved") 4317 G1PPRL_SUM_BYTE_FORMAT("region-size"), 4318 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 4319 HeapRegion::GrainBytes); 4320 _out->print_cr(G1PPRL_LINE_PREFIX); 4321 _out->print_cr(G1PPRL_LINE_PREFIX 4322 G1PPRL_TYPE_H_FORMAT 4323 G1PPRL_ADDR_BASE_H_FORMAT 4324 G1PPRL_BYTE_H_FORMAT 4325 G1PPRL_BYTE_H_FORMAT 4326 G1PPRL_BYTE_H_FORMAT 4327 G1PPRL_DOUBLE_H_FORMAT 4328 G1PPRL_BYTE_H_FORMAT 4329 G1PPRL_BYTE_H_FORMAT, 4330 "type", "address-range", 4331 "used", "prev-live", "next-live", "gc-eff", 4332 "remset", "code-roots"); 4333 _out->print_cr(G1PPRL_LINE_PREFIX 4334 G1PPRL_TYPE_H_FORMAT 4335 G1PPRL_ADDR_BASE_H_FORMAT 4336 G1PPRL_BYTE_H_FORMAT 4337 G1PPRL_BYTE_H_FORMAT 4338 G1PPRL_BYTE_H_FORMAT 4339 G1PPRL_DOUBLE_H_FORMAT 4340 G1PPRL_BYTE_H_FORMAT 4341 G1PPRL_BYTE_H_FORMAT, 4342 "", "", 4343 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 4344 "(bytes)", "(bytes)"); 4345 } 4346 4347 // It takes as a parameter a pointer to one of the _hum_* fields; it 4348 // deduces the corresponding value for a region in a humongous region 4349 // series (either the region size, or what's left if the _hum_* field 4350 // is < the region size), and updates the _hum_* field accordingly. 4351 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { 4352 size_t bytes = 0; 4353 // The > 0 check is to deal with the prev and next live bytes which 4354 // could be 0. 4355 if (*hum_bytes > 0) { 4356 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); 4357 *hum_bytes -= bytes; 4358 } 4359 return bytes; 4360 } 4361 4362 // It deduces the values for a region in a humongous region series 4363 // from the _hum_* fields and updates those accordingly. It assumes 4364 // that the _hum_* fields have already been set up from the "starts 4365 // humongous" region and we visit the regions in address order.
4366 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4367 size_t* capacity_bytes, 4368 size_t* prev_live_bytes, 4369 size_t* next_live_bytes) { 4370 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4371 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4372 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4373 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4374 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4375 } 4376 4377 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4378 const char* type = r->get_type_str(); 4379 HeapWord* bottom = r->bottom(); 4380 HeapWord* end = r->end(); 4381 size_t capacity_bytes = r->capacity(); 4382 size_t used_bytes = r->used(); 4383 size_t prev_live_bytes = r->live_bytes(); 4384 size_t next_live_bytes = r->next_live_bytes(); 4385 double gc_eff = r->gc_efficiency(); 4386 size_t remset_bytes = r->rem_set()->mem_size(); 4387 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4388 4389 if (r->is_starts_humongous()) { 4390 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4391 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4392 "they should have been zeroed after the last time we used them"); 4393 // Set up the _hum_* fields. 4394 _hum_capacity_bytes = capacity_bytes; 4395 _hum_used_bytes = used_bytes; 4396 _hum_prev_live_bytes = prev_live_bytes; 4397 _hum_next_live_bytes = next_live_bytes; 4398 get_hum_bytes(&used_bytes, &capacity_bytes, 4399 &prev_live_bytes, &next_live_bytes); 4400 end = bottom + HeapRegion::GrainWords; 4401 } else if (r->is_continues_humongous()) { 4402 get_hum_bytes(&used_bytes, &capacity_bytes, 4403 &prev_live_bytes, &next_live_bytes); 4404 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4405 } 4406 4407 _total_used_bytes += used_bytes; 4408 _total_capacity_bytes += capacity_bytes; 4409 _total_prev_live_bytes += prev_live_bytes; 4410 _total_next_live_bytes += next_live_bytes; 4411 _total_remset_bytes += remset_bytes; 4412 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4413 4414 // Print a line for this particular region. 4415 _out->print_cr(G1PPRL_LINE_PREFIX 4416 G1PPRL_TYPE_FORMAT 4417 G1PPRL_ADDR_BASE_FORMAT 4418 G1PPRL_BYTE_FORMAT 4419 G1PPRL_BYTE_FORMAT 4420 G1PPRL_BYTE_FORMAT 4421 G1PPRL_DOUBLE_FORMAT 4422 G1PPRL_BYTE_FORMAT 4423 G1PPRL_BYTE_FORMAT, 4424 type, p2i(bottom), p2i(end), 4425 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4426 remset_bytes, strong_code_roots_bytes); 4427 4428 return false; 4429 } 4430 4431 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4432 // add static memory usages to remembered set sizes 4433 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4434 // Print the footer of the output. 
4435 _out->print_cr(G1PPRL_LINE_PREFIX); 4436 _out->print_cr(G1PPRL_LINE_PREFIX 4437 " SUMMARY" 4438 G1PPRL_SUM_MB_FORMAT("capacity") 4439 G1PPRL_SUM_MB_PERC_FORMAT("used") 4440 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4441 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4442 G1PPRL_SUM_MB_FORMAT("remset") 4443 G1PPRL_SUM_MB_FORMAT("code-roots"), 4444 bytes_to_mb(_total_capacity_bytes), 4445 bytes_to_mb(_total_used_bytes), 4446 perc(_total_used_bytes, _total_capacity_bytes), 4447 bytes_to_mb(_total_prev_live_bytes), 4448 perc(_total_prev_live_bytes, _total_capacity_bytes), 4449 bytes_to_mb(_total_next_live_bytes), 4450 perc(_total_next_live_bytes, _total_capacity_bytes), 4451 bytes_to_mb(_total_remset_bytes), 4452 bytes_to_mb(_total_strong_code_roots_bytes)); 4453 _out->cr(); 4454 }
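// An illustrative (made-up) summary footer produced by the destructor
// above:
//   ###
//   ### SUMMARY capacity: 1024.00 MB used: 512.00 MB / 50.00 %
//       prev-live: ... next-live: ... remset: ... code-roots: ...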