/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
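  // (A mark bit covers (1 << _shifter) heap words, i.e. one unit of minimum
  // object alignment; e.g. with _shifter == 0 every HeapWord is a possible
  // boundary, so we align up to that granularity before searching.)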
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer _hrclaimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // intersection() returns the clipped region; the result must be assigned back.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  ,
  _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity, continue;
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT "K to " SIZE_FORMAT "K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
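  // Note: publishing _index before copying the elements is safe here because
  // par_push_arr() and par_pop_arr() both hold ParGCRareEvent_lock, so a
  // concurrent popper cannot observe the gap.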
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
           false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = " PTR_FORMAT ", "
                           "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
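    // For example, scale_parallel_threads() maps ParallelGCThreads == 8 to
    // MAX2((8 + 2) / 4, 1U) == 2 marking threads (integer division), and
    // never drops below one thread.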
    uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
    FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = (uint) ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  if (parallel_marking_threads() > 1) {
    _cleanup_task_overhead = 1.0;
  } else {
    _cleanup_task_overhead = marking_task_overhead();
  }
  _cleanup_sleep_factor =
    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
  gclog_or_tty->print_cr("Marking Threads %d", parallel_marking_threads());
  gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
  gclog_or_tty->print_cr("CM Sleep Factor %1.4lf", sleep_factor());
  gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
  gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
#endif

  _parallel_workers = new FlexibleWorkGang("G1 Marker",
       _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  bool barrier_aborted;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_second_overflow_barrier_sync.enter();
  }

  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check(worker_id);

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
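// If dynamic sizing is off (or ConcGCThreads was set explicitly without
// ForceDynamicNumberOfGCThreads), this simply returns the fixed maximum;
// otherwise AdaptiveSizePolicy::calc_default_active_workers() chooses the
// count, with a minimum of one worker.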
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  // Don't set _n_par_threads because it affects MT in process_roots()
  // and the decisions on that MT processing are made elsewhere.
  assert(_parallel_workers->active_workers() > 0, "Should have been set");
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

// Helper class to get rid of some boilerplate code.
class G1CMTraceTime : public GCTraceTime {
  static bool doit_and_prepend(bool doit) {
    if (doit) {
      gclog_or_tty->put(' ');
    }
    return doit;
  }

 public:
  G1CMTraceTime(const char* title, bool doit)
    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
                  G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  }
};

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->is_continues_humongous(), "should have filtered those out");

    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    if (!hr->is_starts_humongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put(index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range.
      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
      _region_bm->par_at_put_range(index, end_index, true);
    }
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {

    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
                   p2i(start), p2i(ntams), p2i(hr->end())));

    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.
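// The checks in this closure are deliberately one-directional: a bit set in
// the expected (recomputed) bitmap but clear in the actual bitmap counts as
// a failure, while extra bits in the actual bitmap are tolerated.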

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified
  bool _verbose;        // verbose output?

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm,
                                bool verbose) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
                    Mutex::_no_safepoint_check_flag);

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    // We're not OK if expected marked bytes > actual marked bytes. It means
    // we have missed accounting some objects during the actual marking.
    if (exp_marked_bytes > act_marked_bytes) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
      }
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      if (_verbose) {
        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                               "expected: %s, actual: %s",
                               hr->hrm_index(),
                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
      }
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        if (_verbose) {
          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                 "expected: %s, actual: %s",
                                 hr->hrm_index(), i,
                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
        }
        failures += 1;
      }
    }

    if (failures > 0 && _verbose) {
      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
                             HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};

class G1ParVerifyFinalCountTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int _failures;
  bool _verbose;

  HeapRegionClaimer _hrclaimer;

public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0), _verbose(false),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
    assert(VerifyDuringGC, "don't call this otherwise");
    assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
    assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");

    _verbose = _cm->verbose_medium();
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    VerifyLiveObjectDataHRClosure verify_cl(_g1h,
                                            _actual_region_bm, _actual_card_bm,
                                            _expected_region_bm,
                                            _expected_card_bm,
                                            _verbose);

    _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);

    Atomic::add(verify_cl.failures(), &_failures);
  }

  int failures() const { return _failures; }
};
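// The verification rule above is a subset check: every bit set in the
// expected bitmap must also be set in the actual bitmap (the reverse is
// allowed, since marking may conservatively keep extra objects alive). A
// minimal stand-alone sketch of that rule, using plain words instead of
// the HotSpot BitMap type (illustrative only, not compiled):
#if 0
#include <cstdint>
#include <cstddef>

// Returns true iff every bit set in 'expected' is also set in 'actual'.
static bool is_subset_of(const uint64_t* expected, const uint64_t* actual,
                         size_t num_words) {
  for (size_t i = 0; i < num_words; ++i) {
    if ((expected[i] & ~actual[i]) != 0) {
      return false; // a bit we computed as live is missing from the actual map
    }
  }
  return true;
}
#endif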
// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top)
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region,
// containing live data, in the region liveness bitmap.

class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
public:
  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                              BitMap* region_bm,
                              BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm) { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* top = hr->top();

    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

    // Mark the allocated-since-marking portion...
    if (ntams < top) {
      // This definitely means the region has live objects.
      set_bit_for_region(hr);

      // Now set the bits in the card bitmap for [ntams, top)
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      assert(end_idx <= _card_bm->size(),
             err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
                     end_idx, _card_bm->size()));
      assert(start_idx < _card_bm->size(),
             err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
                     start_idx, _card_bm->size()));

      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
    }

    // Set the bit for the region if it contains live data
    if (hr->next_marked_bytes() > 0) {
      set_bit_for_region(hr);
    }

    return false;
  }
};

class G1ParFinalCountTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint _n_workers;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
    : AbstractGangTask("G1 final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    FinalCountDataUpdateClosure final_update_cl(_g1h,
                                                _actual_region_bm,
                                                _actual_card_bm);

    _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
  }
};

class G1ParNoteEndTask;
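// FinalCountDataUpdateClosure above relies on the NTAMS rule: any object
// allocated at or above next_top_at_mark_start() was allocated after
// marking started and is treated as implicitly live, without consulting
// the mark bitmap. A minimal sketch of the rule (illustrative only; 'ntams'
// and 'top' stand in for the per-region values used above):
#if 0
// An address 'addr' within a region is implicitly live if it lies in
// [ntams, top): allocated since marking began, so never explicitly marked.
static bool is_implicitly_live(const void* addr, const void* ntams,
                               const void* top) {
  return addr >= ntams && addr < top;
}
#endif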
class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _max_live_bytes;
  uint _regions_claimed;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  HeapRegionSetCount _old_regions_removed;
  HeapRegionSetCount _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;
  double _claimed_region_time;
  double _max_region_time;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _max_live_bytes(0), _regions_claimed(0),
    _freed_bytes(0),
    _claimed_region_time(0.0), _max_region_time(0.0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(),
    _humongous_regions_removed(),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
  const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_continues_humongous()) {
      return false;
    }
    _g1->reset_gc_time_stamps(hr);
    double start = os::elapsedTime();
    _regions_claimed++;
    hr->note_end_of_marking();
    _max_live_bytes += hr->max_live_bytes();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous(), "we should only see starts humongous");
        _humongous_regions_removed.increment(1u, hr->capacity());
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed.increment(1u, hr->capacity());
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    double region_time = (os::elapsedTime() - start);
    _claimed_region_time += region_time;
    if (region_time > _max_region_time) {
      _max_region_time = region_time;
    }
    return false;
  }

  size_t max_live_bytes() { return _max_live_bytes; }
  uint regions_claimed() { return _regions_claimed; }
  double claimed_region_time_sec() { return _claimed_region_time; }
  double max_region_time_sec() { return _max_region_time; }
};
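// The reclamation test in doHeapRegion() above can be read as a single
// predicate: a region is freed here only if it has been allocated into,
// nothing in it was found live by marking, and it is not young (young
// regions are reclaimed by evacuation pauses instead). A hypothetical
// restatement of that predicate (illustrative only):
#if 0
#include <cstddef>

struct RegionStats {
  size_t used_bytes;      // bytes allocated in the region
  size_t max_live_bytes;  // upper bound on live bytes found by marking
  bool   is_young;        // young regions are not freed here
};

static bool is_wholly_dead(const RegionStats& r) {
  return r.used_bytes > 0 && r.max_live_bytes == 0 && !r.is_young;
}
#endif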
class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  size_t _max_live_bytes;
  size_t _freed_bytes;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
      _max_live_bytes += g1_note_end.max_live_bytes();
      _freed_bytes += g1_note_end.freed_bytes();

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
  size_t max_live_bytes() { return _max_live_bytes; }
  size_t freed_bytes() { return _freed_bytes; }
};

class G1ParScrubRemSetTask: public AbstractGangTask {
protected:
  G1RemSet* _g1rs;
  BitMap* _region_bm;
  BitMap* _card_bm;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
    AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
  }
};

void ConcurrentMark::cleanup() {
  // The world is stopped at this checkpoint.
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  g1h->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Cleanup Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  uint n_workers;

  // Do counting once more with the world stopped for good measure.
  G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);

  g1h->set_par_threads();
  n_workers = g1h->n_par_threads();
  assert(g1h->n_par_threads() == n_workers,
         "Should not have been reset");
  g1h->workers()->run_task(&g1_par_count_task);
  // Done with the parallel phase so reset to 0.
  g1h->set_par_threads(0);

  if (VerifyDuringGC) {
    // Verify that the counting data accumulated during marking matches
    // that calculated by walking the marking bitmap.

    // Bitmaps to hold expected values
    BitMap expected_region_bm(_region_bm.size(), true);
    BitMap expected_card_bm(_card_bm.size(), true);

    G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
                                                 &_region_bm,
                                                 &_card_bm,
                                                 &expected_region_bm,
                                                 &expected_card_bm);

    g1h->set_par_threads((int)n_workers);
    g1h->workers()->run_task(&g1_par_verify_task);
    // Done with the parallel phase so reset to 0.
    g1h->set_par_threads(0);

    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  }

  size_t start_used_bytes = g1h->used();
  g1h->set_marking_complete();

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install the newly created mark bitmap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->set_par_threads((int)n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->set_par_threads(0);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the remembered sets. This has to happen before the
  // record_concurrent_mark_cleanup_end() call below, since it affects
  // the metric by which we sort the heap regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
    g1h->set_par_threads((int)n_workers);
    g1h->workers()->run_task(&g1_par_scrub_rs_task);
    g1h->set_par_threads(0);

    double rs_scrub_end = os::elapsedTime();
    double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
    _total_rs_scrub_time += this_rs_scrub_time;
  }

  // This will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  if (G1Log::fine()) {
    g1h->g1_policy()->print_heap_transition(start_used_bytes);
  }

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(after)");
  }

  g1h->check_bitmaps("Cleanup End");

  g1h->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();

  g1h->trace_heap_after_concurrent_cycle();
}

void ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                           "cleanup list has %u entries",
                           _cleanup_list.length());
  }

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks.
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                               "appending %u entries to the secondary_free_list, "
                               "cleanup list still has %u entries",
                               tmp_free_list.length(),
                               _cleanup_list.length());
      }

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking.

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}
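// The liveness test above decomposes into two cases: references outside
// the G1 reserved heap are always treated as alive, and references inside
// it are alive unless the marking information says the object is dead. A
// hedged restatement, assuming is_obj_ill() means "allocated before
// marking started (below TAMS) and not marked on the bitmap" (illustrative
// only; the booleans stand in for the G1CollectedHeap queries):
#if 0
static bool is_alive(const void* addr,
                     bool in_g1_reserved,   // _g1->is_in_g1_reserved(addr)
                     bool below_tams,       // allocated before marking started
                     bool marked_on_bitmap) {
  if (addr == 0)        return false; // NULL is never alive
  if (!in_g1_reserved)  return true;  // outside the heap: assume alive
  // Inside the heap: dead only if marking had a chance to mark it and did not.
  return !below_tams || marked_on_bitmap;
}
#endif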
// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the CMTask associated with a worker thread (for serial
// reference processing the CMTask for worker 0 is used) to preserve (mark)
// and trace referent objects.
//
// Using the CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also, using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  ConcurrentMark* _cm;
  CMTask* _task;
  int _ref_counter_limit;
  int _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] we're looking at location "
                               "*"PTR_FORMAT" = "PTR_FORMAT,
                               _task->worker_id(), p2i(p), p2i((void*) obj));
      }

      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call CMTask::do_marking_step() to
        // process these entries.
        //
        // We call CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    } else {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
      }
    }
  }
};
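// The counter discipline above - handle G1RefProcDrainInterval references,
// then drain the local queue before accepting more - is a generic
// bounded-buffering pattern. A minimal sketch of the shape of that loop,
// with hypothetical process()/drain() callbacks (illustrative only):
#if 0
template <typename Ref, typename Process, typename Drain>
static void process_with_drain(Ref* refs, int n, int drain_interval,
                               Process process, Drain drain) {
  int budget = drain_interval;
  for (int i = 0; i < n; ++i) {
    process(refs[i]);        // may push follow-up work on a local queue
    if (--budget == 0) {     // periodically bound the queued work
      drain();               // empty local (and possibly global) queues
      budget = drain_interval;
    }
  }
  drain();                   // finish whatever is left
}
#endif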
// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the CMTask associated with a given worker thread (for serial
// reference processing the CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure: public VoidClosure {
  ConcurrentMark* _cm;
  CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      if (_cm->verbose_high()) {
        gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
                               _task->worker_id(), BOOL_TO_STR(_is_serial));
      }

      // We call CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking.

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _g1h->set_par_threads(_active_workers);
  _workers->run_task(&proc_task_proxy);
  _g1h->set_par_threads(0);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _g1h->set_par_threads(_active_workers);
  _workers->run_task(&enq_task_proxy);
  _g1h->set_par_threads(0);
}

void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}

void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists.
    // We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    G1CMTraceTime t("GC ref-proc", G1Log::finer());

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy.
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (e.g.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          g1h->gc_timer_cm(),
                                          concurrent_gc_id());
    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed.
    return;
  }

  assert(_markStack.isEmpty(), "Marking should have completed");

  // Unload Klasses, String, Symbols, Code Cache, etc.
  {
    G1CMTraceTime trace("Unloading", G1Log::finer());

    if (ClassUnloadingWithConcurrentMark) {
      bool purged_classes;

      {
        G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
      }

      {
        G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
      }
    }

    if (G1StringDedup::is_enabled()) {
      G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
      G1StringDedup::unlink(&g1_is_alive);
    }
  }
}

void ConcurrentMark::swapMarkBitMaps() {
  CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap  = (CMBitMap*)  temp;
}
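// swapMarkBitMaps() above flips the roles of the two marking bitmaps:
// "next" (the marking just completed) becomes the consistent "prev" view
// used for liveness queries, and the old "prev" becomes scratch space for
// the following cycle. A minimal sketch of the double-buffering idea
// (illustrative only; the pointers stand in for the CMBitMap types):
#if 0
struct MarkBitmaps {
  void* prev; // last completed marking; read-only between cycles
  void* next; // in-progress marking; cleared before each new cycle

  void swap_at_cleanup() {
    void* tmp = prev;
    prev = next;  // the freshly completed marking becomes authoritative
    next = tmp;   // the stale map will be cleared and reused
  }
};
#endif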
// Closure for marking entries in SATB buffers.
class CMSATBBufferClosure : public SATBBufferClosure {
private:
  CMTask* _task;
  G1CollectedHeap* _g1h;

  // This is very similar to CMTask::deal_with_reference, but with
  // more relaxed requirements for the argument, so this must be more
  // circumspect about treating the argument as an object.
  void do_entry(void* entry) const {
    _task->increment_refs_reached();
    HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
    if (entry < hr->next_top_at_mark_start()) {
      // Until we get here, we don't know whether entry refers to a valid
      // object; it could instead have been a stale reference.
      oop obj = static_cast<oop>(entry);
      assert(obj->is_oop(true /* ignore mark word */),
             err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
      _task->make_reference_grey(obj, hr);
    }
  }

public:
  CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};
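// do_entry() above only treats a SATB entry as an object if it lies below
// the containing region's NTAMS; anything at or above NTAMS was allocated
// after marking started and may be a stale or not-yet-initialized
// reference that must not be dereferenced. A minimal sketch of that filter
// (illustrative only; 'region_ntams' stands in for hr->next_top_at_mark_start()):
#if 0
static bool satb_entry_needs_marking(const void* entry,
                                     const void* region_ntams) {
  // Entries at or above NTAMS are implicitly live and may not even be
  // valid objects yet; only entries below NTAMS are safe to treat as oops.
  return entry < region_ntams;
}
#endif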
class G1RemarkThreadsClosure : public ThreadClosure {
  CMSATBBufferClosure _cm_satb_cl;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
    _cm_satb_cl(task, g1h),
    _cm_cl(g1h, g1h->concurrent_mark(), task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods
        // to find roots for concurrent marking; however, oops reachable from
        // nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or
        // klass_holder) of the receiver, should be live by the SATB invariant,
        // but other oops recorded in nmethods may behave differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
      }
    }
  }
};

class CMRemarkTask: public AbstractGangTask {
private:
  ConcurrentMark* _cm;
public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true         /* do_termination */,
                              false        /* is_serial */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  G1CMTraceTime trace("Finalize Marking", G1Log::finer());

  g1h->ensure_parsability(false);

  StrongRootsScope srs;
  // This is remark, so we'll use up all active threads.
  uint active_workers = g1h->workers()->active_workers();
  if (active_workers == 0) {
    assert(active_workers > 0, "Should have been set earlier");
    active_workers = (uint) ParallelGCThreads;
    g1h->workers()->set_active_workers(active_workers);
  }
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  CMRemarkTask remarkTask(this, active_workers);
  // We will start all available threads, even if we decide that the
  // active_workers will be fewer. The extra ones will just bail out
  // immediately.
  g1h->set_par_threads(active_workers);
  g1h->workers()->run_task(&remarkTask);
  g1h->set_par_threads(0);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            err_msg("Invariant: has_overflown = %s, num buffers = %d",
                    BOOL_TO_STR(has_overflown()),
                    satb_mq_set.completed_buffers_num()));

  print_stats();
}

void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
}

void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  _nextMarkBitMap->clearRange(mr);
}

HeapRegion*
ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    // Note on how this code handles humongous regions. In the
    // normal case the finger will reach the start of a "starts
    // humongous" (SH) region. Its end will either be the end of the
    // last "continues humongous" (CH) region in the sequence, or the
    // standard end of the SH region (if the SH is the only region in
    // the sequence). That way claim_region() will skip over the CH
    // regions. However, there is a subtle race between a CM thread
    // executing this method and a mutator thread doing a humongous
    // object allocation. The two are not mutually exclusive as the CM
    // thread does not need to hold the Heap_lock when it gets
    // here. So there is a chance that claim_region() will come across
    // a free region that's in the process of becoming a SH or a CH
    // region.
    // In the former case, it will either
    //   a) Miss the update to the region's end, in which case it will
    //      visit every subsequent CH region, will find their bitmaps
    //      empty, and do nothing, or
    //   b) Will observe the update of the region's end (in which case
    //      it will skip the subsequent CH regions).
    // If it comes across a region that suddenly becomes CH, the
    // scenario will be similar to b). So, the race between
    // claim_region() and a humongous object allocation might force us
    // to do a bit of unnecessary work (due to some unnecessary bitmap
    // iterations) but it should not introduce any correctness issues.
    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);

    // heap_region_containing_raw() above may return NULL, as we always scan
    // and claim until the end of the heap. In this case, just jump to the
    // next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit = curr_region->next_top_at_mark_start();

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
                               "["PTR_FORMAT", "PTR_FORMAT"), "
                               "limit = "PTR_FORMAT,
                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
      }

      // Notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further.
      assert(_finger >= end, "the finger should have moved forward");

      if (verbose_low()) {
        gclog_or_tty->print_cr("[%u] we were successful with region = "
                               PTR_FORMAT, worker_id, p2i(curr_region));
      }

      if (limit > bottom) {
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
                                 "returning it ", worker_id, p2i(curr_region));
        }
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        if (verbose_low()) {
          gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
                                 "returning NULL", worker_id, p2i(curr_region));
        }
        // We return NULL and the caller should try calling
        // claim_region() again.
        return NULL;
      }
    } else {
      assert(_finger > finger, "the finger should have moved forward");
      if (verbose_low()) {
        if (curr_region == NULL) {
          gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
                                 "global finger = "PTR_FORMAT", "
                                 "our finger = "PTR_FORMAT,
                                 worker_id, p2i(_finger), p2i(finger));
        } else {
          gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
                                 "global finger = "PTR_FORMAT", "
                                 "our finger = "PTR_FORMAT,
                                 worker_id, p2i(_finger), p2i(finger));
        }
      }

      // read it again
      finger = _finger;
    }
  }

  return NULL;
}
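// claim_region() above serializes workers with a single CAS on the global
// finger: a worker reads the finger, computes the end of the region it
// would claim, and advances the finger only if nobody moved it in the
// meantime. A stand-alone sketch of that claiming loop, using C++11
// atomics in place of Atomic::cmpxchg_ptr and an assumed fixed region
// size (illustrative only):
#if 0
#include <atomic>
#include <cstdint>

static const uintptr_t RegionBytes = 1u << 20; // assumed 1 MB regions

// Returns the start of a claimed region, or 0 when the heap is exhausted.
static uintptr_t claim_next(std::atomic<uintptr_t>& finger,
                            uintptr_t heap_end) {
  uintptr_t cur = finger.load();
  while (cur < heap_end) {
    uintptr_t end = cur + RegionBytes;
    // On success we own [cur, end); on failure 'cur' is refreshed and we retry.
    if (finger.compare_exchange_weak(cur, end)) {
      return cur;
    }
  }
  return 0;
}
#endif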
#ifndef PRODUCT
enum VerifyNoCSetOopsPhase {
  VerifyNoCSetOopsStack,
  VerifyNoCSetOopsQueues
};

class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
private:
  G1CollectedHeap* _g1h;
  VerifyNoCSetOopsPhase _phase;
  int _info;

  const char* phase_str() {
    switch (_phase) {
    case VerifyNoCSetOopsStack:  return "Stack";
    case VerifyNoCSetOopsQueues: return "Queue";
    default:                     ShouldNotReachHere();
    }
    return NULL;
  }

  void do_object_work(oop obj) {
    guarantee(!_g1h->obj_in_cs(obj),
              err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
                      p2i((void*) obj), phase_str(), _info));
  }

public:
  VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }

  void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
    _phase = phase;
    _info = info;
  }

  virtual void do_oop(oop* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_object_work(obj);
  }

  virtual void do_oop(narrowOop* p) {
    // We should not come across narrow oops while scanning marking
    // stacks.
    ShouldNotReachHere();
  }

  virtual void do_object(oop obj) {
    do_object_work(obj);
  }
};

void ConcurrentMark::verify_no_cset_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!G1CollectedHeap::heap()->mark_in_progress()) {
    return;
  }

  VerifyNoCSetOopsClosure cl;

  // Verify entries on the global mark stack
  cl.set_phase(VerifyNoCSetOopsStack);
  _markStack.oops_do(&cl);

  // Verify entries on the task queues
  for (uint i = 0; i < _max_worker_id; i += 1) {
    cl.set_phase(VerifyNoCSetOopsQueues, i);
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->oops_do(&cl);
  }

  // Verify the global finger
  HeapWord* global_finger = finger();
  if (global_finger != NULL && global_finger < _heap_end) {
    // The global finger always points to a heap region boundary. We
    // use heap_region_containing_raw() to get the containing region
    // given that the global finger could be pointing to a free region
    // which subsequently becomes continues humongous. If that
    // happens, heap_region_containing() will return the bottom of the
    // corresponding starts humongous region and the check below will
    // not hold any more.
    // Since we always iterate over all regions, we might get a NULL HeapRegion
    // here.
    HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
                      p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
  }

  // Verify the task fingers
  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
    CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap_end) {
      // See above note on the global finger verification.
      HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                !task_hr->in_collection_set(),
                err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
                        p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
    }
  }
}
#endif // PRODUCT

// Aggregate the counting data that was constructed concurrently
// with marking.
class AggregateCountDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;
  BitMap* _cm_card_bm;
  uint _max_worker_id;

public:
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_continues_humongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed.
      // Note that we cannot rely on their associated
      // "starts humongous" region to have their bit set to 1
      // since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* start = hr->bottom();
    HeapWord* limit = hr->next_top_at_mark_start();
    HeapWord* end = hr->end();

    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
           err_msg("Preconditions not met - "
                   "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
                   "top: "PTR_FORMAT", end: "PTR_FORMAT,
                   p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));

    assert(hr->next_marked_bytes() == 0, "Precondition");

    if (start == limit) {
      // NTAMS of this region has not been set so nothing to do.
      return false;
    }

    // 'start' should be in the heap.
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could be actually just beyond the end of the heap;
    // limit_idx will then correspond to a (non-existent) card
    // that is also outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
    uint hrm_index = hr->hrm_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrm_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
      BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);

      while (scan_idx < limit_idx) {
        assert(task_card_bm->at(scan_idx) == true, "should be");
        _cm_card_bm->set_bit(scan_idx);
        assert(_cm_card_bm->at(scan_idx) == true, "should be");

        // BitMap::get_next_one_offset() can handle the case when
        // its left_offset parameter is greater than its right_offset
        // parameter. It does, however, have an early exit if
        // left_offset == right_offset. So let's limit the value
        // passed in for left offset here.
        BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
        scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
      }
    }

    // Update the marked bytes for this region.
    hr->add_to_marked_bytes(marked_bytes);

    // Next heap region
    return false;
  }
};

class G1AggregateCountDataTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _cm_card_bm;
  uint _max_worker_id;
  uint _active_workers;
  HeapRegionClaimer _hrclaimer;

public:
  G1AggregateCountDataTask(G1CollectedHeap* g1h,
                           ConcurrentMark* cm,
                           BitMap* cm_card_bm,
                           uint max_worker_id,
                           uint n_workers) :
    AbstractGangTask("Count Aggregation"),
    _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
    _max_worker_id(max_worker_id),
    _active_workers(n_workers),
    _hrclaimer(_active_workers) {
  }

  void work(uint worker_id) {
    AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);

    _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
  }
};

void ConcurrentMark::aggregate_count_data() {
  uint n_workers = _g1h->workers()->active_workers();

  G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
                                           _max_worker_id, n_workers);

  _g1h->set_par_threads(n_workers);
  _g1h->workers()->run_task(&g1_par_agg_task);
  _g1h->set_par_threads(0);
}
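// aggregate_count_data() above folds the per-worker card bitmaps into the
// single global card bitmap; because each worker's bitmap is private while
// it is filled during marking, the aggregation step is conceptually a
// bitwise OR per word. A stand-alone, serial sketch of that union step
// (illustrative only; the real code works on per-region stripes):
#if 0
#include <cstdint>
#include <cstddef>

static void union_into(uint64_t* global_bm, const uint64_t* const* worker_bms,
                       size_t num_workers, size_t num_words) {
  for (size_t w = 0; w < num_workers; ++w) {
    for (size_t i = 0; i < num_words; ++i) {
      global_bm[i] |= worker_bms[w][i]; // any worker saw the card => card is live
    }
  }
}
#endif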
// Clear the per-worker arrays used to store the per-region counting data.
void ConcurrentMark::clear_all_count_data() {
  // Clear the global card bitmap - it will be filled during
  // liveness count aggregation (during remark) and the
  // final counting task.
  _card_bm.clear();

  // Clear the global region bitmap - it will be filled as part
  // of the final counting task.
  _region_bm.clear();

  uint max_regions = _g1h->max_regions();
  assert(_max_worker_id > 0, "uninitialized");

  for (uint i = 0; i < _max_worker_id; i += 1) {
    BitMap* task_card_bm = count_card_bitmap_for(i);
    size_t* marked_bytes_array = count_marked_bytes_array_for(i);

    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    assert(marked_bytes_array != NULL, "uninitialized");

    memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
    task_card_bm->clear();
  }
}

void ConcurrentMark::print_stats() {
  if (verbose_stats()) {
    gclog_or_tty->print_cr("---------------------------------------------------------------------");
    for (size_t i = 0; i < _active_tasks; ++i) {
      _tasks[i]->print_stats();
      gclog_or_tty->print_cr("---------------------------------------------------------------------");
    }
  }
}

// Abandon the current marking iteration due to a Full GC.
void ConcurrentMark::abort() {
  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  _nextMarkBitMap->clearAll();

  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Clear the liveness counting data
  clear_all_count_data();
  // Empty mark stack
  reset_marking_state();
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
  if (!gc_id.is_undefined()) {
    // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
    // to detect that it was aborted. Only keep track of the first GC id that we aborted.
    _aborted_gc_id = gc_id;
  }
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(false, /* new active value */
                                     satb_mq_set.is_active() /* expected_active */);

  _g1h->trace_heap_after_concurrent_cycle();
  _g1h->register_concurrent_cycle_end();
}

const GCId& ConcurrentMark::concurrent_gc_id() {
  if (has_aborted()) {
    return _aborted_gc_id;
  }
  return _g1h->gc_tracer_cm()->gc_id();
}

static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    gclog_or_tty->print_cr("%s  [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

void ConcurrentMark::print_summary_info() {
  gclog_or_tty->print_cr(" Concurrent marking:");
  print_ms_time_info("  ", "init marks", _init_times);
  print_ms_time_info("  ", "remarks", _remark_times);
  {
    print_ms_time_info("     ", "final marks", _remark_mark_times);
    print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
  }
  print_ms_time_info("  ", "cleanups", _cleanup_times);
  gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
                         _total_counting_time,
                         (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
                          (double)_cleanup_times.num()
                         : 0.0));
  if (G1ScrubRemSets) {
    gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
                           _total_rs_scrub_time,
                           (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
                            (double)_cleanup_times.num()
                           : 0.0));
  }
  gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
                         (_init_times.sum() + _remark_times.sum() +
                          _cleanup_times.sum())/1000.0);
  gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
                         "(%8.2f s marking).",
                         cmThread()->vtime_accum(),
                         cmThread()->vtime_mark_accum());
}

void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  _parallel_workers->print_worker_threads_on(st);
}

void ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
}
3143 bool ConcurrentMark::do_yield_check(uint worker_id) { 3144 if (SuspendibleThreadSet::should_yield()) { 3145 if (worker_id == 0) { 3146 _g1h->g1_policy()->record_concurrent_pause(); 3147 } 3148 SuspendibleThreadSet::yield(); 3149 return true; 3150 } else { 3151 return false; 3152 } 3153 } 3154 3155 #ifndef PRODUCT 3156 // for debugging purposes 3157 void ConcurrentMark::print_finger() { 3158 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3159 p2i(_heap_start), p2i(_heap_end), p2i(_finger)); 3160 for (uint i = 0; i < _max_worker_id; ++i) { 3161 gclog_or_tty->print(" %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger())); 3162 } 3163 gclog_or_tty->cr(); 3164 } 3165 #endif 3166 3167 template<bool scan> 3168 inline void CMTask::process_grey_object(oop obj) { 3169 assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray"); 3170 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3171 3172 if (_cm->verbose_high()) { 3173 gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT, 3174 _worker_id, p2i((void*) obj)); 3175 } 3176 3177 size_t obj_size = obj->size(); 3178 _words_scanned += obj_size; 3179 3180 if (scan) { 3181 obj->oop_iterate(_cm_oop_closure); 3182 } 3183 statsOnly( ++_objs_scanned ); 3184 check_limits(); 3185 } 3186 3187 template void CMTask::process_grey_object<true>(oop); 3188 template void CMTask::process_grey_object<false>(oop); 3189 3190 // Closure for iteration over bitmaps 3191 class CMBitMapClosure : public BitMapClosure { 3192 private: 3193 // the bitmap that is being iterated over 3194 CMBitMap* _nextMarkBitMap; 3195 ConcurrentMark* _cm; 3196 CMTask* _task; 3197 3198 public: 3199 CMBitMapClosure(CMTask* task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 3200 _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { } 3201 3202 bool do_bit(size_t offset) { 3203 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3204 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3205 assert(addr < _cm->finger(), "invariant"); 3206 3207 statsOnly( _task->increase_objs_found_on_bitmap() ); 3208 assert(addr >= _task->finger(), "invariant"); 3209 3210 // We move that task's local finger along.
3211 _task->move_finger_to(addr); 3212 3213 _task->scan_object(oop(addr)); 3214 // we only partially drain the local queue and global stack 3215 _task->drain_local_queue(true); 3216 _task->drain_global_stack(true); 3217 3218 // if the has_aborted flag has been raised, we need to bail out of 3219 // the iteration 3220 return !_task->has_aborted(); 3221 } 3222 }; 3223 3224 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3225 ConcurrentMark* cm, 3226 CMTask* task) 3227 : _g1h(g1h), _cm(cm), _task(task) { 3228 assert(_ref_processor == NULL, "should be initialized to NULL"); 3229 3230 if (G1UseConcMarkReferenceProcessing) { 3231 _ref_processor = g1h->ref_processor_cm(); 3232 assert(_ref_processor != NULL, "should not be NULL"); 3233 } 3234 } 3235 3236 void CMTask::setup_for_region(HeapRegion* hr) { 3237 assert(hr != NULL, 3238 "claim_region() should have filtered out NULL regions"); 3239 assert(!hr->is_continues_humongous(), 3240 "claim_region() should have filtered out continues humongous regions"); 3241 3242 if (_cm->verbose_low()) { 3243 gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT, 3244 _worker_id, p2i(hr)); 3245 } 3246 3247 _curr_region = hr; 3248 _finger = hr->bottom(); 3249 update_region_limit(); 3250 } 3251 3252 void CMTask::update_region_limit() { 3253 HeapRegion* hr = _curr_region; 3254 HeapWord* bottom = hr->bottom(); 3255 HeapWord* limit = hr->next_top_at_mark_start(); 3256 3257 if (limit == bottom) { 3258 if (_cm->verbose_low()) { 3259 gclog_or_tty->print_cr("[%u] found an empty region " 3260 "["PTR_FORMAT", "PTR_FORMAT")", 3261 _worker_id, p2i(bottom), p2i(limit)); 3262 } 3263 // The region was collected underneath our feet. 3264 // We set the finger to bottom to ensure that the bitmap 3265 // iteration that will follow this will not do anything. 3266 // (this is not a condition that holds when we set the region up, 3267 // as the region is not supposed to be empty in the first place) 3268 _finger = bottom; 3269 } else if (limit >= _region_limit) { 3270 assert(limit >= _finger, "peace of mind"); 3271 } else { 3272 assert(limit < _region_limit, "only way to get here"); 3273 // This can happen under some pretty unusual circumstances. An 3274 // evacuation pause empties the region underneath our feet (NTAMS 3275 // at bottom). We then do some allocation in the region (NTAMS 3276 // stays at bottom), followed by the region being used as a GC 3277 // alloc region (NTAMS will move to top() and the objects 3278 // originally below it will be grayed). All objects now marked in 3279 // the region are explicitly grayed, if below the global finger, 3280 // and, in fact, we do not need to scan anything else. So, we simply 3281 // set _finger to be limit to ensure that the bitmap iteration 3282 // doesn't do anything. 3283 _finger = limit; 3284 } 3285 3286 _region_limit = limit; 3287 } 3288 3289 void CMTask::giveup_current_region() { 3290 assert(_curr_region != NULL, "invariant"); 3291 if (_cm->verbose_low()) { 3292 gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT, 3293 _worker_id, p2i(_curr_region)); 3294 } 3295 clear_region_fields(); 3296 } 3297 3298 void CMTask::clear_region_fields() { 3299 // Values for these three fields indicate that we're not 3300 // holding on to a region.
3301 _curr_region = NULL; 3302 _finger = NULL; 3303 _region_limit = NULL; 3304 } 3305 3306 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 3307 if (cm_oop_closure == NULL) { 3308 assert(_cm_oop_closure != NULL, "invariant"); 3309 } else { 3310 assert(_cm_oop_closure == NULL, "invariant"); 3311 } 3312 _cm_oop_closure = cm_oop_closure; 3313 } 3314 3315 void CMTask::reset(CMBitMap* nextMarkBitMap) { 3316 guarantee(nextMarkBitMap != NULL, "invariant"); 3317 3318 if (_cm->verbose_low()) { 3319 gclog_or_tty->print_cr("[%u] resetting", _worker_id); 3320 } 3321 3322 _nextMarkBitMap = nextMarkBitMap; 3323 clear_region_fields(); 3324 3325 _calls = 0; 3326 _elapsed_time_ms = 0.0; 3327 _termination_time_ms = 0.0; 3328 _termination_start_time_ms = 0.0; 3329 3330 #if _MARKING_STATS_ 3331 _aborted = 0; 3332 _aborted_overflow = 0; 3333 _aborted_cm_aborted = 0; 3334 _aborted_yield = 0; 3335 _aborted_timed_out = 0; 3336 _aborted_satb = 0; 3337 _aborted_termination = 0; 3338 _steal_attempts = 0; 3339 _steals = 0; 3340 _local_pushes = 0; 3341 _local_pops = 0; 3342 _local_max_size = 0; 3343 _objs_scanned = 0; 3344 _global_pushes = 0; 3345 _global_pops = 0; 3346 _global_max_size = 0; 3347 _global_transfers_to = 0; 3348 _global_transfers_from = 0; 3349 _regions_claimed = 0; 3350 _objs_found_on_bitmap = 0; 3351 _satb_buffers_processed = 0; 3352 #endif // _MARKING_STATS_ 3353 } 3354 3355 bool CMTask::should_exit_termination() { 3356 regular_clock_call(); 3357 // This is called when we are in the termination protocol. We should 3358 // quit if, for some reason, this task wants to abort or the global 3359 // stack is not empty (this means that we can get work from it). 3360 return !_cm->mark_stack_empty() || has_aborted(); 3361 } 3362 3363 void CMTask::reached_limit() { 3364 assert(_words_scanned >= _words_scanned_limit || 3365 _refs_reached >= _refs_reached_limit , 3366 "shouldn't have been called otherwise"); 3367 regular_clock_call(); 3368 } 3369 3370 void CMTask::regular_clock_call() { 3371 if (has_aborted()) return; 3372 3373 // First, we need to recalculate the words scanned and refs reached 3374 // limits for the next clock call. 3375 recalculate_limits(); 3376 3377 // During the regular clock call we do the following 3378 3379 // (1) If an overflow has been flagged, then we abort. 3380 if (_cm->has_overflown()) { 3381 set_has_aborted(); 3382 return; 3383 } 3384 3385 // If we are not concurrent (i.e. we're doing remark) we don't need 3386 // to check anything else. The other steps are only needed during 3387 // the concurrent marking phase. 3388 if (!concurrent()) return; 3389 3390 // (2) If marking has been aborted for Full GC, then we also abort. 3391 if (_cm->has_aborted()) { 3392 set_has_aborted(); 3393 statsOnly( ++_aborted_cm_aborted ); 3394 return; 3395 } 3396 3397 double curr_time_ms = os::elapsedVTime() * 1000.0; 3398 3399 // (3) If marking stats are enabled, then we update the step history. 
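// A note on the statsOnly(...) wrapper used below and throughout this file: it presumably expands to its argument only when _MARKING_STATS_ is defined (the actual definition lives in the header), along the lines of: // #if _MARKING_STATS_ // #define statsOnly(statement) do { statement } while (0) // #else // #define statsOnly(statement) // #endif // so all of this bookkeeping vanishes from builds without marking stats.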
3400 #if _MARKING_STATS_ 3401 if (_words_scanned >= _words_scanned_limit) { 3402 ++_clock_due_to_scanning; 3403 } 3404 if (_refs_reached >= _refs_reached_limit) { 3405 ++_clock_due_to_marking; 3406 } 3407 3408 double last_interval_ms = curr_time_ms - _interval_start_time_ms; 3409 _interval_start_time_ms = curr_time_ms; 3410 _all_clock_intervals_ms.add(last_interval_ms); 3411 3412 if (_cm->verbose_medium()) { 3413 gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, " 3414 "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s", 3415 _worker_id, last_interval_ms, 3416 _words_scanned, 3417 (_words_scanned >= _words_scanned_limit) ? " (*)" : "", 3418 _refs_reached, 3419 (_refs_reached >= _refs_reached_limit) ? " (*)" : ""); 3420 } 3421 #endif // _MARKING_STATS_ 3422 3423 // (4) We check whether we should yield. If we have to, then we abort. 3424 if (SuspendibleThreadSet::should_yield()) { 3425 // We should yield. To do this we abort the task. The caller is 3426 // responsible for yielding. 3427 set_has_aborted(); 3428 statsOnly( ++_aborted_yield ); 3429 return; 3430 } 3431 3432 // (5) We check whether we've reached our time quota. If we have, 3433 // then we abort. 3434 double elapsed_time_ms = curr_time_ms - _start_time_ms; 3435 if (elapsed_time_ms > _time_target_ms) { 3436 set_has_aborted(); 3437 _has_timed_out = true; 3438 statsOnly( ++_aborted_timed_out ); 3439 return; 3440 } 3441 3442 // (6) Finally, we check whether there are enough completed SATB 3443 // buffers available for processing. If there are, we abort. 3444 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3445 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 3446 if (_cm->verbose_low()) { 3447 gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers", 3448 _worker_id); 3449 } 3450 // We need to process SATB buffers, so we'll abort and restart 3451 // the marking task 3452 set_has_aborted(); 3453 statsOnly( ++_aborted_satb ); 3454 return; 3455 } 3456 } 3457 3458 void CMTask::recalculate_limits() { 3459 _real_words_scanned_limit = _words_scanned + words_scanned_period; 3460 _words_scanned_limit = _real_words_scanned_limit; 3461 3462 _real_refs_reached_limit = _refs_reached + refs_reached_period; 3463 _refs_reached_limit = _real_refs_reached_limit; 3464 } 3465 3466 void CMTask::decrease_limits() { 3467 // This is called when we believe that we're going to do an infrequent 3468 // operation which will increase the per-byte scanned cost (i.e. move 3469 // entries to/from the global stack). It decreases the 3470 // scanning limits so that the clock is called earlier.
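// A worked example with a hypothetical period value (the real words_scanned_period is defined elsewhere): if words_scanned_period were 4096 and the limits were last recalculated at _words_scanned == W, then _real_words_scanned_limit == W + 4096 and the decreased _words_scanned_limit == W + 4096 - 3 * 4096 / 4 == W + 1024, i.e. the next clock call fires after only a quarter of a period. The refs_reached limit is lowered the same way.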
3471 3472 if (_cm->verbose_medium()) { 3473 gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id); 3474 } 3475 3476 _words_scanned_limit = _real_words_scanned_limit - 3477 3 * words_scanned_period / 4; 3478 _refs_reached_limit = _real_refs_reached_limit - 3479 3 * refs_reached_period / 4; 3480 } 3481 3482 void CMTask::move_entries_to_global_stack() { 3483 // local array where we'll store the entries that will be popped 3484 // from the local queue 3485 oop buffer[global_stack_transfer_size]; 3486 3487 int n = 0; 3488 oop obj; 3489 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 3490 buffer[n] = obj; 3491 ++n; 3492 } 3493 3494 if (n > 0) { 3495 // we popped at least one entry from the local queue 3496 3497 statsOnly( ++_global_transfers_to; _local_pops += n ); 3498 3499 if (!_cm->mark_stack_push(buffer, n)) { 3500 if (_cm->verbose_low()) { 3501 gclog_or_tty->print_cr("[%u] aborting due to global stack overflow", 3502 _worker_id); 3503 } 3504 set_has_aborted(); 3505 } else { 3506 // the transfer was successful 3507 3508 if (_cm->verbose_medium()) { 3509 gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack", 3510 _worker_id, n); 3511 } 3512 statsOnly( size_t tmp_size = _cm->mark_stack_size(); 3513 if (tmp_size > _global_max_size) { 3514 _global_max_size = tmp_size; 3515 } 3516 _global_pushes += n ); 3517 } 3518 } 3519 3520 // this operation was quite expensive, so decrease the limits 3521 decrease_limits(); 3522 } 3523 3524 void CMTask::get_entries_from_global_stack() { 3525 // local array where we'll store the entries that will be popped 3526 // from the global stack. 3527 oop buffer[global_stack_transfer_size]; 3528 int n; 3529 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 3530 assert(n <= global_stack_transfer_size, 3531 "we should not pop more than the given limit"); 3532 if (n > 0) { 3533 // yes, we did actually pop at least one entry 3534 3535 statsOnly( ++_global_transfers_from; _global_pops += n ); 3536 if (_cm->verbose_medium()) { 3537 gclog_or_tty->print_cr("[%u] popped %d entries from the global stack", 3538 _worker_id, n); 3539 } 3540 for (int i = 0; i < n; ++i) { 3541 bool success = _task_queue->push(buffer[i]); 3542 // We only call this when the local queue is empty or under a 3543 // given target limit. So, we do not expect this push to fail. 3544 assert(success, "invariant"); 3545 } 3546 3547 statsOnly( size_t tmp_size = (size_t)_task_queue->size(); 3548 if (tmp_size > _local_max_size) { 3549 _local_max_size = tmp_size; 3550 } 3551 _local_pushes += n ); 3552 } 3553 3554 // this operation was quite expensive, so decrease the limits 3555 decrease_limits(); 3556 } 3557 3558 void CMTask::drain_local_queue(bool partially) { 3559 if (has_aborted()) return; 3560 3561 // Decide what the target size is, depending on whether we're going to 3562 // drain it partially (so that other tasks can steal if they run out 3563 // of things to do) or totally (at the very end).
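// For illustration (hypothetical queue capacity): with a task queue that can hold 16384 entries, a partial drain below stops once the queue is down to MIN2(16384 / 3, GCDrainStackTargetSize) entries, leaving work available for other tasks to steal; a total drain uses a target size of zero.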
3564 size_t target_size; 3565 if (partially) { 3566 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 3567 } else { 3568 target_size = 0; 3569 } 3570 3571 if (_task_queue->size() > target_size) { 3572 if (_cm->verbose_high()) { 3573 gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT, 3574 _worker_id, target_size); 3575 } 3576 3577 oop obj; 3578 bool ret = _task_queue->pop_local(obj); 3579 while (ret) { 3580 statsOnly( ++_local_pops ); 3581 3582 if (_cm->verbose_high()) { 3583 gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id, 3584 p2i((void*) obj)); 3585 } 3586 3587 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant"); 3588 assert(!_g1h->is_on_master_free_list( 3589 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 3590 3591 scan_object(obj); 3592 3593 if (_task_queue->size() <= target_size || has_aborted()) { 3594 ret = false; 3595 } else { 3596 ret = _task_queue->pop_local(obj); 3597 } 3598 } 3599 3600 if (_cm->verbose_high()) { 3601 gclog_or_tty->print_cr("[%u] drained local queue, size = %u", 3602 _worker_id, _task_queue->size()); 3603 } 3604 } 3605 } 3606 3607 void CMTask::drain_global_stack(bool partially) { 3608 if (has_aborted()) return; 3609 3610 // We have a policy to drain the local queue before we attempt to 3611 // drain the global stack. 3612 assert(partially || _task_queue->size() == 0, "invariant"); 3613 3614 // Decide what the target size is, depending on whether we're going to 3615 // drain it partially (so that other tasks can steal if they run out 3616 // of things to do) or totally (at the very end). Notice that, 3617 // because we move entries from the global stack in chunks, or 3618 // because another task might be doing the same, we might in fact 3619 // drop below the target. But this is not a problem. 3620 size_t target_size; 3621 if (partially) { 3622 target_size = _cm->partial_mark_stack_size_target(); 3623 } else { 3624 target_size = 0; 3625 } 3626 3627 if (_cm->mark_stack_size() > target_size) { 3628 if (_cm->verbose_low()) { 3629 gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT, 3630 _worker_id, target_size); 3631 } 3632 3633 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 3634 get_entries_from_global_stack(); 3635 drain_local_queue(partially); 3636 } 3637 3638 if (_cm->verbose_low()) { 3639 gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT, 3640 _worker_id, _cm->mark_stack_size()); 3641 } 3642 } 3643 } 3644 3645 // The SATB queue code makes several assumptions about whether the par or 3646 // non-par versions of the methods are called. This is why some of the code is 3647 // replicated. We should really get rid of the single-threaded version 3648 // of the code to simplify things. 3649 void CMTask::drain_satb_buffers() { 3650 if (has_aborted()) return; 3651 3652 // We set this so that the regular clock knows that we're in the 3653 // middle of draining buffers and doesn't set the abort flag when it 3654 // notices that SATB buffers are available for draining. It'd be 3655 // very counterproductive if it did that. :-) 3656 _draining_satb_buffers = true; 3657 3658 CMSATBBufferClosure satb_cl(this, _g1h); 3659 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3660 3661 // This keeps claiming and applying the closure to completed buffers 3662 // until we run out of buffers or we need to abort.
3663 while (!has_aborted() && 3664 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 3665 if (_cm->verbose_medium()) { 3666 gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id); 3667 } 3668 statsOnly( ++_satb_buffers_processed ); 3669 regular_clock_call(); 3670 } 3671 3672 _draining_satb_buffers = false; 3673 3674 assert(has_aborted() || 3675 concurrent() || 3676 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3677 3678 // again, this was a potentially expensive operation, decrease the 3679 // limits to get the regular clock call early 3680 decrease_limits(); 3681 } 3682 3683 void CMTask::print_stats() { 3684 gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", 3685 _worker_id, _calls); 3686 gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3687 _elapsed_time_ms, _termination_time_ms); 3688 gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3689 _step_times_ms.num(), _step_times_ms.avg(), 3690 _step_times_ms.sd()); 3691 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3692 _step_times_ms.maximum(), _step_times_ms.sum()); 3693 3694 #if _MARKING_STATS_ 3695 gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3696 _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(), 3697 _all_clock_intervals_ms.sd()); 3698 gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", 3699 _all_clock_intervals_ms.maximum(), 3700 _all_clock_intervals_ms.sum()); 3701 gclog_or_tty->print_cr(" Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT, 3702 _clock_due_to_scanning, _clock_due_to_marking); 3703 gclog_or_tty->print_cr(" Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT, 3704 _objs_scanned, _objs_found_on_bitmap); 3705 gclog_or_tty->print_cr(" Local Queue: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3706 _local_pushes, _local_pops, _local_max_size); 3707 gclog_or_tty->print_cr(" Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, 3708 _global_pushes, _global_pops, _global_max_size); 3709 gclog_or_tty->print_cr(" transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT, 3710 _global_transfers_to,_global_transfers_from); 3711 gclog_or_tty->print_cr(" Regions: claimed = " SIZE_FORMAT, _regions_claimed); 3712 gclog_or_tty->print_cr(" SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed); 3713 gclog_or_tty->print_cr(" Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT, 3714 _steal_attempts, _steals); 3715 gclog_or_tty->print_cr(" Aborted: " SIZE_FORMAT ", due to", _aborted); 3716 gclog_or_tty->print_cr(" overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT, 3717 _aborted_overflow, _aborted_cm_aborted, _aborted_yield); 3718 gclog_or_tty->print_cr(" time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT, 3719 _aborted_timed_out, _aborted_satb, _aborted_termination); 3720 #endif // _MARKING_STATS_ 3721 } 3722 3723 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) { 3724 return _task_queues->steal(worker_id, hash_seed, obj); 3725 } 3726 3727 /***************************************************************************** 3728 3729 The do_marking_step(time_target_ms, ...) method is the building 3730 block of the parallel marking framework. 
It can be called in parallel 3731 with other invocations of do_marking_step() on different tasks 3732 (but only one per task, obviously) and concurrently with the 3733 mutator threads, or during remark, hence it eliminates the need 3734 for two versions of the code. When called during remark, it will 3735 pick up from where the task left off during the concurrent marking 3736 phase. Interestingly, tasks are also claimable during evacuation 3737 pauses, since do_marking_step() ensures that it aborts before 3738 it needs to yield. 3739 3740 The data structures that it uses to do marking work are the 3741 following: 3742 3743 (1) Marking Bitmap. If there are gray objects that appear only 3744 on the bitmap (this happens either when dealing with an overflow 3745 or when the initial marking phase has simply marked the roots 3746 and didn't push them on the stack), then tasks claim heap 3747 regions whose bitmap they then scan to find gray objects. A 3748 global finger indicates where the end of the last claimed region 3749 is. A local finger indicates how far into the region a task has 3750 scanned. The two fingers are used to determine how to gray an 3751 object (i.e. whether simply marking it is OK, as it will be 3752 visited by a task in the future, or whether it also needs to be 3753 pushed on a stack). 3754 3755 (2) Local Queue. Each task has a local queue, which it can 3756 access reasonably efficiently. Other tasks can steal from 3757 it when they run out of work. Throughout the marking phase, a 3758 task attempts to keep its local queue short but not totally 3759 empty, so that entries are available for stealing by other 3760 tasks. Only when there is no more work will a task totally 3761 drain its local queue. 3762 3763 (3) Global Mark Stack. This handles local queue overflow. During 3764 marking only sets of entries are moved between it and the local 3765 queues, as access to it requires a mutex, and finer-grained 3766 interaction with it might cause contention. If it 3767 overflows, then the marking phase should restart and iterate 3768 over the bitmap to identify gray objects. Throughout the marking 3769 phase, tasks attempt to keep the global mark stack at a small 3770 length but not totally empty, so that entries are available for 3771 popping by other tasks. Only when there is no more work will tasks 3772 totally drain the global mark stack. 3773 3774 (4) SATB Buffer Queue. This is where completed SATB buffers are 3775 made available. Buffers are regularly removed from this queue 3776 and scanned for roots, so that the queue doesn't get too 3777 long. During remark, all completed buffers are processed, as 3778 well as the filled-in parts of any uncompleted buffers. 3779 3780 The do_marking_step() method tries to abort when the time target 3781 has been reached. There are a few other cases when the 3782 do_marking_step() method also aborts: 3783 3784 (1) When the marking phase has been aborted (after a Full GC). 3785 3786 (2) When a global overflow (on the global stack) has been 3787 triggered. Before the task aborts, it will actually sync up with 3788 the other tasks to ensure that all the marking data structures 3789 (local queues, stacks, fingers, etc.) are re-initialized so that 3790 when do_marking_step() completes, the marking phase can 3791 immediately restart. 3792 3793 (3) When enough completed SATB buffers are available. The 3794 do_marking_step() method only tries to drain SATB buffers right 3795 at the beginning.
So, if enough buffers are available, the 3796 marking step aborts and the SATB buffers are processed at 3797 the beginning of the next invocation. 3798 3799 (4) To yield. When we have to yield, we abort and yield 3800 right at the end of do_marking_step(). This saves us a lot 3801 of hassle as, by yielding, we might allow a Full GC. If this 3802 happens, then objects will be compacted underneath our feet, the 3803 heap might shrink, etc. We avoid checking for all of this by just 3804 aborting and doing the yield right at the end. 3805 3806 From the above it follows that the do_marking_step() method should 3807 be called in a loop (or, otherwise, regularly) until it completes. 3808 3809 If a marking step completes without its has_aborted() flag being 3810 true, it means it has completed the current marking phase (and 3811 also all other marking tasks have done so and have all synced up). 3812 3813 A method called regular_clock_call() is invoked "regularly" (in 3814 sub-ms intervals) throughout marking. It is this clock method that 3815 checks all the abort conditions which were mentioned above and 3816 decides when the task should abort. A work-based scheme is used to 3817 trigger this clock method: it is invoked when the number of object words the 3818 marking phase has scanned or the number of references the marking 3819 phase has visited reaches a given limit. Additional invocations of 3820 the clock method have been planted in a few other strategic places 3821 too. The initial reason for the clock method was to avoid calling 3822 vtime too regularly, as it is quite expensive. So, once it was in 3823 place, it was natural to piggy-back all the other conditions on it 3824 too and not constantly check them throughout the code. 3825 3826 If do_termination is true then do_marking_step will enter its 3827 termination protocol. 3828 3829 The value of is_serial must be true when do_marking_step is being 3830 called serially (i.e. by the VMThread) and do_marking_step should 3831 skip any synchronization in the termination and overflow code. 3832 Examples include the serial remark code and the serial reference 3833 processing closures. 3834 3835 The value of is_serial must be false when do_marking_step is 3836 being called by any of the worker threads in a work gang. 3837 Examples include the concurrent marking code (CMMarkingTask), 3838 the MT remark code, and the MT reference processing closures. 3839 3840 *****************************************************************************/ 3841 3842 void CMTask::do_marking_step(double time_target_ms, 3843 bool do_termination, 3844 bool is_serial) { 3845 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 3846 assert(concurrent() == _cm->concurrent(), "they should be the same"); 3847 3848 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); 3849 assert(_task_queues != NULL, "invariant"); 3850 assert(_task_queue != NULL, "invariant"); 3851 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); 3852 3853 assert(!_claimed, 3854 "only one thread should claim this task at any one time"); 3855 3856 // OK, this doesn't safeguard against all possible scenarios, as it is 3857 // possible for two threads to set the _claimed flag at the same 3858 // time. But it is only for debugging purposes anyway and it will 3859 // catch most problems.
3860 _claimed = true; 3861 3862 _start_time_ms = os::elapsedVTime() * 1000.0; 3863 statsOnly( _interval_start_time_ms = _start_time_ms ); 3864 3865 // If do_stealing is true then do_marking_step will attempt to 3866 // steal work from the other CMTasks. It only makes sense to 3867 // enable stealing when the termination protocol is enabled 3868 // and do_marking_step() is not being called serially. 3869 bool do_stealing = do_termination && !is_serial; 3870 3871 double diff_prediction_ms = 3872 g1_policy->get_new_prediction(&_marking_step_diffs_ms); 3873 _time_target_ms = time_target_ms - diff_prediction_ms; 3874 3875 // set up the variables that are used in the work-based scheme to 3876 // call the regular clock method 3877 _words_scanned = 0; 3878 _refs_reached = 0; 3879 recalculate_limits(); 3880 3881 // clear all flags 3882 clear_has_aborted(); 3883 _has_timed_out = false; 3884 _draining_satb_buffers = false; 3885 3886 ++_calls; 3887 3888 if (_cm->verbose_low()) { 3889 gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, " 3890 "target = %1.2lfms >>>>>>>>>>", 3891 _worker_id, _calls, _time_target_ms); 3892 } 3893 3894 // Set up the bitmap and oop closures. Anything that uses them is 3895 // eventually called from this method, so it is OK to allocate these 3896 // statically. 3897 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 3898 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 3899 set_cm_oop_closure(&cm_oop_closure); 3900 3901 if (_cm->has_overflown()) { 3902 // This can happen if the mark stack overflows during a GC pause 3903 // and this task, after a yield point, restarts. We have to abort 3904 // as we need to get into the overflow protocol which happens 3905 // right at the end of this task. 3906 set_has_aborted(); 3907 } 3908 3909 // First drain any available SATB buffers. After this, we will not 3910 // look at SATB buffers before the next invocation of this method. 3911 // If enough completed SATB buffers are queued up, the regular clock 3912 // will abort this task so that it restarts. 3913 drain_satb_buffers(); 3914 // ...then partially drain the local queue and the global stack 3915 drain_local_queue(true); 3916 drain_global_stack(true); 3917 3918 do { 3919 if (!has_aborted() && _curr_region != NULL) { 3920 // This means that we're already holding on to a region. 3921 assert(_finger != NULL, "if region is not NULL, then the finger " 3922 "should not be NULL either"); 3923 3924 // We might have restarted this task after an evacuation pause 3925 // which might have evacuated the region we're holding on to 3926 // underneath our feet. Let's read its limit again to make sure 3927 // that we do not iterate over a region of the heap that 3928 // contains garbage (update_region_limit() will also move 3929 // _finger to the start of the region if it is found empty). 3930 update_region_limit(); 3931 // We will start from _finger not from the start of the region, 3932 // as we might be restarting this task after aborting half-way 3933 // through scanning this region. In this case, _finger points to 3934 // the address where we last found a marked object. If this is a 3935 // fresh region, _finger points to start(). 
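// To make the scan interval concrete: for a region [bottom, end), only [_finger, _region_limit) is handed to the bitmap iteration below; [bottom, _finger) has already been covered by earlier invocations of this task (for a fresh region _finger == bottom, so nothing is skipped).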
3936 MemRegion mr = MemRegion(_finger, _region_limit); 3937 3938 if (_cm->verbose_low()) { 3939 gclog_or_tty->print_cr("[%u] we're scanning part " 3940 "["PTR_FORMAT", "PTR_FORMAT") " 3941 "of region "HR_FORMAT, 3942 _worker_id, p2i(_finger), p2i(_region_limit), 3943 HR_FORMAT_PARAMS(_curr_region)); 3944 } 3945 3946 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 3947 "humongous regions should go around loop once only"); 3948 3949 // Some special cases: 3950 // If the memory region is empty, we can just give up the region. 3951 // If the current region is humongous then we only need to check 3952 // the bitmap for the bit associated with the start of the object, 3953 // scan the object if it's live, and give up the region. 3954 // Otherwise, let's iterate over the bitmap of the part of the region 3955 // that is left. 3956 // If the iteration is successful, give up the region. 3957 if (mr.is_empty()) { 3958 giveup_current_region(); 3959 regular_clock_call(); 3960 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 3961 if (_nextMarkBitMap->isMarked(mr.start())) { 3962 // The object is marked - apply the closure 3963 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 3964 bitmap_closure.do_bit(offset); 3965 } 3966 // Even if this task aborted while scanning the humongous object 3967 // we can (and should) give up the current region. 3968 giveup_current_region(); 3969 regular_clock_call(); 3970 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 3971 giveup_current_region(); 3972 regular_clock_call(); 3973 } else { 3974 assert(has_aborted(), "currently the only way to do so"); 3975 // The only way to abort the bitmap iteration is to return 3976 // false from the do_bit() method. However, inside the 3977 // do_bit() method we move the _finger to point to the 3978 // object currently being looked at. So, if we bail out, we 3979 // have definitely set _finger to something non-null. 3980 assert(_finger != NULL, "invariant"); 3981 3982 // Region iteration was actually aborted. So now _finger 3983 // points to the address of the object we last scanned. If we 3984 // leave it there, when we restart this task, we will rescan 3985 // the object. It is easy to avoid this. We move the finger by 3986 // enough to point to the next possible object header (the 3987 // bitmap knows by how much we need to move it as it knows its 3988 // granularity). 3989 assert(_finger < _region_limit, "invariant"); 3990 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 3991 // Check if bitmap iteration was aborted while scanning the last object 3992 if (new_finger >= _region_limit) { 3993 giveup_current_region(); 3994 } else { 3995 move_finger_to(new_finger); 3996 } 3997 } 3998 } 3999 // At this point we have either completed iterating over the 4000 // region we were holding on to, or we have aborted. 4001 4002 // We then partially drain the local queue and the global stack. 4003 // (Do we really need this?) 4004 drain_local_queue(true); 4005 drain_global_stack(true); 4006 4007 // Read the note on the claim_region() method on why it might 4008 // return NULL with potentially more regions available for 4009 // claiming and why we have to check out_of_regions() to determine 4010 // whether we're done or not. 4011 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 4012 // We are going to try to claim a new region. We should have 4013 // given up on the previous one. 
4014 // Separated the asserts so that we know which one fires. 4015 assert(_curr_region == NULL, "invariant"); 4016 assert(_finger == NULL, "invariant"); 4017 assert(_region_limit == NULL, "invariant"); 4018 if (_cm->verbose_low()) { 4019 gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id); 4020 } 4021 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 4022 if (claimed_region != NULL) { 4023 // Yes, we managed to claim one 4024 statsOnly( ++_regions_claimed ); 4025 4026 if (_cm->verbose_low()) { 4027 gclog_or_tty->print_cr("[%u] we successfully claimed " 4028 "region "PTR_FORMAT, 4029 _worker_id, p2i(claimed_region)); 4030 } 4031 4032 setup_for_region(claimed_region); 4033 assert(_curr_region == claimed_region, "invariant"); 4034 } 4035 // It is important to call the regular clock here. It might take 4036 // a while to claim a region if, for example, we hit a large 4037 // block of empty regions. So we need to call the regular clock 4038 // method once round the loop to make sure it's called 4039 // frequently enough. 4040 regular_clock_call(); 4041 } 4042 4043 if (!has_aborted() && _curr_region == NULL) { 4044 assert(_cm->out_of_regions(), 4045 "at this point we should be out of regions"); 4046 } 4047 } while (_curr_region != NULL && !has_aborted()); 4048 4049 if (!has_aborted()) { 4050 // We cannot check whether the global stack is empty, since other 4051 // tasks might be pushing objects to it concurrently. 4052 assert(_cm->out_of_regions(), 4053 "at this point we should be out of regions"); 4054 4055 if (_cm->verbose_low()) { 4056 gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id); 4057 } 4058 4059 // Try to reduce the number of available SATB buffers so that 4060 // remark has less work to do. 4061 drain_satb_buffers(); 4062 } 4063 4064 // Since we've done everything else, we can now totally drain the 4065 // local queue and global stack. 4066 drain_local_queue(false); 4067 drain_global_stack(false); 4068 4069 // Attempt at work stealing from other tasks' queues. 4070 if (do_stealing && !has_aborted()) { 4071 // We have not aborted. This means that we have finished all that 4072 // we could. Let's try to do some stealing... 4073 4074 // We cannot check whether the global stack is empty, since other 4075 // tasks might be pushing objects to it concurrently. 4076 assert(_cm->out_of_regions() && _task_queue->size() == 0, 4077 "only way to reach here"); 4078 4079 if (_cm->verbose_low()) { 4080 gclog_or_tty->print_cr("[%u] starting to steal", _worker_id); 4081 } 4082 4083 while (!has_aborted()) { 4084 oop obj; 4085 statsOnly( ++_steal_attempts ); 4086 4087 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 4088 if (_cm->verbose_medium()) { 4089 gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully", 4090 _worker_id, p2i((void*) obj)); 4091 } 4092 4093 statsOnly( ++_steals ); 4094 4095 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 4096 "any stolen object should be marked"); 4097 scan_object(obj); 4098 4099 // And since we're towards the end, let's totally drain the 4100 // local queue and global stack. 4101 drain_local_queue(false); 4102 drain_global_stack(false); 4103 } else { 4104 break; 4105 } 4106 } 4107 } 4108 4109 // If we are about to wrap up and go into termination, check if we 4110 // should raise the overflow flag. 4111 if (do_termination && !has_aborted()) { 4112 if (_cm->force_overflow()->should_force()) { 4113 _cm->set_has_overflown(); 4114 regular_clock_call(); 4115 } 4116 } 4117 4118 // We still haven't aborted.
Now, let's try to get into the 4119 // termination protocol. 4120 if (do_termination && !has_aborted()) { 4121 // We cannot check whether the global stack is empty, since other 4122 // tasks might be concurrently pushing objects on it. 4123 // Separated the asserts so that we know which one fires. 4124 assert(_cm->out_of_regions(), "only way to reach here"); 4125 assert(_task_queue->size() == 0, "only way to reach here"); 4126 4127 if (_cm->verbose_low()) { 4128 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); 4129 } 4130 4131 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 4132 4133 // The CMTask class also extends the TerminatorTerminator class, 4134 // hence its should_exit_termination() method will also decide 4135 // whether to exit the termination protocol or not. 4136 bool finished = (is_serial || 4137 _cm->terminator()->offer_termination(this)); 4138 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 4139 _termination_time_ms += 4140 termination_end_time_ms - _termination_start_time_ms; 4141 4142 if (finished) { 4143 // We're all done. 4144 4145 if (_worker_id == 0) { 4146 // let's allow task 0 to do this 4147 if (concurrent()) { 4148 assert(_cm->concurrent_marking_in_progress(), "invariant"); 4149 // we need to set this to false before the next 4150 // safepoint. This way we ensure that the marking phase 4151 // doesn't observe any more heap expansions. 4152 _cm->clear_concurrent_marking_in_progress(); 4153 } 4154 } 4155 4156 // We can now guarantee that the global stack is empty, since 4157 // all other tasks have finished. We separated the guarantees so 4158 // that, if a condition is false, we can immediately find out 4159 // which one. 4160 guarantee(_cm->out_of_regions(), "only way to reach here"); 4161 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 4162 guarantee(_task_queue->size() == 0, "only way to reach here"); 4163 guarantee(!_cm->has_overflown(), "only way to reach here"); 4164 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 4165 4166 if (_cm->verbose_low()) { 4167 gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id); 4168 } 4169 } else { 4170 // Apparently there's more work to do. Let's abort this task; it 4171 // will be restarted and we can hopefully find more things to do. 4172 4173 if (_cm->verbose_low()) { 4174 gclog_or_tty->print_cr("[%u] apparently there is more work to do", 4175 _worker_id); 4176 } 4177 4178 set_has_aborted(); 4179 statsOnly( ++_aborted_termination ); 4180 } 4181 } 4182 4183 // Mainly for debugging purposes, to make sure that a pointer to the 4184 // closure which was statically allocated in this frame doesn't 4185 // escape it by accident. 4186 set_cm_oop_closure(NULL); 4187 double end_time_ms = os::elapsedVTime() * 1000.0; 4188 double elapsed_time_ms = end_time_ms - _start_time_ms; 4189 // Update the step history. 4190 _step_times_ms.add(elapsed_time_ms); 4191 4192 if (has_aborted()) { 4193 // The task was aborted for some reason. 4194 4195 statsOnly( ++_aborted ); 4196 4197 if (_has_timed_out) { 4198 double diff_ms = elapsed_time_ms - _time_target_ms; 4199 // Keep statistics of how well we did with respect to hitting 4200 // our target only if we actually timed out (if we aborted for 4201 // other reasons, then the results might get skewed). 4202 _marking_step_diffs_ms.add(diff_ms); 4203 } 4204 4205 if (_cm->has_overflown()) { 4206 // This is the interesting one. We aborted because a global 4207 // overflow was raised.
This means we have to restart the 4208 // marking phase and start iterating over regions. However, in 4209 // order to do this we have to make sure that all tasks stop 4210 // what they are doing and re-initialize in a safe manner. We 4211 // will achieve this with the use of two barrier sync points. 4212 4213 if (_cm->verbose_low()) { 4214 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4215 } 4216 4217 if (!is_serial) { 4218 // We only need to enter the sync barrier if being called 4219 // from a parallel context 4220 _cm->enter_first_sync_barrier(_worker_id); 4221 4222 // When we exit this sync barrier we know that all tasks have 4223 // stopped doing marking work. So, it's now safe to 4224 // re-initialize our data structures. At the end of this method, 4225 // task 0 will clear the global data structures. 4226 } 4227 4228 statsOnly( ++_aborted_overflow ); 4229 4230 // We clear the local state of this task... 4231 clear_region_fields(); 4232 4233 if (!is_serial) { 4234 // ...and enter the second barrier. 4235 _cm->enter_second_sync_barrier(_worker_id); 4236 } 4237 // At this point, if we're during the concurrent phase of 4238 // marking, everything has been re-initialized and we're 4239 // ready to restart. 4240 } 4241 4242 if (_cm->verbose_low()) { 4243 gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, " 4244 "elapsed = %1.2lfms <<<<<<<<<<", 4245 _worker_id, _time_target_ms, elapsed_time_ms); 4246 if (_cm->has_aborted()) { 4247 gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========", 4248 _worker_id); 4249 } 4250 } 4251 } else { 4252 if (_cm->verbose_low()) { 4253 gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, " 4254 "elapsed = %1.2lfms <<<<<<<<<<", 4255 _worker_id, _time_target_ms, elapsed_time_ms); 4256 } 4257 } 4258 4259 _claimed = false; 4260 } 4261 4262 CMTask::CMTask(uint worker_id, 4263 ConcurrentMark* cm, 4264 size_t* marked_bytes, 4265 BitMap* card_bm, 4266 CMTaskQueue* task_queue, 4267 CMTaskQueueSet* task_queues) 4268 : _g1h(G1CollectedHeap::heap()), 4269 _worker_id(worker_id), _cm(cm), 4270 _claimed(false), 4271 _nextMarkBitMap(NULL), _hash_seed(17), 4272 _task_queue(task_queue), 4273 _task_queues(task_queues), 4274 _cm_oop_closure(NULL), 4275 _marked_bytes_array(marked_bytes), 4276 _card_bm(card_bm) { 4277 guarantee(task_queue != NULL, "invariant"); 4278 guarantee(task_queues != NULL, "invariant"); 4279 4280 statsOnly( _clock_due_to_scanning = 0; 4281 _clock_due_to_marking = 0 ); 4282 4283 _marking_step_diffs_ms.add(0.5); 4284 } 4285 4286 // These are formatting macros that are used below to ensure 4287 // consistent formatting. The *_H_* versions are used to format the 4288 // header for a particular value and they should be kept consistent 4289 // with the corresponding macro. Also note that most of the macros add 4290 // the necessary white space (as a prefix) which makes them a bit 4291 // easier to compose. 4292 4293 // All the output lines are prefixed with this string to be able to 4294 // identify them easily in a large log file. 
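// For example, a liveness dump built from the macros below looks roughly like the following (illustrative phase name, addresses, and values only): // ### PHASE Post-Marking @ 12.345 // ### HEAP reserved: 0x00000000e0000000-0x0000000100000000 region-size: 1048576 // ### // ### type address-range used prev-live next-live gc-eff remset code-roots // ### OLD 0x00000000e0000000-0x00000000e0100000 1048576 1032192 1032192 ...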
4295 #define G1PPRL_LINE_PREFIX "###" 4296 4297 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT 4298 #ifdef _LP64 4299 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 4300 #else // _LP64 4301 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 4302 #endif // _LP64 4303 4304 // For per-region info 4305 #define G1PPRL_TYPE_FORMAT " %-4s" 4306 #define G1PPRL_TYPE_H_FORMAT " %4s" 4307 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) 4308 #define G1PPRL_BYTE_H_FORMAT " %9s" 4309 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 4310 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 4311 4312 // For summary info 4313 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT 4314 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT 4315 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" 4316 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" 4317 4318 G1PrintRegionLivenessInfoClosure:: 4319 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) 4320 : _out(out), 4321 _total_used_bytes(0), _total_capacity_bytes(0), 4322 _total_prev_live_bytes(0), _total_next_live_bytes(0), 4323 _hum_used_bytes(0), _hum_capacity_bytes(0), 4324 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 4325 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 4326 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 4327 MemRegion g1_reserved = g1h->g1_reserved(); 4328 double now = os::elapsedTime(); 4329 4330 // Print the header of the output. 4331 _out->cr(); 4332 _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 4333 _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" 4334 G1PPRL_SUM_ADDR_FORMAT("reserved") 4335 G1PPRL_SUM_BYTE_FORMAT("region-size"), 4336 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 4337 HeapRegion::GrainBytes); 4338 _out->print_cr(G1PPRL_LINE_PREFIX); 4339 _out->print_cr(G1PPRL_LINE_PREFIX 4340 G1PPRL_TYPE_H_FORMAT 4341 G1PPRL_ADDR_BASE_H_FORMAT 4342 G1PPRL_BYTE_H_FORMAT 4343 G1PPRL_BYTE_H_FORMAT 4344 G1PPRL_BYTE_H_FORMAT 4345 G1PPRL_DOUBLE_H_FORMAT 4346 G1PPRL_BYTE_H_FORMAT 4347 G1PPRL_BYTE_H_FORMAT, 4348 "type", "address-range", 4349 "used", "prev-live", "next-live", "gc-eff", 4350 "remset", "code-roots"); 4351 _out->print_cr(G1PPRL_LINE_PREFIX 4352 G1PPRL_TYPE_H_FORMAT 4353 G1PPRL_ADDR_BASE_H_FORMAT 4354 G1PPRL_BYTE_H_FORMAT 4355 G1PPRL_BYTE_H_FORMAT 4356 G1PPRL_BYTE_H_FORMAT 4357 G1PPRL_DOUBLE_H_FORMAT 4358 G1PPRL_BYTE_H_FORMAT 4359 G1PPRL_BYTE_H_FORMAT, 4360 "", "", 4361 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 4362 "(bytes)", "(bytes)"); 4363 } 4364 4365 // It takes as a parameter a pointer to one of the _hum_* fields; it 4366 // deduces the corresponding value for a region in a humongous region 4367 // series (either the region size, or what's left if the _hum_* field 4368 // is < the region size), and updates the _hum_* field accordingly. 4369 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { 4370 size_t bytes = 0; 4371 // The > 0 check is to deal with the prev and next live bytes which 4372 // could be 0. 4373 if (*hum_bytes > 0) { 4374 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); 4375 *hum_bytes -= bytes; 4376 } 4377 return bytes; 4378 } 4379 4380 // It deduces the values for a region in a humongous region series 4381 // from the _hum_* fields and updates those accordingly. It assumes 4382 // that the _hum_* fields have already been set up from the "starts 4383 // humongous" region and that we visit the regions in address order.
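// A worked example of this distribution (hypothetical sizes): with a 1 MB region size and a humongous series whose "starts humongous" region seeded _hum_used_bytes with 2.5 MB, successive calls to get_hum_bytes() yield min(1 MB, 2.5 MB) = 1.0 MB (1.5 MB left), then min(1 MB, 1.5 MB) = 1.0 MB (0.5 MB left), then min(1 MB, 0.5 MB) = 0.5 MB (0 left), attributing the series' bytes region by region in address order.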
4384 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 4385 size_t* capacity_bytes, 4386 size_t* prev_live_bytes, 4387 size_t* next_live_bytes) { 4388 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 4389 *used_bytes = get_hum_bytes(&_hum_used_bytes); 4390 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 4391 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 4392 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 4393 } 4394 4395 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 4396 const char* type = r->get_type_str(); 4397 HeapWord* bottom = r->bottom(); 4398 HeapWord* end = r->end(); 4399 size_t capacity_bytes = r->capacity(); 4400 size_t used_bytes = r->used(); 4401 size_t prev_live_bytes = r->live_bytes(); 4402 size_t next_live_bytes = r->next_live_bytes(); 4403 double gc_eff = r->gc_efficiency(); 4404 size_t remset_bytes = r->rem_set()->mem_size(); 4405 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 4406 4407 if (r->is_starts_humongous()) { 4408 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 4409 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 4410 "they should have been zeroed after the last time we used them"); 4411 // Set up the _hum_* fields. 4412 _hum_capacity_bytes = capacity_bytes; 4413 _hum_used_bytes = used_bytes; 4414 _hum_prev_live_bytes = prev_live_bytes; 4415 _hum_next_live_bytes = next_live_bytes; 4416 get_hum_bytes(&used_bytes, &capacity_bytes, 4417 &prev_live_bytes, &next_live_bytes); 4418 end = bottom + HeapRegion::GrainWords; 4419 } else if (r->is_continues_humongous()) { 4420 get_hum_bytes(&used_bytes, &capacity_bytes, 4421 &prev_live_bytes, &next_live_bytes); 4422 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 4423 } 4424 4425 _total_used_bytes += used_bytes; 4426 _total_capacity_bytes += capacity_bytes; 4427 _total_prev_live_bytes += prev_live_bytes; 4428 _total_next_live_bytes += next_live_bytes; 4429 _total_remset_bytes += remset_bytes; 4430 _total_strong_code_roots_bytes += strong_code_roots_bytes; 4431 4432 // Print a line for this particular region. 4433 _out->print_cr(G1PPRL_LINE_PREFIX 4434 G1PPRL_TYPE_FORMAT 4435 G1PPRL_ADDR_BASE_FORMAT 4436 G1PPRL_BYTE_FORMAT 4437 G1PPRL_BYTE_FORMAT 4438 G1PPRL_BYTE_FORMAT 4439 G1PPRL_DOUBLE_FORMAT 4440 G1PPRL_BYTE_FORMAT 4441 G1PPRL_BYTE_FORMAT, 4442 type, p2i(bottom), p2i(end), 4443 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 4444 remset_bytes, strong_code_roots_bytes); 4445 4446 return false; 4447 } 4448 4449 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 4450 // add static memory usages to remembered set sizes 4451 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 4452 // Print the footer of the output. 
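// The footer looks roughly like the following (illustrative values only): // ### // ### SUMMARY capacity: 2048.00 MB used: 1234.56 MB / 60.28 % prev-live: ... next-live: ... remset: ... code-roots: ...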
4453 _out->print_cr(G1PPRL_LINE_PREFIX); 4454 _out->print_cr(G1PPRL_LINE_PREFIX 4455 " SUMMARY" 4456 G1PPRL_SUM_MB_FORMAT("capacity") 4457 G1PPRL_SUM_MB_PERC_FORMAT("used") 4458 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 4459 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 4460 G1PPRL_SUM_MB_FORMAT("remset") 4461 G1PPRL_SUM_MB_FORMAT("code-roots"), 4462 bytes_to_mb(_total_capacity_bytes), 4463 bytes_to_mb(_total_used_bytes), 4464 perc(_total_used_bytes, _total_capacity_bytes), 4465 bytes_to_mb(_total_prev_live_bytes), 4466 perc(_total_prev_live_bytes, _total_capacity_bytes), 4467 bytes_to_mb(_total_next_live_bytes), 4468 perc(_total_next_live_bytes, _total_capacity_bytes), 4469 bytes_to_mb(_total_remset_bytes), 4470 bytes_to_mb(_total_strong_code_roots_bytes)); 4471 _out->cr(); 4472 }