/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
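  // Each bit in the bitmap covers 2^_shifter heap words, so align addr to
  // that granularity (HeapWordSize << _shifter bytes) before converting it
  // into a bit offset.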
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;  // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer _hrclaimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
  }
};

void CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up the existing stack until we have managed to
  // get the doubled capacity that we desired.
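  // On success we can simply reset _index to 0 below: expand() is only
  // called while the stack is empty (see the assert above).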
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    // Failed to double capacity; continue with the existing stack.
    log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  _capacity / K, new_capacity / K);
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            "saved index: %d index: %d", _saved_index, _index);
  _saved_index = -1;
}

CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
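  // Unsynchronized peek at the next survivor; if it looks claimable we take
  // RootRegionScan_lock below and re-read it before actually claiming, so
  // racing claimers cannot hand out the same region twice.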
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::cancel_scan() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
           CardTableModRefBS::card_shift,
           false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _concurrent_phase_started(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (%u) "
            "than ParallelGCThreads (%u).",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
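    // MarkStackSize was set explicitly, so cross-check it against
    // MarkStackSizeMax, whether the latter is the default or was also
    // given on the command line.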
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();  // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that a Full GC or an evacuation pause could occur while
 * it is suspended.
 * This is actually safe, since entering the sync barrier is one of
 * the last things do_marking_step() does, and it doesn't manipulate
 * any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);

      log_info(gc)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check(worker_id);

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(
                                   max_parallel_marking_threads(),
                                   1, /* Minimum workers */
                                   parallel_marking_threads(),
                                   Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
 private:
  ConcurrentMark* _cm;

 public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
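    // scan_finished() clears scan_in_progress and notifies any thread
    // blocked in wait_until_scan_finished() that root region scanning
    // is done.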
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::register_concurrent_phase_start(const char* title) {
  assert(!_concurrent_phase_started, "Sanity");
  _concurrent_phase_started = true;
  _g1h->gc_timer_cm()->register_gc_concurrent_start(title);
}

void ConcurrentMark::register_concurrent_phase_end() {
  if (_concurrent_phase_started) {
    _concurrent_phase_started = false;
    _g1h->gc_timer_cm()->register_gc_concurrent_end();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    log_develop_trace(gc)("Remark led to restart for overflow.");

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      GCTraceTime(Debug, gc) trace("GC Aggregate Data", g1h->gc_timer_cm());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1.
  void set_bit_for_region(HeapRegion* hr) {
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    _region_bm->par_at_put(index, true);
  }

 public:
  CMCountDataClosureBase(G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {
    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           "Preconditions not met - "
           "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
           p2i(start), p2i(ntams), p2i(hr->end()));

    // Find the first marked object at or after "start".
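    // Walk the marked objects in [start, ntams), mark the cards each
    // object spans in the card bitmap, and accumulate the total live
    // bytes for this region.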
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // This will happen if we are handling a humongous object that spans
      // several heap regions.
      if (obj_end > hr->end()) {
        break;
      }
      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    if (exp_marked_bytes > act_marked_bytes) {
      if (hr->is_starts_humongous()) {
        // For start_humongous regions, the size of the whole object will be
        // in exp_marked_bytes.
        HeapRegion* region = hr;
        int num_regions;
        for (num_regions = 0; region != NULL; num_regions++) {
          region = _g1h->next_region_in_humongous(region);
        }
        if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) {
          failures += 1;
        } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) {
          failures += 1;
        }
      } else {
        // We're not OK if expected marked bytes > actual marked bytes. It means
        // we have missed accounting some objects during the actual marking.
        failures += 1;
      }
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        failures += 1;
      }
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};

class G1ParVerifyFinalCountTask: public AbstractGangTask {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int _failures;

  HeapRegionClaimer _hrclaimer;

 public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
    assert(VerifyDuringGC, "don't call this otherwise");
    assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
    assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    VerifyLiveObjectDataHRClosure verify_cl(_g1h,
                                            _actual_region_bm, _actual_card_bm,
                                            _expected_region_bm,
                                            _expected_card_bm);

    _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);

    Atomic::add(verify_cl.failures(), &_failures);
  }

  int failures() const { return _failures; }
};

// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top]
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region,
// containing live data, in the region liveness bitmap.

class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
 public:
  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                              BitMap* region_bm,
                              BitMap* card_bm) :
    CMCountDataClosureBase(g1h, region_bm, card_bm) { }

  bool doHeapRegion(HeapRegion* hr) {
    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* top   = hr->top();

    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

    // Mark the allocated-since-marking portion...
    if (ntams < top) {
      // This definitely means the region has live objects.
      set_bit_for_region(hr);

      // Now set the bits in the card bitmap for [ntams, top)
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      assert(end_idx <= _card_bm->size(),
             "oob: end_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
             end_idx, _card_bm->size());
      assert(start_idx < _card_bm->size(),
             "oob: start_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
             start_idx, _card_bm->size());

      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
    }

    // Set the bit for the region if it contains live data
    if (hr->next_marked_bytes() > 0) {
      set_bit_for_region(hr);
    }

    return false;
  }
};

class G1ParFinalCountTask: public AbstractGangTask {
 protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint _n_workers;
  HeapRegionClaimer _hrclaimer;

 public:
  G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
    : AbstractGangTask("G1 final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    FinalCountDataUpdateClosure final_update_cl(_g1h,
                                                _actual_region_bm,
                                                _actual_card_bm);

    _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
  }
};

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

 public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    // We use a claim value of zero here because all regions
    // were claimed with value 1 in the FinalCount task.
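    // A non-young region that is used but has no live bytes after marking
    // is completely garbage: free it (humongous or old) into the local
    // cleanup list; otherwise just clean up its remembered set.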
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

 protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

 public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
      AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
1610 if (has_aborted()) { 1611 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused 1612 return; 1613 } 1614 1615 g1h->verifier()->verify_region_sets_optional(); 1616 1617 if (VerifyDuringGC) { 1618 HandleMark hm; // handle scope 1619 g1h->prepare_for_verify(); 1620 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)"); 1621 } 1622 g1h->verifier()->check_bitmaps("Cleanup Start"); 1623 1624 G1CollectorPolicy* g1p = g1h->g1_policy(); 1625 g1p->record_concurrent_mark_cleanup_start(); 1626 1627 double start = os::elapsedTime(); 1628 1629 HeapRegionRemSet::reset_for_cleanup_tasks(); 1630 1631 // Do counting once more with the world stopped for good measure. 1632 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 1633 1634 g1h->workers()->run_task(&g1_par_count_task); 1635 1636 if (VerifyDuringGC) { 1637 // Verify that the counting data accumulated during marking matches 1638 // that calculated by walking the marking bitmap. 1639 1640 // Bitmaps to hold expected values 1641 BitMap expected_region_bm(_region_bm.size(), true); 1642 BitMap expected_card_bm(_card_bm.size(), true); 1643 1644 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 1645 &_region_bm, 1646 &_card_bm, 1647 &expected_region_bm, 1648 &expected_card_bm); 1649 1650 g1h->workers()->run_task(&g1_par_verify_task); 1651 1652 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); 1653 } 1654 1655 size_t start_used_bytes = g1h->used(); 1656 g1h->collector_state()->set_mark_in_progress(false); 1657 1658 double count_end = os::elapsedTime(); 1659 double this_final_counting_time = (count_end - start); 1660 _total_counting_time += this_final_counting_time; 1661 1662 if (log_is_enabled(Trace, gc, liveness)) { 1663 G1PrintRegionLivenessInfoClosure cl("Post-Marking"); 1664 _g1h->heap_region_iterate(&cl); 1665 } 1666 1667 // Install newly created mark bitMap as "prev". 1668 swapMarkBitMaps(); 1669 1670 g1h->reset_gc_time_stamp(); 1671 1672 uint n_workers = _g1h->workers()->active_workers(); 1673 1674 // Note end of marking in all heap regions. 1675 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers); 1676 g1h->workers()->run_task(&g1_par_note_end_task); 1677 g1h->check_gc_time_stamps(); 1678 1679 if (!cleanup_list_is_empty()) { 1680 // The cleanup list is not empty, so we'll have to process it 1681 // concurrently. Notify anyone else that might be wanting free 1682 // regions that there will be more free regions coming soon. 1683 g1h->set_free_regions_coming(); 1684 } 1685 1686 // call below, since it affects the metric by which we sort the heap 1687 // regions. 1688 if (G1ScrubRemSets) { 1689 double rs_scrub_start = os::elapsedTime(); 1690 g1h->scrub_rem_set(&_region_bm, &_card_bm); 1691 _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start); 1692 } 1693 1694 // this will also free any regions totally full of garbage objects, 1695 // and sort the regions. 1696 g1h->g1_policy()->record_concurrent_mark_cleanup_end(); 1697 1698 // Statistics. 1699 double end = os::elapsedTime(); 1700 _cleanup_times.add((end - start) * 1000.0); 1701 1702 // Clean up will have freed any regions completely full of garbage. 1703 // Update the soft reference policy with the new heap occupancy. 
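  // Note: the "During GC (after)" verification below runs against the
  // prev marking bitmap, which at this point holds the results of the
  // marking cycle that has just completed (see swapMarkBitMaps() above).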
1704 Universe::update_heap_info_at_gc(); 1705 1706 if (VerifyDuringGC) { 1707 HandleMark hm; // handle scope 1708 g1h->prepare_for_verify(); 1709 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)"); 1710 } 1711 1712 g1h->verifier()->check_bitmaps("Cleanup End"); 1713 1714 g1h->verifier()->verify_region_sets_optional(); 1715 1716 // We need to make this be a "collection" so any collection pause that 1717 // races with it goes around and waits for completeCleanup to finish. 1718 g1h->increment_total_collections(); 1719 1720 // Clean out dead classes and update Metaspace sizes. 1721 if (ClassUnloadingWithConcurrentMark) { 1722 ClassLoaderDataGraph::purge(); 1723 } 1724 MetaspaceGC::compute_new_size(); 1725 1726 // We reclaimed old regions so we should calculate the sizes to make 1727 // sure we update the old gen/space data. 1728 g1h->g1mm()->update_sizes(); 1729 g1h->allocation_context_stats().update_after_mark(); 1730 1731 g1h->trace_heap_after_concurrent_cycle(); 1732 } 1733 1734 void ConcurrentMark::completeCleanup() { 1735 if (has_aborted()) return; 1736 1737 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1738 1739 _cleanup_list.verify_optional(); 1740 FreeRegionList tmp_free_list("Tmp Free List"); 1741 1742 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : " 1743 "cleanup list has %u entries", 1744 _cleanup_list.length()); 1745 1746 // No one else should be accessing the _cleanup_list at this point, 1747 // so it is not necessary to take any locks 1748 while (!_cleanup_list.is_empty()) { 1749 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */); 1750 assert(hr != NULL, "Got NULL from a non-empty list"); 1751 hr->par_clear(); 1752 tmp_free_list.add_ordered(hr); 1753 1754 // Instead of adding one region at a time to the secondary_free_list, 1755 // we accumulate them in the local list and move them a few at a 1756 // time. This also cuts down on the number of notify_all() calls 1757 // we do during this process. We'll also append the local list when 1758 // _cleanup_list is empty (which means we just removed the last 1759 // region from the _cleanup_list). 1760 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || 1761 _cleanup_list.is_empty()) { 1762 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : " 1763 "appending %u entries to the secondary_free_list, " 1764 "cleanup list still has %u entries", 1765 tmp_free_list.length(), 1766 _cleanup_list.length()); 1767 1768 { 1769 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 1770 g1h->secondary_free_list_add(&tmp_free_list); 1771 SecondaryFreeList_lock->notify_all(); 1772 } 1773 #ifndef PRODUCT 1774 if (G1StressConcRegionFreeing) { 1775 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { 1776 os::sleep(Thread::current(), (jlong) 1, false); 1777 } 1778 } 1779 #endif 1780 } 1781 } 1782 assert(tmp_free_list.is_empty(), "post-condition"); 1783 } 1784 1785 // Supporting Object and Oop closures for reference discovery 1786 // and processing in during marking 1787 1788 bool G1CMIsAliveClosure::do_object_b(oop obj) { 1789 HeapWord* addr = (HeapWord*)obj; 1790 return addr != NULL && 1791 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); 1792 } 1793 1794 // 'Keep Alive' oop closure used by both serial parallel reference processing. 
1795 // Uses the CMTask associated with a worker thread (for serial reference 1796 // processing the CMTask for worker 0 is used) to preserve (mark) and 1797 // trace referent objects. 1798 // 1799 // Using the CMTask and embedded local queues avoids having the worker 1800 // threads operating on the global mark stack. This reduces the risk 1801 // of overflowing the stack - which we would rather avoid at this late 1802 // state. Also using the tasks' local queues removes the potential 1803 // of the workers interfering with each other that could occur if 1804 // operating on the global stack. 1805 1806 class G1CMKeepAliveAndDrainClosure: public OopClosure { 1807 ConcurrentMark* _cm; 1808 CMTask* _task; 1809 int _ref_counter_limit; 1810 int _ref_counter; 1811 bool _is_serial; 1812 public: 1813 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 1814 _cm(cm), _task(task), _is_serial(is_serial), 1815 _ref_counter_limit(G1RefProcDrainInterval) { 1816 assert(_ref_counter_limit > 0, "sanity"); 1817 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 1818 _ref_counter = _ref_counter_limit; 1819 } 1820 1821 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 1822 virtual void do_oop( oop* p) { do_oop_work(p); } 1823 1824 template <class T> void do_oop_work(T* p) { 1825 if (!_cm->has_overflown()) { 1826 oop obj = oopDesc::load_decode_heap_oop(p); 1827 _task->deal_with_reference(obj); 1828 _ref_counter--; 1829 1830 if (_ref_counter == 0) { 1831 // We have dealt with _ref_counter_limit references, pushing them 1832 // and objects reachable from them on to the local stack (and 1833 // possibly the global stack). Call CMTask::do_marking_step() to 1834 // process these entries. 1835 // 1836 // We call CMTask::do_marking_step() in a loop, which we'll exit if 1837 // there's nothing more to do (i.e. we're done with the entries that 1838 // were pushed as a result of the CMTask::deal_with_reference() calls 1839 // above) or we overflow. 1840 // 1841 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 1842 // flag while there may still be some work to do. (See the comment at 1843 // the beginning of CMTask::do_marking_step() for those conditions - 1844 // one of which is reaching the specified time target.) It is only 1845 // when CMTask::do_marking_step() returns without setting the 1846 // has_aborted() flag that the marking step has completed. 1847 do { 1848 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 1849 _task->do_marking_step(mark_step_duration_ms, 1850 false /* do_termination */, 1851 _is_serial); 1852 } while (_task->has_aborted() && !_cm->has_overflown()); 1853 _ref_counter = _ref_counter_limit; 1854 } 1855 } 1856 } 1857 }; 1858 1859 // 'Drain' oop closure used by both serial and parallel reference processing. 1860 // Uses the CMTask associated with a given worker thread (for serial 1861 // reference processing the CMtask for worker 0 is used). Calls the 1862 // do_marking_step routine, with an unbelievably large timeout value, 1863 // to drain the marking data structures of the remaining entries 1864 // added by the 'keep alive' oop closure above. 
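// During reference processing the ReferenceProcessor alternates between
// the 'keep alive' closure (to mark referents that must be kept live) and
// this 'drain' closure (as its complete_gc step), so that everything newly
// marked is traced before the next batch of references is examined.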
1865 1866 class G1CMDrainMarkingStackClosure: public VoidClosure { 1867 ConcurrentMark* _cm; 1868 CMTask* _task; 1869 bool _is_serial; 1870 public: 1871 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : 1872 _cm(cm), _task(task), _is_serial(is_serial) { 1873 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 1874 } 1875 1876 void do_void() { 1877 do { 1878 // We call CMTask::do_marking_step() to completely drain the local 1879 // and global marking stacks of entries pushed by the 'keep alive' 1880 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). 1881 // 1882 // CMTask::do_marking_step() is called in a loop, which we'll exit 1883 // if there's nothing more to do (i.e. we've completely drained the 1884 // entries that were pushed as a result of applying the 'keep alive' 1885 // closure to the entries on the discovered ref lists) or we overflow 1886 // the global marking stack. 1887 // 1888 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 1889 // flag while there may still be some work to do. (See the comment at 1890 // the beginning of CMTask::do_marking_step() for those conditions - 1891 // one of which is reaching the specified time target.) It is only 1892 // when CMTask::do_marking_step() returns without setting the 1893 // has_aborted() flag that the marking step has completed. 1894 1895 _task->do_marking_step(1000000000.0 /* something very large */, 1896 true /* do_termination */, 1897 _is_serial); 1898 } while (_task->has_aborted() && !_cm->has_overflown()); 1899 } 1900 }; 1901 1902 // Implementation of AbstractRefProcTaskExecutor for parallel 1903 // reference processing at the end of G1 concurrent marking. 1904 1905 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 1906 private: 1907 G1CollectedHeap* _g1h; 1908 ConcurrentMark* _cm; 1909 WorkGang* _workers; 1910 uint _active_workers; 1911 1912 public: 1913 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 1914 ConcurrentMark* cm, 1915 WorkGang* workers, 1916 uint n_workers) : 1917 _g1h(g1h), _cm(cm), 1918 _workers(workers), _active_workers(n_workers) { } 1919 1920 // Executes the given task using concurrent marking worker threads.
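  // These two overrides are the hook by which the reference processor
  // hands its ProcessTask and EnqueueTask work over to G1's work gang;
  // each gang worker then runs the task with its own CMTask (see the
  // proxy classes below).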
1921 virtual void execute(ProcessTask& task); 1922 virtual void execute(EnqueueTask& task); 1923 }; 1924 1925 class G1CMRefProcTaskProxy: public AbstractGangTask { 1926 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 1927 ProcessTask& _proc_task; 1928 G1CollectedHeap* _g1h; 1929 ConcurrentMark* _cm; 1930 1931 public: 1932 G1CMRefProcTaskProxy(ProcessTask& proc_task, 1933 G1CollectedHeap* g1h, 1934 ConcurrentMark* cm) : 1935 AbstractGangTask("Process reference objects in parallel"), 1936 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 1937 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1938 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 1939 } 1940 1941 virtual void work(uint worker_id) { 1942 ResourceMark rm; 1943 HandleMark hm; 1944 CMTask* task = _cm->task(worker_id); 1945 G1CMIsAliveClosure g1_is_alive(_g1h); 1946 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 1947 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 1948 1949 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 1950 } 1951 }; 1952 1953 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 1954 assert(_workers != NULL, "Need parallel worker threads."); 1955 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 1956 1957 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 1958 1959 // We need to reset the concurrency level before each 1960 // proxy task execution, so that the termination protocol 1961 // and overflow handling in CMTask::do_marking_step() knows 1962 // how many workers to wait for. 1963 _cm->set_concurrency(_active_workers); 1964 _workers->run_task(&proc_task_proxy); 1965 } 1966 1967 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 1968 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 1969 EnqueueTask& _enq_task; 1970 1971 public: 1972 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 1973 AbstractGangTask("Enqueue reference objects in parallel"), 1974 _enq_task(enq_task) { } 1975 1976 virtual void work(uint worker_id) { 1977 _enq_task.work(worker_id); 1978 } 1979 }; 1980 1981 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 1982 assert(_workers != NULL, "Need parallel worker threads."); 1983 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 1984 1985 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 1986 1987 // Not strictly necessary but... 1988 // 1989 // We need to reset the concurrency level before each 1990 // proxy task execution, so that the termination protocol 1991 // and overflow handling in CMTask::do_marking_step() knows 1992 // how many workers to wait for. 1993 _cm->set_concurrency(_active_workers); 1994 _workers->run_task(&enq_task_proxy); 1995 } 1996 1997 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 1998 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 1999 } 2000 2001 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2002 if (has_overflown()) { 2003 // Skip processing the discovered references if we have 2004 // overflown the global marking stack. Reference objects 2005 // only get discovered once so it is OK to not 2006 // de-populate the discovered reference lists. We could have, 2007 // but the only benefit would be that, when marking restarts, 2008 // less reference objects are discovered. 
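    // The remark pause will observe the overflow and restart concurrent
    // marking; the references stay on the discovered lists and will be
    // processed by the next, successful remark.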
2009 return; 2010 } 2011 2012 ResourceMark rm; 2013 HandleMark hm; 2014 2015 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2016 2017 // Is alive closure. 2018 G1CMIsAliveClosure g1_is_alive(g1h); 2019 2020 // Inner scope to exclude the cleaning of the string and symbol 2021 // tables from the displayed time. 2022 { 2023 GCTraceTime(Debug, gc) trace("GC Ref Proc", g1h->gc_timer_cm()); 2024 2025 ReferenceProcessor* rp = g1h->ref_processor_cm(); 2026 2027 // See the comment in G1CollectedHeap::ref_processing_init() 2028 // about how reference processing currently works in G1. 2029 2030 // Set the soft reference policy 2031 rp->setup_policy(clear_all_soft_refs); 2032 assert(_markStack.isEmpty(), "mark stack should be empty"); 2033 2034 // Instances of the 'Keep Alive' and 'Complete GC' closures used 2035 // in serial reference processing. Note these closures are also 2036 // used for serially processing (by the current thread) the 2037 // JNI references during parallel reference processing. 2038 // 2039 // These closures do not need to synchronize with the worker 2040 // threads involved in parallel reference processing as these 2041 // instances are executed serially by the current thread (i.e. 2042 // reference processing is not multi-threaded and is thus 2043 // performed by the current thread instead of a gang worker). 2044 // 2045 // The gang tasks involved in parallel reference processing create 2046 // their own instances of these closures, which do their own 2047 // synchronization among themselves. 2048 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 2049 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 2050 2051 // We need at least one active thread. If reference processing 2052 // is not multi-threaded we use the current (VMThread) thread, 2053 // otherwise we use the work gang from the G1CollectedHeap and 2054 // we utilize all the worker threads we can. 2055 bool processing_is_mt = rp->processing_is_mt(); 2056 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U); 2057 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); 2058 2059 // Parallel processing task executor. 2060 G1CMRefProcTaskExecutor par_task_executor(g1h, this, 2061 g1h->workers(), active_workers); 2062 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 2063 2064 // Set the concurrency level. The phase was already set prior to 2065 // executing the remark task. 2066 set_concurrency(active_workers); 2067 2068 // Set the degree of MT processing here. If the discovery was done MT, 2069 // the number of threads involved during discovery could differ from 2070 // the number of active workers. This is OK as long as the discovered 2071 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 2072 rp->set_active_mt_degree(active_workers); 2073 2074 // Process the weak references. 2075 const ReferenceProcessorStats& stats = 2076 rp->process_discovered_references(&g1_is_alive, 2077 &g1_keep_alive, 2078 &g1_drain_mark_stack, 2079 executor, 2080 g1h->gc_timer_cm()); 2081 g1h->gc_tracer_cm()->report_gc_reference_stats(stats); 2082 2083 // The do_oop work routines of the keep_alive and drain_marking_stack 2084 // oop closures will set the has_overflown flag if we overflow the 2085 // global marking stack.
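    // That flag is checked just below: a non-empty mark stack is only
    // acceptable here if it is marked as having overflowed.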
2086 2087 assert(_markStack.overflow() || _markStack.isEmpty(), 2088 "mark stack should be empty (unless it overflowed)"); 2089 2090 if (_markStack.overflow()) { 2091 // This should have been done already when we tried to push an 2092 // entry on to the global mark stack. But let's do it again. 2093 set_has_overflown(); 2094 } 2095 2096 assert(rp->num_q() == active_workers, "why not"); 2097 2098 rp->enqueue_discovered_references(executor); 2099 2100 rp->verify_no_references_recorded(); 2101 assert(!rp->discovery_enabled(), "Post condition"); 2102 } 2103 2104 if (has_overflown()) { 2105 // We can not trust g1_is_alive if the marking stack overflowed 2106 return; 2107 } 2108 2109 assert(_markStack.isEmpty(), "Marking should have completed"); 2110 2111 // Unload Klasses, String, Symbols, Code Cache, etc. 2112 { 2113 GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm()); 2114 2115 if (ClassUnloadingWithConcurrentMark) { 2116 bool purged_classes; 2117 2118 { 2119 GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm()); 2120 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); 2121 } 2122 2123 { 2124 GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm()); 2125 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2126 } 2127 } 2128 2129 if (G1StringDedup::is_enabled()) { 2130 GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm()); 2131 G1StringDedup::unlink(&g1_is_alive); 2132 } 2133 } 2134 } 2135 2136 void ConcurrentMark::swapMarkBitMaps() { 2137 CMBitMapRO* temp = _prevMarkBitMap; 2138 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2139 _nextMarkBitMap = (CMBitMap*) temp; 2140 } 2141 2142 // Closure for marking entries in SATB buffers. 2143 class CMSATBBufferClosure : public SATBBufferClosure { 2144 private: 2145 CMTask* _task; 2146 G1CollectedHeap* _g1h; 2147 2148 // This is very similar to CMTask::deal_with_reference, but with 2149 // more relaxed requirements for the argument, so this must be more 2150 // circumspect about treating the argument as an object. 2151 void do_entry(void* entry) const { 2152 _task->increment_refs_reached(); 2153 HeapRegion* hr = _g1h->heap_region_containing(entry); 2154 if (entry < hr->next_top_at_mark_start()) { 2155 // Until we get here, we don't know whether entry refers to a valid 2156 // object; it could instead have been a stale reference. 
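      // Entries at or above NTAMS were filtered out above: that storage
      // was allocated during the current marking cycle and is treated as
      // implicitly live, so it never needs explicit marking. An entry
      // that survives the filter refers to a parsable object, which the
      // assert below double-checks.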
2157 oop obj = static_cast<oop>(entry); 2158 assert(obj->is_oop(true /* ignore mark word */), 2159 "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)); 2160 _task->make_reference_grey(obj, hr); 2161 } 2162 } 2163 2164 public: 2165 CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h) 2166 : _task(task), _g1h(g1h) { } 2167 2168 virtual void do_buffer(void** buffer, size_t size) { 2169 for (size_t i = 0; i < size; ++i) { 2170 do_entry(buffer[i]); 2171 } 2172 } 2173 }; 2174 2175 class G1RemarkThreadsClosure : public ThreadClosure { 2176 CMSATBBufferClosure _cm_satb_cl; 2177 G1CMOopClosure _cm_cl; 2178 MarkingCodeBlobClosure _code_cl; 2179 int _thread_parity; 2180 2181 public: 2182 G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) : 2183 _cm_satb_cl(task, g1h), 2184 _cm_cl(g1h, g1h->concurrent_mark(), task), 2185 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), 2186 _thread_parity(Threads::thread_claim_parity()) {} 2187 2188 void do_thread(Thread* thread) { 2189 if (thread->is_Java_thread()) { 2190 if (thread->claim_oops_do(true, _thread_parity)) { 2191 JavaThread* jt = (JavaThread*)thread; 2192 2193 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking; 2194 // however, the oops reachable from nmethods have very complex lifecycles: 2195 // * Alive if on the stack of an executing method 2196 // * Weakly reachable otherwise 2197 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be 2198 // live by the SATB invariant, but other oops recorded in nmethods may behave differently. 2199 jt->nmethods_do(&_code_cl); 2200 2201 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl); 2202 } 2203 } else if (thread->is_VM_thread()) { 2204 if (thread->claim_oops_do(true, _thread_parity)) { 2205 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl); 2206 } 2207 } 2208 } 2209 }; 2210 2211 class CMRemarkTask: public AbstractGangTask { 2212 private: 2213 ConcurrentMark* _cm; 2214 public: 2215 void work(uint worker_id) { 2216 // Since all available tasks are actually started, we should 2217 // only proceed if we're supposed to be active. 2218 if (worker_id < _cm->active_tasks()) { 2219 CMTask* task = _cm->task(worker_id); 2220 task->record_start_time(); 2221 { 2222 ResourceMark rm; 2223 HandleMark hm; 2224 2225 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task); 2226 Threads::threads_do(&threads_f); 2227 } 2228 2229 do { 2230 task->do_marking_step(1000000000.0 /* something very large */, 2231 true /* do_termination */, 2232 false /* is_serial */); 2233 } while (task->has_aborted() && !_cm->has_overflown()); 2234 // If we overflow, then we do not want to restart. We instead 2235 // want to abort remark and do concurrent marking again.
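        // The remark code checks has_overflown() after running this task
        // and, if it is set, restarts the concurrent marking cycle rather
        // than completing remark.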
2236 task->record_end_time(); 2237 } 2238 } 2239 2240 CMRemarkTask(ConcurrentMark* cm, uint active_workers) : 2241 AbstractGangTask("Par Remark"), _cm(cm) { 2242 _cm->terminator()->reset_for_reuse(active_workers); 2243 } 2244 }; 2245 2246 void ConcurrentMark::checkpointRootsFinalWork() { 2247 ResourceMark rm; 2248 HandleMark hm; 2249 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2250 2251 GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm()); 2252 2253 g1h->ensure_parsability(false); 2254 2255 // this is remark, so we'll use up all active threads 2256 uint active_workers = g1h->workers()->active_workers(); 2257 set_concurrency_and_phase(active_workers, false /* concurrent */); 2258 // Leave _parallel_marking_threads at it's 2259 // value originally calculated in the ConcurrentMark 2260 // constructor and pass values of the active workers 2261 // through the gang in the task. 2262 2263 { 2264 StrongRootsScope srs(active_workers); 2265 2266 CMRemarkTask remarkTask(this, active_workers); 2267 // We will start all available threads, even if we decide that the 2268 // active_workers will be fewer. The extra ones will just bail out 2269 // immediately. 2270 g1h->workers()->run_task(&remarkTask); 2271 } 2272 2273 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2274 guarantee(has_overflown() || 2275 satb_mq_set.completed_buffers_num() == 0, 2276 "Invariant: has_overflown = %s, num buffers = %d", 2277 BOOL_TO_STR(has_overflown()), 2278 satb_mq_set.completed_buffers_num()); 2279 2280 print_stats(); 2281 } 2282 2283 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2284 // Note we are overriding the read-only view of the prev map here, via 2285 // the cast. 2286 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2287 } 2288 2289 HeapRegion* 2290 ConcurrentMark::claim_region(uint worker_id) { 2291 // "checkpoint" the finger 2292 HeapWord* finger = _finger; 2293 2294 // _heap_end will not change underneath our feet; it only changes at 2295 // yield points. 2296 while (finger < _heap_end) { 2297 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2298 2299 HeapRegion* curr_region = _g1h->heap_region_containing(finger); 2300 2301 // Above heap_region_containing may return NULL as we always scan claim 2302 // until the end of the heap. In this case, just jump to the next region. 2303 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; 2304 2305 // Is the gap between reading the finger and doing the CAS too long? 2306 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); 2307 if (res == finger && curr_region != NULL) { 2308 // we succeeded 2309 HeapWord* bottom = curr_region->bottom(); 2310 HeapWord* limit = curr_region->next_top_at_mark_start(); 2311 2312 // notice that _finger == end cannot be guaranteed here since, 2313 // someone else might have moved the finger even further 2314 assert(_finger >= end, "the finger should have moved forward"); 2315 2316 if (limit > bottom) { 2317 return curr_region; 2318 } else { 2319 assert(limit == bottom, 2320 "the region limit should be at bottom"); 2321 // we return NULL and the caller should try calling 2322 // claim_region() again. 
2323 return NULL; 2324 } 2325 } else { 2326 assert(_finger > finger, "the finger should have moved forward"); 2327 // read it again 2328 finger = _finger; 2329 } 2330 } 2331 2332 return NULL; 2333 } 2334 2335 #ifndef PRODUCT 2336 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 2337 private: 2338 G1CollectedHeap* _g1h; 2339 const char* _phase; 2340 int _info; 2341 2342 public: 2343 VerifyNoCSetOops(const char* phase, int info = -1) : 2344 _g1h(G1CollectedHeap::heap()), 2345 _phase(phase), 2346 _info(info) 2347 { } 2348 2349 void operator()(oop obj) const { 2350 guarantee(obj->is_oop(), 2351 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 2352 p2i(obj), _phase, _info); 2353 guarantee(!_g1h->obj_in_cs(obj), 2354 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 2355 p2i(obj), _phase, _info); 2356 } 2357 }; 2358 2359 void ConcurrentMark::verify_no_cset_oops() { 2360 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2361 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 2362 return; 2363 } 2364 2365 // Verify entries on the global mark stack 2366 _markStack.iterate(VerifyNoCSetOops("Stack")); 2367 2368 // Verify entries on the task queues 2369 for (uint i = 0; i < _max_worker_id; ++i) { 2370 CMTaskQueue* queue = _task_queues->queue(i); 2371 queue->iterate(VerifyNoCSetOops("Queue", i)); 2372 } 2373 2374 // Verify the global finger 2375 HeapWord* global_finger = finger(); 2376 if (global_finger != NULL && global_finger < _heap_end) { 2377 // Since we always iterate over all regions, we might get a NULL HeapRegion 2378 // here. 2379 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 2380 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 2381 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 2382 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 2383 } 2384 2385 // Verify the task fingers 2386 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2387 for (uint i = 0; i < parallel_marking_threads(); ++i) { 2388 CMTask* task = _tasks[i]; 2389 HeapWord* task_finger = task->finger(); 2390 if (task_finger != NULL && task_finger < _heap_end) { 2391 // See above note on the global finger verification. 2392 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 2393 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 2394 !task_hr->in_collection_set(), 2395 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 2396 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 2397 } 2398 } 2399 } 2400 #endif // PRODUCT 2401 2402 // Aggregate the counting data that was constructed concurrently 2403 // with marking. 
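// While marking ran, each worker accumulated liveness information into its
// own card bitmap and its own per-region marked-bytes array. This closure
// folds that per-worker data into the shared structures: it ORs the worker
// card bitmaps into the global card bitmap and adds the per-worker byte
// counts to each region's marked-bytes total.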
2404 class AggregateCountDataHRClosure: public HeapRegionClosure { 2405 G1CollectedHeap* _g1h; 2406 ConcurrentMark* _cm; 2407 CardTableModRefBS* _ct_bs; 2408 BitMap* _cm_card_bm; 2409 uint _max_worker_id; 2410 2411 public: 2412 AggregateCountDataHRClosure(G1CollectedHeap* g1h, 2413 BitMap* cm_card_bm, 2414 uint max_worker_id) : 2415 _g1h(g1h), _cm(g1h->concurrent_mark()), 2416 _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())), 2417 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { } 2418 2419 bool doHeapRegion(HeapRegion* hr) { 2420 HeapWord* start = hr->bottom(); 2421 HeapWord* limit = hr->next_top_at_mark_start(); 2422 HeapWord* end = hr->end(); 2423 2424 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(), 2425 "Preconditions not met - " 2426 "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", " 2427 "top: " PTR_FORMAT ", end: " PTR_FORMAT, 2428 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())); 2429 2430 assert(hr->next_marked_bytes() == 0, "Precondition"); 2431 2432 if (start == limit) { 2433 // NTAMS of this region has not been set so nothing to do. 2434 return false; 2435 } 2436 2437 // 'start' should be in the heap. 2438 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity"); 2439 // 'end' *may* be just beyond the end of the heap (if hr is the last region) 2440 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity"); 2441 2442 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); 2443 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit); 2444 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end); 2445 2446 // If ntams is not card aligned then we bump the card bitmap index 2447 // for limit so that we get all the cards spanned by 2448 // the object ending at ntams. 2449 // Note: if this is the last region in the heap then ntams 2450 // could actually be just beyond the end of the heap; 2451 // limit_idx will then correspond to a (non-existent) card 2452 // that is also outside the heap. 2453 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) { 2454 limit_idx += 1; 2455 } 2456 2457 assert(limit_idx <= end_idx, "or else use atomics"); 2458 2459 // Aggregate the "stripe" in the count data associated with hr. 2460 uint hrm_index = hr->hrm_index(); 2461 size_t marked_bytes = 0; 2462 2463 for (uint i = 0; i < _max_worker_id; i += 1) { 2464 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i); 2465 BitMap* task_card_bm = _cm->count_card_bitmap_for(i); 2466 2467 // Fetch the marked_bytes in this region for task i and 2468 // add it to the running total for this region. 2469 marked_bytes += marked_bytes_array[hrm_index]; 2470 2471 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx) 2472 // into the global card bitmap. 2473 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 2474 2475 while (scan_idx < limit_idx) { 2476 assert(task_card_bm->at(scan_idx) == true, "should be"); 2477 _cm_card_bm->set_bit(scan_idx); 2478 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 2479 2480 // BitMap::get_next_one_offset() can handle the case when 2481 // its left_offset parameter is greater than its right_offset 2482 // parameter. It does, however, have an early exit if 2483 // left_offset == right_offset. So let's limit the value 2484 // passed in for left offset here.
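        // For example, with limit_idx == 100 and the last set bit found at
        // scan_idx == 99, next_idx becomes MIN2(100, 100) == 100 and
        // get_next_one_offset(100, 100) just returns the limit, ending the loop.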
2485 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 2486 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 2487 } 2488 } 2489 2490 // Update the marked bytes for this region. 2491 hr->add_to_marked_bytes(marked_bytes); 2492 2493 // Next heap region 2494 return false; 2495 } 2496 }; 2497 2498 class G1AggregateCountDataTask: public AbstractGangTask { 2499 protected: 2500 G1CollectedHeap* _g1h; 2501 ConcurrentMark* _cm; 2502 BitMap* _cm_card_bm; 2503 uint _max_worker_id; 2504 uint _active_workers; 2505 HeapRegionClaimer _hrclaimer; 2506 2507 public: 2508 G1AggregateCountDataTask(G1CollectedHeap* g1h, 2509 ConcurrentMark* cm, 2510 BitMap* cm_card_bm, 2511 uint max_worker_id, 2512 uint n_workers) : 2513 AbstractGangTask("Count Aggregation"), 2514 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 2515 _max_worker_id(max_worker_id), 2516 _active_workers(n_workers), 2517 _hrclaimer(_active_workers) { 2518 } 2519 2520 void work(uint worker_id) { 2521 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 2522 2523 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 2524 } 2525 }; 2526 2527 2528 void ConcurrentMark::aggregate_count_data() { 2529 uint n_workers = _g1h->workers()->active_workers(); 2530 2531 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 2532 _max_worker_id, n_workers); 2533 2534 _g1h->workers()->run_task(&g1_par_agg_task); 2535 } 2536 2537 // Clear the per-worker arrays used to store the per-region counting data 2538 void ConcurrentMark::clear_all_count_data() { 2539 // Clear the global card bitmap - it will be filled during 2540 // liveness count aggregation (during remark) and the 2541 // final counting task. 2542 _card_bm.clear(); 2543 2544 // Clear the global region bitmap - it will be filled as part 2545 // of the final counting task. 2546 _region_bm.clear(); 2547 2548 uint max_regions = _g1h->max_regions(); 2549 assert(_max_worker_id > 0, "uninitialized"); 2550 2551 for (uint i = 0; i < _max_worker_id; i += 1) { 2552 BitMap* task_card_bm = count_card_bitmap_for(i); 2553 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 2554 2555 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 2556 assert(marked_bytes_array != NULL, "uninitialized"); 2557 2558 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 2559 task_card_bm->clear(); 2560 } 2561 } 2562 2563 void ConcurrentMark::print_stats() { 2564 if (!log_is_enabled(Debug, gc, stats)) { 2565 return; 2566 } 2567 log_debug(gc, stats)("---------------------------------------------------------------------"); 2568 for (size_t i = 0; i < _active_tasks; ++i) { 2569 _tasks[i]->print_stats(); 2570 log_debug(gc, stats)("---------------------------------------------------------------------"); 2571 } 2572 } 2573 2574 // abandon current marking iteration due to a Full GC 2575 void ConcurrentMark::abort() { 2576 if (!cmThread()->during_cycle() || _has_aborted) { 2577 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2578 return; 2579 } 2580 2581 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2582 // concurrent bitmap clearing. 2583 _nextMarkBitMap->clearAll(); 2584 2585 // Note we cannot clear the previous marking bitmap here 2586 // since VerifyDuringGC verifies the objects marked during 2587 // a full GC against the previous bitmap. 
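  // Everything else that belongs to the aborted cycle - the liveness
  // counting data, the mark stack, the per-task state and the overflow
  // barriers - is reset below so that the next cycle starts clean.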
2588 2589 // Clear the liveness counting data 2590 clear_all_count_data(); 2591 // Empty mark stack 2592 reset_marking_state(); 2593 for (uint i = 0; i < _max_worker_id; ++i) { 2594 _tasks[i]->clear_region_fields(); 2595 } 2596 _first_overflow_barrier_sync.abort(); 2597 _second_overflow_barrier_sync.abort(); 2598 _has_aborted = true; 2599 2600 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2601 satb_mq_set.abandon_partial_marking(); 2602 // This can be called either during or outside marking, we'll read 2603 // the expected_active value from the SATB queue set. 2604 satb_mq_set.set_active_all_threads( 2605 false, /* new active value */ 2606 satb_mq_set.is_active() /* expected_active */); 2607 2608 _g1h->trace_heap_after_concurrent_cycle(); 2609 2610 // Close any open concurrent phase timing 2611 register_concurrent_phase_end(); 2612 2613 _g1h->register_concurrent_cycle_end(); 2614 } 2615 2616 static void print_ms_time_info(const char* prefix, const char* name, 2617 NumberSeq& ns) { 2618 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2619 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2620 if (ns.num() > 0) { 2621 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", 2622 prefix, ns.sd(), ns.maximum()); 2623 } 2624 } 2625 2626 void ConcurrentMark::print_summary_info() { 2627 LogHandle(gc, marking) log; 2628 if (!log.is_trace()) { 2629 return; 2630 } 2631 2632 log.trace(" Concurrent marking:"); 2633 print_ms_time_info(" ", "init marks", _init_times); 2634 print_ms_time_info(" ", "remarks", _remark_times); 2635 { 2636 print_ms_time_info(" ", "final marks", _remark_mark_times); 2637 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2638 2639 } 2640 print_ms_time_info(" ", "cleanups", _cleanup_times); 2641 log.trace(" Final counting total time = %8.2f s (avg = %8.2f ms).", 2642 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2643 if (G1ScrubRemSets) { 2644 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 2645 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2646 } 2647 log.trace(" Total stop_world time = %8.2f s.", 2648 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2649 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2650 cmThread()->vtime_accum(), cmThread()->vtime_mark_accum()); 2651 } 2652 2653 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2654 _parallel_workers->print_worker_threads_on(st); 2655 } 2656 2657 void ConcurrentMark::print_on_error(outputStream* st) const { 2658 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2659 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 2660 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 2661 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 2662 } 2663 2664 // We take a break if someone is trying to stop the world. 
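// Worker 0 records the concurrent pause for the policy before the threads
// park in SuspendibleThreadSet::yield(), where they remain until the
// requested safepoint operation has completed.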
2665 bool ConcurrentMark::do_yield_check(uint worker_id) { 2666 if (SuspendibleThreadSet::should_yield()) { 2667 if (worker_id == 0) { 2668 _g1h->g1_policy()->record_concurrent_pause(); 2669 } 2670 SuspendibleThreadSet::yield(); 2671 return true; 2672 } else { 2673 return false; 2674 } 2675 } 2676 2677 // Closure for iteration over bitmaps 2678 class CMBitMapClosure : public BitMapClosure { 2679 private: 2680 // the bitmap that is being iterated over 2681 CMBitMap* _nextMarkBitMap; 2682 ConcurrentMark* _cm; 2683 CMTask* _task; 2684 2685 public: 2686 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 2687 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 2688 2689 bool do_bit(size_t offset) { 2690 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 2691 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 2692 assert( addr < _cm->finger(), "invariant"); 2693 assert(addr >= _task->finger(), "invariant"); 2694 2695 // We move that task's local finger along. 2696 _task->move_finger_to(addr); 2697 2698 _task->scan_object(oop(addr)); 2699 // we only partially drain the local queue and global stack 2700 _task->drain_local_queue(true); 2701 _task->drain_global_stack(true); 2702 2703 // if the has_aborted flag has been raised, we need to bail out of 2704 // the iteration 2705 return !_task->has_aborted(); 2706 } 2707 }; 2708 2709 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2710 ReferenceProcessor* result = NULL; 2711 if (G1UseConcMarkReferenceProcessing) { 2712 result = g1h->ref_processor_cm(); 2713 assert(result != NULL, "should not be NULL"); 2714 } 2715 return result; 2716 } 2717 2718 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2719 ConcurrentMark* cm, 2720 CMTask* task) 2721 : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)), 2722 _g1h(g1h), _cm(cm), _task(task) 2723 { } 2724 2725 void CMTask::setup_for_region(HeapRegion* hr) { 2726 assert(hr != NULL, 2727 "claim_region() should have filtered out NULL regions"); 2728 _curr_region = hr; 2729 _finger = hr->bottom(); 2730 update_region_limit(); 2731 } 2732 2733 void CMTask::update_region_limit() { 2734 HeapRegion* hr = _curr_region; 2735 HeapWord* bottom = hr->bottom(); 2736 HeapWord* limit = hr->next_top_at_mark_start(); 2737 2738 if (limit == bottom) { 2739 // The region was collected underneath our feet. 2740 // We set the finger to bottom to ensure that the bitmap 2741 // iteration that will follow this will not do anything. 2742 // (this is not a condition that holds when we set the region up, 2743 // as the region is not supposed to be empty in the first place) 2744 _finger = bottom; 2745 } else if (limit >= _region_limit) { 2746 assert(limit >= _finger, "peace of mind"); 2747 } else { 2748 assert(limit < _region_limit, "only way to get here"); 2749 // This can happen under some pretty unusual circumstances. An 2750 // evacuation pause empties the region underneath our feet (NTAMS 2751 // at bottom). We then do some allocation in the region (NTAMS 2752 // stays at bottom), followed by the region being used as a GC 2753 // alloc region (NTAMS will move to top() and the objects 2754 // originally below it will be grayed). All objects now marked in 2755 // the region are explicitly grayed, if below the global finger, 2756 // and we do not need in fact to scan anything else. So, we simply 2757 // set _finger to be limit to ensure that the bitmap iteration 2758 // doesn't do anything. 
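    // Setting _finger to limit makes the upcoming iteration over
    // [_finger, _region_limit) empty, which is exactly what we want here.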
2759 _finger = limit; 2760 } 2761 2762 _region_limit = limit; 2763 } 2764 2765 void CMTask::giveup_current_region() { 2766 assert(_curr_region != NULL, "invariant"); 2767 clear_region_fields(); 2768 } 2769 2770 void CMTask::clear_region_fields() { 2771 // Values for these three fields that indicate that we're not 2772 // holding on to a region. 2773 _curr_region = NULL; 2774 _finger = NULL; 2775 _region_limit = NULL; 2776 } 2777 2778 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 2779 if (cm_oop_closure == NULL) { 2780 assert(_cm_oop_closure != NULL, "invariant"); 2781 } else { 2782 assert(_cm_oop_closure == NULL, "invariant"); 2783 } 2784 _cm_oop_closure = cm_oop_closure; 2785 } 2786 2787 void CMTask::reset(CMBitMap* nextMarkBitMap) { 2788 guarantee(nextMarkBitMap != NULL, "invariant"); 2789 _nextMarkBitMap = nextMarkBitMap; 2790 clear_region_fields(); 2791 2792 _calls = 0; 2793 _elapsed_time_ms = 0.0; 2794 _termination_time_ms = 0.0; 2795 _termination_start_time_ms = 0.0; 2796 } 2797 2798 bool CMTask::should_exit_termination() { 2799 regular_clock_call(); 2800 // This is called when we are in the termination protocol. We should 2801 // quit if, for some reason, this task wants to abort or the global 2802 // stack is not empty (this means that we can get work from it). 2803 return !_cm->mark_stack_empty() || has_aborted(); 2804 } 2805 2806 void CMTask::reached_limit() { 2807 assert(_words_scanned >= _words_scanned_limit || 2808 _refs_reached >= _refs_reached_limit , 2809 "shouldn't have been called otherwise"); 2810 regular_clock_call(); 2811 } 2812 2813 void CMTask::regular_clock_call() { 2814 if (has_aborted()) return; 2815 2816 // First, we need to recalculate the words scanned and refs reached 2817 // limits for the next clock call. 2818 recalculate_limits(); 2819 2820 // During the regular clock call we do the following 2821 2822 // (1) If an overflow has been flagged, then we abort. 2823 if (_cm->has_overflown()) { 2824 set_has_aborted(); 2825 return; 2826 } 2827 2828 // If we are not concurrent (i.e. we're doing remark) we don't need 2829 // to check anything else. The other steps are only needed during 2830 // the concurrent marking phase. 2831 if (!concurrent()) return; 2832 2833 // (2) If marking has been aborted for Full GC, then we also abort. 2834 if (_cm->has_aborted()) { 2835 set_has_aborted(); 2836 return; 2837 } 2838 2839 double curr_time_ms = os::elapsedVTime() * 1000.0; 2840 2841 // (3) We check whether we should yield. If we have to, then we abort. 2842 if (SuspendibleThreadSet::should_yield()) { 2843 // We should yield. To do this we abort the task. The caller is 2844 // responsible for yielding. 2845 set_has_aborted(); 2846 return; 2847 } 2848 2849 // (4) We check whether we've reached our time quota. If we have, 2850 // then we abort. 2851 double elapsed_time_ms = curr_time_ms - _start_time_ms; 2852 if (elapsed_time_ms > _time_target_ms) { 2853 set_has_aborted(); 2854 _has_timed_out = true; 2855 return; 2856 } 2857 2858 // (5) Finally, we check whether there are enough completed SATB 2859 // buffers available for processing. If there are, we abort.
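  // The _draining_satb_buffers flag checked below prevents this condition
  // from re-aborting the task while drain_satb_buffers() itself is the
  // caller of this clock method.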
2860 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2861 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 2862 // we do need to process SATB buffers, we'll abort and restart 2863 // the marking task to do so 2864 set_has_aborted(); 2865 return; 2866 } 2867 } 2868 2869 void CMTask::recalculate_limits() { 2870 _real_words_scanned_limit = _words_scanned + words_scanned_period; 2871 _words_scanned_limit = _real_words_scanned_limit; 2872 2873 _real_refs_reached_limit = _refs_reached + refs_reached_period; 2874 _refs_reached_limit = _real_refs_reached_limit; 2875 } 2876 2877 void CMTask::decrease_limits() { 2878 // This is called when we believe that we're going to do an infrequent 2879 // operation which will increase the per byte scanned cost (i.e. move 2880 // entries to/from the global stack). It basically tries to decrease the 2881 // scanning limit so that the clock is called earlier. 2882 2883 _words_scanned_limit = _real_words_scanned_limit - 2884 3 * words_scanned_period / 4; 2885 _refs_reached_limit = _real_refs_reached_limit - 2886 3 * refs_reached_period / 4; 2887 } 2888 2889 void CMTask::move_entries_to_global_stack() { 2890 // local array where we'll store the entries that will be popped 2891 // from the local queue 2892 oop buffer[global_stack_transfer_size]; 2893 2894 int n = 0; 2895 oop obj; 2896 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 2897 buffer[n] = obj; 2898 ++n; 2899 } 2900 2901 if (n > 0) { 2902 // we popped at least one entry from the local queue 2903 2904 if (!_cm->mark_stack_push(buffer, n)) { 2905 set_has_aborted(); 2906 } 2907 } 2908 2909 // this operation was quite expensive, so decrease the limits 2910 decrease_limits(); 2911 } 2912 2913 void CMTask::get_entries_from_global_stack() { 2914 // local array where we'll store the entries that will be popped 2915 // from the global stack. 2916 oop buffer[global_stack_transfer_size]; 2917 int n; 2918 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 2919 assert(n <= global_stack_transfer_size, 2920 "we should not pop more than the given limit"); 2921 if (n > 0) { 2922 // yes, we did actually pop at least one entry 2923 for (int i = 0; i < n; ++i) { 2924 bool success = _task_queue->push(buffer[i]); 2925 // We only call this when the local queue is empty or under a 2926 // given target limit. So, we do not expect this push to fail. 2927 assert(success, "invariant"); 2928 } 2929 } 2930 2931 // this operation was quite expensive, so decrease the limits 2932 decrease_limits(); 2933 } 2934 2935 void CMTask::drain_local_queue(bool partially) { 2936 if (has_aborted()) return; 2937 2938 // Decide what the target size is, depending whether we're going to 2939 // drain it partially (so that other tasks can steal if they run out 2940 // of things to do) or totally (at the very end). 
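  // As an illustration (the actual numbers depend on the queue size and
  // the GCDrainStackTargetSize flag): with max_elems() == 16384 and
  // GCDrainStackTargetSize == 64, a partial drain keeps popping entries
  // until at most 64 remain, leaving work available for stealing.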
2941 size_t target_size; 2942 if (partially) { 2943 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 2944 } else { 2945 target_size = 0; 2946 } 2947 2948 if (_task_queue->size() > target_size) { 2949 oop obj; 2950 bool ret = _task_queue->pop_local(obj); 2951 while (ret) { 2952 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 2953 assert(!_g1h->is_on_master_free_list( 2954 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 2955 2956 scan_object(obj); 2957 2958 if (_task_queue->size() <= target_size || has_aborted()) { 2959 ret = false; 2960 } else { 2961 ret = _task_queue->pop_local(obj); 2962 } 2963 } 2964 } 2965 } 2966 2967 void CMTask::drain_global_stack(bool partially) { 2968 if (has_aborted()) return; 2969 2970 // We have a policy to drain the local queue before we attempt to 2971 // drain the global stack. 2972 assert(partially || _task_queue->size() == 0, "invariant"); 2973 2974 // Decide what the target size is, depending whether we're going to 2975 // drain it partially (so that other tasks can steal if they run out 2976 // of things to do) or totally (at the very end). Notice that, 2977 // because we move entries from the global stack in chunks or 2978 // because another task might be doing the same, we might in fact 2979 // drop below the target. But, this is not a problem. 2980 size_t target_size; 2981 if (partially) { 2982 target_size = _cm->partial_mark_stack_size_target(); 2983 } else { 2984 target_size = 0; 2985 } 2986 2987 if (_cm->mark_stack_size() > target_size) { 2988 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 2989 get_entries_from_global_stack(); 2990 drain_local_queue(partially); 2991 } 2992 } 2993 } 2994 2995 // SATB Queue has several assumptions on whether to call the par or 2996 // non-par versions of the methods. this is why some of the code is 2997 // replicated. We should really get rid of the single-threaded version 2998 // of the code to simplify things. 2999 void CMTask::drain_satb_buffers() { 3000 if (has_aborted()) return; 3001 3002 // We set this so that the regular clock knows that we're in the 3003 // middle of draining buffers and doesn't set the abort flag when it 3004 // notices that SATB buffers are available for draining. It'd be 3005 // very counter productive if it did that. :-) 3006 _draining_satb_buffers = true; 3007 3008 CMSATBBufferClosure satb_cl(this, _g1h); 3009 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3010 3011 // This keeps claiming and applying the closure to completed buffers 3012 // until we run out of buffers or we need to abort. 
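  // Each iteration claims one completed buffer, applies the SATB closure
  // to its entries, and then calls the regular clock so that the task
  // still honors its time target while draining.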
3013 while (!has_aborted() && 3014 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 3015 regular_clock_call(); 3016 } 3017 3018 _draining_satb_buffers = false; 3019 3020 assert(has_aborted() || 3021 concurrent() || 3022 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3023 3024 // again, this was a potentially expensive operation, decrease the 3025 // limits to get the regular clock call early 3026 decrease_limits(); 3027 } 3028 3029 void CMTask::print_stats() { 3030 log_debug(gc, stats)("Marking Stats, task = %u, calls = %d", 3031 _worker_id, _calls); 3032 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3033 _elapsed_time_ms, _termination_time_ms); 3034 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3035 _step_times_ms.num(), _step_times_ms.avg(), 3036 _step_times_ms.sd()); 3037 log_debug(gc, stats)(" max = %1.2lfms, total = %1.2lfms", 3038 _step_times_ms.maximum(), _step_times_ms.sum()); 3039 } 3040 3041 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) { 3042 return _task_queues->steal(worker_id, hash_seed, obj); 3043 } 3044 3045 /***************************************************************************** 3046 3047 The do_marking_step(time_target_ms, ...) method is the building 3048 block of the parallel marking framework. It can be called in parallel 3049 with other invocations of do_marking_step() on different tasks 3050 (but only one per task, obviously) and concurrently with the 3051 mutator threads, or during remark, hence it eliminates the need 3052 for two versions of the code. When called during remark, it will 3053 pick up from where the task left off during the concurrent marking 3054 phase. Interestingly, tasks are also claimable during evacuation 3055 pauses too, since do_marking_step() ensures that it aborts before 3056 it needs to yield. 3057 3058 The data structures that it uses to do marking work are the 3059 following: 3060 3061 (1) Marking Bitmap. If there are gray objects that appear only 3062 on the bitmap (this happens either when dealing with an overflow 3063 or when the initial marking phase has simply marked the roots 3064 and didn't push them on the stack), then tasks claim heap 3065 regions whose bitmap they then scan to find gray objects. A 3066 global finger indicates where the end of the last claimed region 3067 is. A local finger indicates how far into the region a task has 3068 scanned. The two fingers are used to determine how to gray an 3069 object (i.e. whether simply marking it is OK, as it will be 3070 visited by a task in the future, or whether it needs to be also 3071 pushed on a stack). 3072 3073 (2) Local Queue. The local queue of the task which is accessed 3074 reasonably efficiently by the task. Other tasks can steal from 3075 it when they run out of work. Throughout the marking phase, a 3076 task attempts to keep its local queue short but not totally 3077 empty, so that entries are available for stealing by other 3078 tasks. Only when there is no more work, a task will totally 3079 drain its local queue. 3080 3081 (3) Global Mark Stack. This handles local queue overflow. During 3082 marking only sets of entries are moved between it and the local 3083 queues, as access to it requires a mutex and more fine-grain 3084 interaction with it which might cause contention. If it 3085 overflows, then the marking phase should restart and iterate 3086 over the bitmap to identify gray objects. 
Throughout the marking 3087 phase, tasks attempt to keep the global mark stack at a small 3088 length but not totally empty, so that entries are available for 3089 popping by other tasks. Only when there is no more work, tasks 3090 will totally drain the global mark stack. 3091 3092 (4) SATB Buffer Queue. This is where completed SATB buffers are 3093 made available. Buffers are regularly removed from this queue 3094 and scanned for roots, so that the queue doesn't get too 3095 long. During remark, all completed buffers are processed, as 3096 well as the filled in parts of any uncompleted buffers. 3097 3098 The do_marking_step() method tries to abort when the time target 3099 has been reached. There are a few other cases when the 3100 do_marking_step() method also aborts: 3101 3102 (1) When the marking phase has been aborted (after a Full GC). 3103 3104 (2) When a global overflow (on the global stack) has been 3105 triggered. Before the task aborts, it will actually sync up with 3106 the other tasks to ensure that all the marking data structures 3107 (local queues, stacks, fingers etc.) are re-initialized so that 3108 when do_marking_step() completes, the marking phase can 3109 immediately restart. 3110 3111 (3) When enough completed SATB buffers are available. The 3112 do_marking_step() method only tries to drain SATB buffers right 3113 at the beginning. So, if enough buffers are available, the 3114 marking step aborts and the SATB buffers are processed at 3115 the beginning of the next invocation. 3116 3117 (4) To yield. when we have to yield then we abort and yield 3118 right at the end of do_marking_step(). This saves us from a lot 3119 of hassle as, by yielding we might allow a Full GC. If this 3120 happens then objects will be compacted underneath our feet, the 3121 heap might shrink, etc. We save checking for this by just 3122 aborting and doing the yield right at the end. 3123 3124 From the above it follows that the do_marking_step() method should 3125 be called in a loop (or, otherwise, regularly) until it completes. 3126 3127 If a marking step completes without its has_aborted() flag being 3128 true, it means it has completed the current marking phase (and 3129 also all other marking tasks have done so and have all synced up). 3130 3131 A method called regular_clock_call() is invoked "regularly" (in 3132 sub ms intervals) throughout marking. It is this clock method that 3133 checks all the abort conditions which were mentioned above and 3134 decides when the task should abort. A work-based scheme is used to 3135 trigger this clock method: when the number of object words the 3136 marking phase has scanned or the number of references the marking 3137 phase has visited reach a given limit. Additional invocations to 3138 the method clock have been planted in a few other strategic places 3139 too. The initial reason for the clock method was to avoid calling 3140 vtime too regularly, as it is quite expensive. So, once it was in 3141 place, it was natural to piggy-back all the other conditions on it 3142 too and not constantly check them throughout the code. 3143 3144 If do_termination is true then do_marking_step will enter its 3145 termination protocol. 3146 3147 The value of is_serial must be true when do_marking_step is being 3148 called serially (i.e. by the VMThread) and do_marking_step should 3149 skip any synchronization in the termination and overflow code. 3150 Examples include the serial remark code and the serial reference 3151 processing closures. 
3152 3153 The value of is_serial must be false when do_marking_step is 3154 being called by any of the worker threads in a work gang. 3155 Examples include the concurrent marking code (CMMarkingTask), 3156 the MT remark code, and the MT reference processing closures. 3157 3158 *****************************************************************************/ 3159 3160 void CMTask::do_marking_step(double time_target_ms, 3161 bool do_termination, 3162 bool is_serial) { 3163 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 3164 assert(concurrent() == _cm->concurrent(), "they should be the same"); 3165 3166 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); 3167 assert(_task_queues != NULL, "invariant"); 3168 assert(_task_queue != NULL, "invariant"); 3169 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); 3170 3171 assert(!_claimed, 3172 "only one thread should claim this task at any one time"); 3173 3174 // OK, this doesn't safeguard again all possible scenarios, as it is 3175 // possible for two threads to set the _claimed flag at the same 3176 // time. But it is only for debugging purposes anyway and it will 3177 // catch most problems. 3178 _claimed = true; 3179 3180 _start_time_ms = os::elapsedVTime() * 1000.0; 3181 3182 // If do_stealing is true then do_marking_step will attempt to 3183 // steal work from the other CMTasks. It only makes sense to 3184 // enable stealing when the termination protocol is enabled 3185 // and do_marking_step() is not being called serially. 3186 bool do_stealing = do_termination && !is_serial; 3187 3188 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 3189 _time_target_ms = time_target_ms - diff_prediction_ms; 3190 3191 // set up the variables that are used in the work-based scheme to 3192 // call the regular clock method 3193 _words_scanned = 0; 3194 _refs_reached = 0; 3195 recalculate_limits(); 3196 3197 // clear all flags 3198 clear_has_aborted(); 3199 _has_timed_out = false; 3200 _draining_satb_buffers = false; 3201 3202 ++_calls; 3203 3204 // Set up the bitmap and oop closures. Anything that uses them is 3205 // eventually called from this method, so it is OK to allocate these 3206 // statically. 3207 CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 3208 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 3209 set_cm_oop_closure(&cm_oop_closure); 3210 3211 if (_cm->has_overflown()) { 3212 // This can happen if the mark stack overflows during a GC pause 3213 // and this task, after a yield point, restarts. We have to abort 3214 // as we need to get into the overflow protocol which happens 3215 // right at the end of this task. 3216 set_has_aborted(); 3217 } 3218 3219 // First drain any available SATB buffers. After this, we will not 3220 // look at SATB buffers before the next invocation of this method. 3221 // If enough completed SATB buffers are queued up, the regular clock 3222 // will abort this task so that it restarts. 3223 drain_satb_buffers(); 3224 // ...then partially drain the local queue and the global stack 3225 drain_local_queue(true); 3226 drain_global_stack(true); 3227 3228 do { 3229 if (!has_aborted() && _curr_region != NULL) { 3230 // This means that we're already holding on to a region. 
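      // The region was either claimed via claim_region() further down in
      // this loop or left over from a previous invocation of this method;
      // _finger records how far into it we have scanned so far (see the
      // notes on the local finger in the comment above).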
3231 assert(_finger != NULL, "if region is not NULL, then the finger " 3232 "should not be NULL either"); 3233 3234 // We might have restarted this task after an evacuation pause 3235 // which might have evacuated the region we're holding on to 3236 // underneath our feet. Let's read its limit again to make sure 3237 // that we do not iterate over a region of the heap that 3238 // contains garbage (update_region_limit() will also move 3239 // _finger to the start of the region if it is found empty). 3240 update_region_limit(); 3241 // We will start from _finger not from the start of the region, 3242 // as we might be restarting this task after aborting half-way 3243 // through scanning this region. In this case, _finger points to 3244 // the address where we last found a marked object. If this is a 3245 // fresh region, _finger points to start(). 3246 MemRegion mr = MemRegion(_finger, _region_limit); 3247 3248 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 3249 "humongous regions should go around loop once only"); 3250 3251 // Some special cases: 3252 // If the memory region is empty, we can just give up the region. 3253 // If the current region is humongous then we only need to check 3254 // the bitmap for the bit associated with the start of the object, 3255 // scan the object if it's live, and give up the region. 3256 // Otherwise, let's iterate over the bitmap of the part of the region 3257 // that is left. 3258 // If the iteration is successful, give up the region. 3259 if (mr.is_empty()) { 3260 giveup_current_region(); 3261 regular_clock_call(); 3262 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 3263 if (_nextMarkBitMap->isMarked(mr.start())) { 3264 // The object is marked - apply the closure 3265 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 3266 bitmap_closure.do_bit(offset); 3267 } 3268 // Even if this task aborted while scanning the humongous object 3269 // we can (and should) give up the current region. 3270 giveup_current_region(); 3271 regular_clock_call(); 3272 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 3273 giveup_current_region(); 3274 regular_clock_call(); 3275 } else { 3276 assert(has_aborted(), "currently the only way to do so"); 3277 // The only way to abort the bitmap iteration is to return 3278 // false from the do_bit() method. However, inside the 3279 // do_bit() method we move the _finger to point to the 3280 // object currently being looked at. So, if we bail out, we 3281 // have definitely set _finger to something non-null. 3282 assert(_finger != NULL, "invariant"); 3283 3284 // Region iteration was actually aborted. So now _finger 3285 // points to the address of the object we last scanned. If we 3286 // leave it there, when we restart this task, we will rescan 3287 // the object. It is easy to avoid this. We move the finger by 3288 // enough to point to the next possible object header (the 3289 // bitmap knows by how much we need to move it as it knows its 3290 // granularity). 3291 assert(_finger < _region_limit, "invariant"); 3292 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 3293 // Check if bitmap iteration was aborted while scanning the last object 3294 if (new_finger >= _region_limit) { 3295 giveup_current_region(); 3296 } else { 3297 move_finger_to(new_finger); 3298 } 3299 } 3300 } 3301 // At this point we have either completed iterating over the 3302 // region we were holding on to, or we have aborted. 
3303 3304 // We then partially drain the local queue and the global stack. 3305 // (Do we really need this?) 3306 drain_local_queue(true); 3307 drain_global_stack(true); 3308 3309 // Read the note on the claim_region() method on why it might 3310 // return NULL with potentially more regions available for 3311 // claiming and why we have to check out_of_regions() to determine 3312 // whether we're done or not. 3313 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 3314 // We are going to try to claim a new region. We should have 3315 // given up on the previous one. 3316 // Separated the asserts so that we know which one fires. 3317 assert(_curr_region == NULL, "invariant"); 3318 assert(_finger == NULL, "invariant"); 3319 assert(_region_limit == NULL, "invariant"); 3320 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 3321 if (claimed_region != NULL) { 3322 // Yes, we managed to claim one 3323 setup_for_region(claimed_region); 3324 assert(_curr_region == claimed_region, "invariant"); 3325 } 3326 // It is important to call the regular clock here. It might take 3327 // a while to claim a region if, for example, we hit a large 3328 // block of empty regions. So we need to call the regular clock 3329 // method once round the loop to make sure it's called 3330 // frequently enough. 3331 regular_clock_call(); 3332 } 3333 3334 if (!has_aborted() && _curr_region == NULL) { 3335 assert(_cm->out_of_regions(), 3336 "at this point we should be out of regions"); 3337 } 3338 } while ( _curr_region != NULL && !has_aborted()); 3339 3340 if (!has_aborted()) { 3341 // We cannot check whether the global stack is empty, since other 3342 // tasks might be pushing objects to it concurrently. 3343 assert(_cm->out_of_regions(), 3344 "at this point we should be out of regions"); 3345 // Try to reduce the number of available SATB buffers so that 3346 // remark has less work to do. 3347 drain_satb_buffers(); 3348 } 3349 3350 // Since we've done everything else, we can now totally drain the 3351 // local queue and global stack. 3352 drain_local_queue(false); 3353 drain_global_stack(false); 3354 3355 // Attempt at work stealing from other task's queues. 3356 if (do_stealing && !has_aborted()) { 3357 // We have not aborted. This means that we have finished all that 3358 // we could. Let's try to do some stealing... 3359 3360 // We cannot check whether the global stack is empty, since other 3361 // tasks might be pushing objects to it concurrently. 3362 assert(_cm->out_of_regions() && _task_queue->size() == 0, 3363 "only way to reach here"); 3364 while (!has_aborted()) { 3365 oop obj; 3366 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 3367 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 3368 "any stolen object should be marked"); 3369 scan_object(obj); 3370 3371 // And since we're towards the end, let's totally drain the 3372 // local queue and global stack. 3373 drain_local_queue(false); 3374 drain_global_stack(false); 3375 } else { 3376 break; 3377 } 3378 } 3379 } 3380 3381 // We still haven't aborted. Now, let's try to get into the 3382 // termination protocol. 3383 if (do_termination && !has_aborted()) { 3384 // We cannot check whether the global stack is empty, since other 3385 // tasks might be concurrently pushing objects on it. 3386 // Separated the asserts so that we know which one fires. 
3387 assert(_cm->out_of_regions(), "only way to reach here"); 3388 assert(_task_queue->size() == 0, "only way to reach here"); 3389 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 3390 3391 // The CMTask class also extends the TerminatorTerminator class, 3392 // hence its should_exit_termination() method will also decide 3393 // whether to exit the termination protocol or not. 3394 bool finished = (is_serial || 3395 _cm->terminator()->offer_termination(this)); 3396 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 3397 _termination_time_ms += 3398 termination_end_time_ms - _termination_start_time_ms; 3399 3400 if (finished) { 3401 // We're all done. 3402 3403 if (_worker_id == 0) { 3404 // let's allow task 0 to do this 3405 if (concurrent()) { 3406 assert(_cm->concurrent_marking_in_progress(), "invariant"); 3407 // we need to set this to false before the next 3408 // safepoint. This way we ensure that the marking phase 3409 // doesn't observe any more heap expansions. 3410 _cm->clear_concurrent_marking_in_progress(); 3411 } 3412 } 3413 3414 // We can now guarantee that the global stack is empty, since 3415 // all other tasks have finished. We separated the guarantees so 3416 // that, if a condition is false, we can immediately find out 3417 // which one. 3418 guarantee(_cm->out_of_regions(), "only way to reach here"); 3419 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 3420 guarantee(_task_queue->size() == 0, "only way to reach here"); 3421 guarantee(!_cm->has_overflown(), "only way to reach here"); 3422 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 3423 } else { 3424 // Apparently there's more work to do. Let's abort this task. It 3425 // will restart it and we can hopefully find more things to do. 3426 set_has_aborted(); 3427 } 3428 } 3429 3430 // Mainly for debugging purposes to make sure that a pointer to the 3431 // closure which was statically allocated in this frame doesn't 3432 // escape it by accident. 3433 set_cm_oop_closure(NULL); 3434 double end_time_ms = os::elapsedVTime() * 1000.0; 3435 double elapsed_time_ms = end_time_ms - _start_time_ms; 3436 // Update the step history. 3437 _step_times_ms.add(elapsed_time_ms); 3438 3439 if (has_aborted()) { 3440 // The task was aborted for some reason. 3441 if (_has_timed_out) { 3442 double diff_ms = elapsed_time_ms - _time_target_ms; 3443 // Keep statistics of how well we did with respect to hitting 3444 // our target only if we actually timed out (if we aborted for 3445 // other reasons, then the results might get skewed). 3446 _marking_step_diffs_ms.add(diff_ms); 3447 } 3448 3449 if (_cm->has_overflown()) { 3450 // This is the interesting one. We aborted because a global 3451 // overflow was raised. This means we have to restart the 3452 // marking phase and start iterating over regions. However, in 3453 // order to do this we have to make sure that all tasks stop 3454 // what they are doing and re-initialize in a safe manner. We 3455 // will achieve this with the use of two barrier sync points. 3456 3457 if (!is_serial) { 3458 // We only need to enter the sync barrier if being called 3459 // from a parallel context 3460 _cm->enter_first_sync_barrier(_worker_id); 3461 3462 // When we exit this sync barrier we know that all tasks have 3463 // stopped doing marking work. So, it's now safe to 3464 // re-initialize our data structures. At the end of this method, 3465 // task 0 will clear the global data structures. 3466 } 3467 3468 // We clear the local state of this task... 
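      // (i.e. the current region, the local finger and the region limit),
      // so that the task starts from a clean slate when marking restarts.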
3469 clear_region_fields(); 3470 3471 if (!is_serial) { 3472 // ...and enter the second barrier. 3473 _cm->enter_second_sync_barrier(_worker_id); 3474 } 3475 // At this point, if we're during the concurrent phase of 3476 // marking, everything has been re-initialized and we're 3477 // ready to restart. 3478 } 3479 } 3480 3481 _claimed = false; 3482 } 3483 3484 CMTask::CMTask(uint worker_id, 3485 ConcurrentMark* cm, 3486 size_t* marked_bytes, 3487 BitMap* card_bm, 3488 CMTaskQueue* task_queue, 3489 CMTaskQueueSet* task_queues) 3490 : _g1h(G1CollectedHeap::heap()), 3491 _worker_id(worker_id), _cm(cm), 3492 _claimed(false), 3493 _nextMarkBitMap(NULL), _hash_seed(17), 3494 _task_queue(task_queue), 3495 _task_queues(task_queues), 3496 _cm_oop_closure(NULL), 3497 _marked_bytes_array(marked_bytes), 3498 _card_bm(card_bm) { 3499 guarantee(task_queue != NULL, "invariant"); 3500 guarantee(task_queues != NULL, "invariant"); 3501 3502 _marking_step_diffs_ms.add(0.5); 3503 } 3504 3505 // These are formatting macros that are used below to ensure 3506 // consistent formatting. The *_H_* versions are used to format the 3507 // header for a particular value and they should be kept consistent 3508 // with the corresponding macro. Also note that most of the macros add 3509 // the necessary white space (as a prefix) which makes them a bit 3510 // easier to compose. 3511 3512 // All the output lines are prefixed with this string to be able to 3513 // identify them easily in a large log file. 3514 #define G1PPRL_LINE_PREFIX "###" 3515 3516 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 3517 #ifdef _LP64 3518 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 3519 #else // _LP64 3520 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 3521 #endif // _LP64 3522 3523 // For per-region info 3524 #define G1PPRL_TYPE_FORMAT " %-4s" 3525 #define G1PPRL_TYPE_H_FORMAT " %4s" 3526 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 3527 #define G1PPRL_BYTE_H_FORMAT " %9s" 3528 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 3529 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 3530 3531 // For summary info 3532 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 3533 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 3534 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 3535 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 3536 3537 G1PrintRegionLivenessInfoClosure:: 3538 G1PrintRegionLivenessInfoClosure(const char* phase_name) 3539 : _total_used_bytes(0), _total_capacity_bytes(0), 3540 _total_prev_live_bytes(0), _total_next_live_bytes(0), 3541 _hum_used_bytes(0), _hum_capacity_bytes(0), 3542 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 3543 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 3544 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 3545 MemRegion g1_reserved = g1h->g1_reserved(); 3546 double now = os::elapsedTime(); 3547 3548 // Print the header of the output. 
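  // The format strings below are assembled by C string-literal
  // concatenation of the G1PPRL_* macros above; for example,
  // G1PPRL_LINE_PREFIX G1PPRL_TYPE_H_FORMAT expands to "###" " %4s",
  // i.e. "### %4s".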
3549 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 3550 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 3551 G1PPRL_SUM_ADDR_FORMAT("reserved") 3552 G1PPRL_SUM_BYTE_FORMAT("region-size"), 3553 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 3554 HeapRegion::GrainBytes); 3555 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3556 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3557 G1PPRL_TYPE_H_FORMAT 3558 G1PPRL_ADDR_BASE_H_FORMAT 3559 G1PPRL_BYTE_H_FORMAT 3560 G1PPRL_BYTE_H_FORMAT 3561 G1PPRL_BYTE_H_FORMAT 3562 G1PPRL_DOUBLE_H_FORMAT 3563 G1PPRL_BYTE_H_FORMAT 3564 G1PPRL_BYTE_H_FORMAT, 3565 "type", "address-range", 3566 "used", "prev-live", "next-live", "gc-eff", 3567 "remset", "code-roots"); 3568 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3569 G1PPRL_TYPE_H_FORMAT 3570 G1PPRL_ADDR_BASE_H_FORMAT 3571 G1PPRL_BYTE_H_FORMAT 3572 G1PPRL_BYTE_H_FORMAT 3573 G1PPRL_BYTE_H_FORMAT 3574 G1PPRL_DOUBLE_H_FORMAT 3575 G1PPRL_BYTE_H_FORMAT 3576 G1PPRL_BYTE_H_FORMAT, 3577 "", "", 3578 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 3579 "(bytes)", "(bytes)"); 3580 } 3581 3582 // It takes as a parameter a reference to one of the _hum_* fields, it 3583 // deduces the corresponding value for a region in a humongous region 3584 // series (either the region size, or what's left if the _hum_* field 3585 // is < the region size), and updates the _hum_* field accordingly. 3586 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { 3587 size_t bytes = 0; 3588 // The > 0 check is to deal with the prev and next live bytes which 3589 // could be 0. 3590 if (*hum_bytes > 0) { 3591 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); 3592 *hum_bytes -= bytes; 3593 } 3594 return bytes; 3595 } 3596 3597 // It deduces the values for a region in a humongous region series 3598 // from the _hum_* fields and updates those accordingly. It assumes 3599 // that that _hum_* fields have already been set up from the "starts 3600 // humongous" region and we visit the regions in address order. 3601 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 3602 size_t* capacity_bytes, 3603 size_t* prev_live_bytes, 3604 size_t* next_live_bytes) { 3605 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 3606 *used_bytes = get_hum_bytes(&_hum_used_bytes); 3607 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 3608 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 3609 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 3610 } 3611 3612 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 3613 const char* type = r->get_type_str(); 3614 HeapWord* bottom = r->bottom(); 3615 HeapWord* end = r->end(); 3616 size_t capacity_bytes = r->capacity(); 3617 size_t used_bytes = r->used(); 3618 size_t prev_live_bytes = r->live_bytes(); 3619 size_t next_live_bytes = r->next_live_bytes(); 3620 double gc_eff = r->gc_efficiency(); 3621 size_t remset_bytes = r->rem_set()->mem_size(); 3622 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3623 3624 if (r->is_starts_humongous()) { 3625 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 3626 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 3627 "they should have been zeroed after the last time we used them"); 3628 // Set up the _hum_* fields. 
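    // Worked example (figures chosen purely for illustration): if the
    // series totals recorded here amount to three regions of capacity and
    // about 2.5 regions' worth of used bytes, get_hum_bytes() hands out at
    // most GrainBytes per region, so the three rows printed for the series
    // report roughly one region's worth of used space for each of the
    // first two regions and the remaining half region's worth for the
    // third.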
3629 _hum_capacity_bytes = capacity_bytes; 3630 _hum_used_bytes = used_bytes; 3631 _hum_prev_live_bytes = prev_live_bytes; 3632 _hum_next_live_bytes = next_live_bytes; 3633 get_hum_bytes(&used_bytes, &capacity_bytes, 3634 &prev_live_bytes, &next_live_bytes); 3635 end = bottom + HeapRegion::GrainWords; 3636 } else if (r->is_continues_humongous()) { 3637 get_hum_bytes(&used_bytes, &capacity_bytes, 3638 &prev_live_bytes, &next_live_bytes); 3639 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 3640 } 3641 3642 _total_used_bytes += used_bytes; 3643 _total_capacity_bytes += capacity_bytes; 3644 _total_prev_live_bytes += prev_live_bytes; 3645 _total_next_live_bytes += next_live_bytes; 3646 _total_remset_bytes += remset_bytes; 3647 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3648 3649 // Print a line for this particular region. 3650 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3651 G1PPRL_TYPE_FORMAT 3652 G1PPRL_ADDR_BASE_FORMAT 3653 G1PPRL_BYTE_FORMAT 3654 G1PPRL_BYTE_FORMAT 3655 G1PPRL_BYTE_FORMAT 3656 G1PPRL_DOUBLE_FORMAT 3657 G1PPRL_BYTE_FORMAT 3658 G1PPRL_BYTE_FORMAT, 3659 type, p2i(bottom), p2i(end), 3660 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3661 remset_bytes, strong_code_roots_bytes); 3662 3663 return false; 3664 } 3665 3666 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3667 // add static memory usages to remembered set sizes 3668 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3669 // Print the footer of the output. 3670 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3671 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3672 " SUMMARY" 3673 G1PPRL_SUM_MB_FORMAT("capacity") 3674 G1PPRL_SUM_MB_PERC_FORMAT("used") 3675 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3676 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3677 G1PPRL_SUM_MB_FORMAT("remset") 3678 G1PPRL_SUM_MB_FORMAT("code-roots"), 3679 bytes_to_mb(_total_capacity_bytes), 3680 bytes_to_mb(_total_used_bytes), 3681 perc(_total_used_bytes, _total_capacity_bytes), 3682 bytes_to_mb(_total_prev_live_bytes), 3683 perc(_total_prev_live_bytes, _total_capacity_bytes), 3684 bytes_to_mb(_total_next_live_bytes), 3685 perc(_total_next_live_bytes, _total_capacity_bytes), 3686 bytes_to_mb(_total_remset_bytes), 3687 bytes_to_mb(_total_strong_code_roots_bytes)); 3688 }
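// For reference, a sketch of how this closure is typically driven (the
// guard and the phase name here are illustrative, not taken from this
// file):
//
//   if (log_is_enabled(Trace, gc, liveness)) {
//     G1PrintRegionLivenessInfoClosure cl("Post-Marking");
//     G1CollectedHeap::heap()->heap_region_iterate(&cl);
//     // one "###" line is printed per region; the destructor prints the
//     // SUMMARY line when cl goes out of scope
//   }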