/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
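  // Each bitmap bit covers (1 << _shifter) heap words, so objects can only
  // start at addresses aligned to HeapWordSize << _shifter bytes; rounding up
  // keeps the offset computation below from landing inside an object.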
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clear_range(mr);
}

void G1CMBitMap::clear_range(MemRegion mr) {
  // Clamp the range to the part of the heap this bitmap covers; intersection()
  // returns the result, it does not modify mr in place.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack(G1ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
{}

bool G1CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    log_warning(gc)("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    log_warning(gc)("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of G1ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void G1CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
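  // The stack must already have been emptied, so its contents need not be
  // preserved: we simply reserve a doubled backing store and only release
  // the old one once that reservation has succeeded.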
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity * 2, (jint) MarkStackSizeMax);
  // Do not give up the existing stack until we have managed to
  // get the doubled capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with the old stack
    _virtual_space.release();
    // Reinitialize virtual space for the new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    // Failed to double capacity, continue with the current capacity
    log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  _capacity / K, new_capacity / K);
  }
}

void G1CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void G1CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool G1CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

void G1CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void G1CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            "saved index: %d index: %d", _saved_index, _index);
  _saved_index = -1;
}

G1CMRootRegions::G1CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void G1CMRootRegions::init(G1CollectedHeap* g1h, G1ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_live_bm(),
  _card_live_bm(),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    log_warning(gc)("Failed to allocate CM marking stack");
    return;
  }

  allocate_internal_bitmaps();

  if (G1PretouchAuxiliaryMemory) {
    pretouch_internal_bitmaps();
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty(); // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = M / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("Parallel Clear Bitmap Task"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  G1ClearBitMapTask task(bitmap, this, workers->active_workers(), may_yield);
  workers->run_task(&task);
  guarantee(!may_yield || task.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_all_live_data(_parallel_workers);
    DEBUG_ONLY(verify_all_live_data());
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that it is suspended for a Full GC or that an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      G1CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial */);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check(worker_id);

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
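  // Root regions (currently only survivor regions) hold objects that were
  // copied there during the initial-mark pause and are therefore live; we
  // iterate over all of [bottom, top) and mark everything they reference.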
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    G1CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm; // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm; // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm; // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

// Helper class that provides functionality to generate the Live Data Count
// information.
class G1LiveDataHelper VALUE_OBJ_CLASS_SPEC {
private:
  BitMap* _region_bm;
  BitMap* _card_bm;

  // The card number of the bottom of the G1 heap. Used for converting addresses
  // to bitmap indices quickly.
  BitMap::idx_t _heap_card_bias;

  // Utility routine to set an exclusive range of bits on the given
  // bitmap, optimized for very small ranges.
  // There must be at least one bit to set.
  inline void set_card_bitmap_range(BitMap* bm,
                                    BitMap::idx_t start_idx,
                                    BitMap::idx_t end_idx) {

    // Set the exclusive bit range [start_idx, end_idx).
    assert((end_idx - start_idx) > 0, "at least one bit");
    assert(end_idx <= bm->size(), "sanity");

    // For small ranges use a simple loop; otherwise use set_range or
    // use par_at_put_range (if parallel). The range is made up of the
    // cards that are spanned by an object/mem region, so 8 cards will
    // allow object sizes up to 4K to be handled using the loop.
    if ((end_idx - start_idx) <= 8) {
      for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
        bm->set_bit(i);
      }
    } else {
      bm->set_range(start_idx, end_idx);
    }
  }

  // We cache the last mark set. This avoids setting the same bit multiple times.
  // This is particularly beneficial for dense bitmaps, as it avoids doing
  // lots of work most of the time.
  BitMap::idx_t _last_marked_bit_idx;

  // Mark the card liveness bitmap for the object spanning from start to end.
  void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
    BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
    BitMap::idx_t end_idx = card_live_bitmap_index_for((HeapWord*)align_ptr_up(end, CardTableModRefBS::card_size));

    assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");

    if (start_idx == _last_marked_bit_idx) {
      start_idx++;
    }
    if (start_idx == end_idx) {
      return;
    }

    // Set the bits in the card bitmap for the cards spanned by this object.
    set_card_bitmap_range(_card_bm, start_idx, end_idx);
    _last_marked_bit_idx = end_idx - 1;
  }

  void reset_mark_cache() {
    _last_marked_bit_idx = (BitMap::idx_t)-1;
  }

public:
  // Returns the index in the per-card liveness count bitmap
  // for the given address
  inline BitMap::idx_t card_live_bitmap_index_for(HeapWord* addr) {
    // Below, the term "card num" means the result of shifting an address
    // by the card shift -- address 0 corresponds to card number 0. One
    // must subtract the card num of the bottom of the heap to obtain a
    // card table index.
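    // For example, assuming the usual 512-byte cards (card_shift == 9), an
    // address 1M above the heap bottom has a card num 2048 larger than the
    // bias, so it maps to bitmap index 2048.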
    BitMap::idx_t card_num = (BitMap::idx_t)(uintptr_t(addr) >> CardTableModRefBS::card_shift);
    return card_num - _heap_card_bias;
  }

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1.
  void set_bit_for_region(HeapRegion* hr) {
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    _region_bm->par_at_put(index, true);
  }

  // Mark the range of bits covered by allocations done since the last marking
  // in the given heap region, i.e. from NTAMS to top of the given region.
  // Returns whether there has been some allocation in this region since the last marking.
  bool mark_allocated_since_marking(HeapRegion* hr) {
    reset_mark_cache();

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* top = hr->top();

    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

    // Mark the allocated-since-marking portion...
    if (ntams < top) {
      mark_card_bitmap_range(ntams, top);
      return true;
    } else {
      return false;
    }
  }

  // Mark the range of bits covered by live objects on the mark bitmap between
  // bottom and NTAMS of the given region.
  // Returns the number of live bytes marked within that area for the given
  // heap region.
  size_t mark_marked_during_marking(G1CMBitMap* mark_bitmap, HeapRegion* hr) {
    reset_mark_cache();

    size_t marked_bytes = 0;

    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    if (ntams <= start) {
      // Skip empty regions.
      return 0;
    } else if (hr->is_humongous()) {
      mark_card_bitmap_range(start, hr->top());
      return pointer_delta(hr->top(), start, 1);
    }

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           "Preconditions not met - "
           "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
           p2i(start), p2i(ntams), p2i(hr->end()));

    // Find the first marked object at or after "start".
    start = mark_bitmap->getNextMarkedWordAddress(start, ntams);
    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      assert(obj_end <= hr->end(), "Humongous objects must have been handled elsewhere.");

      mark_card_bitmap_range(start, obj_end);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // Find the next marked object after this one.
      start = mark_bitmap->getNextMarkedWordAddress(obj_end, ntams);
    }

    return marked_bytes;
  }

  G1LiveDataHelper(BitMap* region_bm,
                   BitMap* card_bm):
    _region_bm(region_bm),
    _card_bm(card_bm) {
    //assert(region_bm != NULL, "");
    assert(card_bm != NULL, "");
    // Calculate the card number for the bottom of the heap. Used
    // in biasing indexes into the accounting card bitmaps.
    _heap_card_bias =
      (BitMap::idx_t)(uintptr_t(G1CollectedHeap::heap()->reserved_region().start()) >> CardTableModRefBS::card_shift);
  }
};

// Heap region closure used for verifying the live count data
// that was created concurrently and finalized during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.
class G1VerifyLiveDataHRClosure: public HeapRegionClosure {
private:
  G1CollectedHeap* _g1h;
  G1CMBitMap* _mark_bitmap;
  G1LiveDataHelper _calc_helper;

  BitMap* _act_region_bm; // Region BM to be verified
  BitMap* _act_card_bm;   // Card BM to be verified

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

  // Updates the live data count for the given heap region and returns the number
  // of bytes marked.
  size_t create_live_data_count(HeapRegion* hr) {
    size_t bytes_marked = _calc_helper.mark_marked_during_marking(_mark_bitmap, hr);
    bool allocated_since_marking = _calc_helper.mark_allocated_since_marking(hr);
    if (allocated_since_marking || bytes_marked > 0) {
      _calc_helper.set_bit_for_region(hr);
    }
    return bytes_marked;
  }

public:
  G1VerifyLiveDataHRClosure(G1CollectedHeap* g1h,
                            G1CMBitMap* mark_bitmap,
                            BitMap* act_region_bm,
                            BitMap* act_card_bm,
                            BitMap* exp_region_bm,
                            BitMap* exp_card_bm) :
    _g1h(g1h),
    _mark_bitmap(mark_bitmap),
    _calc_helper(exp_region_bm, exp_card_bm),
    _act_region_bm(act_region_bm),
    _act_card_bm(act_card_bm),
    _exp_region_bm(exp_region_bm),
    _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    int failures = 0;

    // Walk the marking bitmap for this region and set the corresponding bits
    // in the expected region and card bitmaps.
    size_t exp_marked_bytes = create_live_data_count(hr);
    size_t act_marked_bytes = hr->next_marked_bytes();

    // Verify the marked bytes for this region.
    if (exp_marked_bytes != act_marked_bytes) {
      failures += 1;
    } else if (exp_marked_bytes > HeapRegion::GrainBytes) {
      failures += 1;
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _act_region_bm->at(index);
    if (expected && !actual) {
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.
    BitMap::idx_t start_idx = _calc_helper.card_live_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _calc_helper.card_live_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
      expected = _exp_card_bm->at(i);
      actual = _act_card_bm->at(i);

      if (expected && !actual) {
        failures += 1;
      }
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
    return false;
  }
};

class G1VerifyLiveDataTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  G1CMBitMap* _mark_bitmap;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  BitMap _expected_region_bm;
  BitMap _expected_card_bm;

  int _failures;

  HeapRegionClaimer _hr_claimer;

public:
  G1VerifyLiveDataTask(G1CollectedHeap* g1h,
                       G1CMBitMap* bitmap,
                       BitMap* region_bm,
                       BitMap* card_bm,
                       uint n_workers)
  : AbstractGangTask("G1 verify final counting"),
    _g1h(g1h),
    _mark_bitmap(bitmap),
    _actual_region_bm(region_bm),
    _actual_card_bm(card_bm),
    _expected_region_bm(region_bm->size(), true /* in_resource_area */),
    _expected_card_bm(card_bm->size(), true /* in_resource_area */),
    _failures(0),
    _hr_claimer(n_workers) {
    assert(VerifyDuringGC, "don't call this otherwise");
  }

  void work(uint worker_id) {
    G1VerifyLiveDataHRClosure cl(_g1h,
                                 _mark_bitmap,
                                 _actual_region_bm,
                                 _actual_card_bm,
                                 &_expected_region_bm,
                                 &_expected_card_bm);
    _g1h->heap_region_par_iterate(&cl, worker_id, &_hr_claimer);

    Atomic::add(cl.failures(), &_failures);
  }

  int failures() const { return _failures; }
};

class G1FinalizeLiveDataTask: public AbstractGangTask {
  // Finalizes the liveness counting data.
  // Sets the bits corresponding to the interval [NTAMS, top]
  // (which contains the implicitly live objects) in the
  // card liveness bitmap. Also sets the bit for each region
  // containing live data, in the region liveness bitmap.
  class G1FinalizeCountDataClosure: public HeapRegionClosure {
  private:
    G1LiveDataHelper _helper;
  public:
    G1FinalizeCountDataClosure(G1CMBitMap* bitmap,
                               BitMap* region_bm,
                               BitMap* card_bm) :
      HeapRegionClosure(),
      _helper(region_bm, card_bm) { }

    bool doHeapRegion(HeapRegion* hr) {
      bool allocated_since_marking = _helper.mark_allocated_since_marking(hr);
      if (allocated_since_marking || hr->next_marked_bytes() > 0) {
        _helper.set_bit_for_region(hr);
      }
      return false;
    }
  };

  G1CMBitMap* _bitmap;

  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  HeapRegionClaimer _hr_claimer;

public:
  G1FinalizeLiveDataTask(G1CMBitMap* bitmap, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
    AbstractGangTask("G1 final counting"),
    _bitmap(bitmap),
    _actual_region_bm(region_bm),
    _actual_card_bm(card_bm),
    _hr_claimer(n_workers) {
  }

  void work(uint worker_id) {
    G1FinalizeCountDataClosure cl(_bitmap,
                                  _actual_region_bm,
                                  _actual_card_bm);

    G1CollectedHeap::heap()->heap_region_par_iterate(&cl, worker_id, &_hr_claimer);
  }
};

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    // We use a claim value of zero here because all regions
    // were claimed with value 1 in the FinalCount task.
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not be able to guarantee
      // that we only generate output for the newly-reclaimed regions
      // (the list might not be empty at the beginning of cleanup; we
      // might still be working on its previous contents). So we do
      // the printing here, before we append the new regions to the
      // global cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    // Finalize the live data.
    G1FinalizeLiveDataTask cl(_nextMarkBitMap,
                              &_region_live_bm,
                              &_card_live_bm,
                              g1h->workers()->active_workers());
    g1h->workers()->run_task(&cl);
  }

  if (VerifyDuringGC) {
    // Verify that the liveness count data created concurrently matches one created
    // during this safepoint.
    ResourceMark rm;
    G1VerifyLiveDataTask cl(G1CollectedHeap::heap(),
                            _nextMarkBitMap,
                            &_region_live_bm,
                            &_card_live_bm,
                            g1h->workers()->active_workers());
    g1h->workers()->run_task(&cl);

    guarantee(cl.failures() == 0, "Unexpected accounting failures");
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitmap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // This needs to happen before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set(&_region_live_bm, &_card_live_bm);
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks.
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking.

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}
// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also, using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  int               _ref_counter_limit;
  int               _ref_counter;
  bool              _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task),
    _ref_counter_limit(G1RefProcDrainInterval), _is_serial(is_serial) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false      /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};
// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  bool              _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};
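// For orientation, a minimal sketch of the serial wiring of these two
// closures, mirroring what weakRefsWork() below actually sets up
// (variable names are illustrative only):
//
//   G1CMIsAliveClosure           is_alive(g1h);
//   G1CMKeepAliveAndDrainClosure keep_alive(cm, cm->task(0), true /* is_serial */);
//   G1CMDrainMarkingStackClosure drain(cm, cm->task(0), true /* is_serial */);
//   rp->process_discovered_references(&is_alive, &keep_alive, &drain,
//                                     NULL /* executor */, timer);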
// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking.

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;
  WorkGang*         _workers;
  uint              _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask&      _proc_task;
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}

void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists. We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark   hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    GCTraceTime(Debug, gc) trace("Reference Processing", _gc_timer_cm);

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (e.g.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          _gc_timer_cm);
    _gc_tracer_cm->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed.
    return;
  }

  assert(_markStack.isEmpty(), "Marking should have completed");

  // Unload Klasses, String, Symbols, Code Cache, etc.
  {
    GCTraceTime(Debug, gc) trace("Unloading", _gc_timer_cm);

    if (ClassUnloadingWithConcurrentMark) {
      bool purged_classes;

      {
        GCTraceTime(Trace, gc) trace("System Dictionary Unloading", _gc_timer_cm);
        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
      }

      {
        GCTraceTime(Trace, gc) trace("Parallel Unloading", _gc_timer_cm);
        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
      }
    }

    if (G1StringDedup::is_enabled()) {
      GCTraceTime(Trace, gc) trace("String Deduplication Unlink", _gc_timer_cm);
      G1StringDedup::unlink(&g1_is_alive);
    }
  }
}

void G1ConcurrentMark::swapMarkBitMaps() {
  G1CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap    = (G1CMBitMap*)  temp;
}

BitMap G1ConcurrentMark::allocate_large_bitmap(BitMap::idx_t size_in_bits) {
  size_t size_in_words = BitMap::size_in_words(size_in_bits);

  BitMap::bm_word_t* map = MmapArrayAllocator<BitMap::bm_word_t, mtGC>::allocate(size_in_words);

  return BitMap(map, size_in_bits);
}

void G1ConcurrentMark::allocate_internal_bitmaps() {
  double start_time = os::elapsedTime();

  _region_live_bm = allocate_large_bitmap(_g1h->max_regions());

  guarantee(_g1h->max_capacity() % CardTableModRefBS::card_size == 0,
            "Heap capacity must be aligned to card size.");
  _card_live_bm = allocate_large_bitmap(_g1h->max_capacity() / CardTableModRefBS::card_size);

  log_debug(gc, marking)("Allocating internal bitmaps took %1.2f seconds.", os::elapsedTime() - start_time);
}

void G1ConcurrentMark::pretouch_internal_bitmaps() {
  double start_time = os::elapsedTime();

  _region_live_bm.pretouch();
  _card_live_bm.pretouch();

  log_debug(gc, marking)("Pre-touching internal bitmaps took %1.2f seconds.", os::elapsedTime() - start_time);
}
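// Sizing, for orientation: the region live bitmap needs one bit per heap
// region, while the card live bitmap needs one bit per card (512 bytes).
// As an illustrative (not normative) example, a 32 GB maximum heap has
// 32G / 512 = 64M cards, so its card live bitmap occupies 64M bits = 8 MB.
// That is why it is mmap'ed (MmapArrayAllocator above) and pretouched,
// rather than allocated from the C heap.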
// Closure for marking entries in SATB buffers.
class G1CMSATBBufferClosure : public SATBBufferClosure {
private:
  G1CMTask*        _task;
  G1CollectedHeap* _g1h;

  // This is very similar to G1CMTask::deal_with_reference, but with
  // more relaxed requirements for the argument, so this must be more
  // circumspect about treating the argument as an object.
  void do_entry(void* entry) const {
    _task->increment_refs_reached();
    HeapRegion* hr = _g1h->heap_region_containing(entry);
    if (entry < hr->next_top_at_mark_start()) {
      // Until we get here, we don't know whether entry refers to a valid
      // object; it could instead have been a stale reference.
      oop obj = static_cast<oop>(entry);
      assert(obj->is_oop(true /* ignore mark word */),
             "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
      _task->make_reference_grey(obj);
    }
  }

public:
  G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  G1CMSATBBufferClosure _cm_satb_cl;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
    _cm_satb_cl(task, g1h),
    _cm_cl(g1h, g1h->concurrent_mark(), task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods to find
        // roots for concurrent marking. However, oops reachable from nmethods have very
        // complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder)
        // of the receiver, should be live by the SATB invariant, but other oops recorded
        // in nmethods may behave differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
      }
    }
  }
};

class G1CMRemarkTask: public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;
public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true         /* do_termination       */,
                              false        /* is_serial            */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void G1ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark   hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Debug, gc) trace("Finalize Marking", _gc_timer_cm);

  g1h->ensure_parsability(false);

  // this is remark, so we'll use up all active threads
  uint active_workers = g1h->workers()->active_workers();
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the G1ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  {
    StrongRootsScope srs(active_workers);

    G1CMRemarkTask remarkTask(this, active_workers);
    // We will start all available threads, even if we decide that the
    // active_workers will be fewer. The extra ones will just bail out
    // immediately.
    g1h->workers()->run_task(&remarkTask);
  }

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
            BOOL_TO_STR(has_overflown()),
            satb_mq_set.completed_buffers_num());

  print_stats();
}

void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
}

HeapRegion*
G1ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    HeapRegion* curr_region = _g1h->heap_region_containing(finger);

    // Above heap_region_containing may return NULL as we always claim
    // until the end of the heap. In this case, just jump to the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord*   bottom = curr_region->bottom();
      HeapWord*   limit  = curr_region->next_top_at_mark_start();

      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
      assert(_finger >= end, "the finger should have moved forward");

      if (limit > bottom) {
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        // we return NULL and the caller should try calling
        // claim_region() again.
        return NULL;
      }
    } else {
      assert(_finger > finger, "the finger should have moved forward");
      // read it again
      finger = _finger;
    }
  }

  return NULL;
}
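// The claiming protocol above in a nutshell (a sketch, with illustrative
// names; the real code is in claim_region() itself):
//
//   HeapWord* finger = _finger;                   // snapshot shared finger
//   HeapWord* end    = /* end of region at finger */;
//   if (Atomic::cmpxchg_ptr(end, &_finger, finger) == finger) {
//     // CAS won: this task owns [bottom, NTAMS) of that region.
//   } else {
//     // CAS lost: another task advanced the finger; re-read and retry.
//   }
//
// Losing the CAS is cheap: the task simply observes the new finger value
// and loops.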
#ifndef PRODUCT
class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
private:
  G1CollectedHeap* _g1h;
  const char* _phase;
  int _info;

public:
  VerifyNoCSetOops(const char* phase, int info = -1) :
    _g1h(G1CollectedHeap::heap()),
    _phase(phase),
    _info(info)
  { }

  void operator()(oop obj) const {
    guarantee(obj->is_oop(),
              "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
              p2i(obj), _phase, _info);
    guarantee(!_g1h->obj_in_cs(obj),
              "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
              p2i(obj), _phase, _info);
  }
};

void G1ConcurrentMark::verify_no_cset_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
    return;
  }

  // Verify entries on the global mark stack
  _markStack.iterate(VerifyNoCSetOops("Stack"));

  // Verify entries on the task queues
  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->iterate(VerifyNoCSetOops("Queue", i));
  }

  // Verify the global finger
  HeapWord* global_finger = finger();
  if (global_finger != NULL && global_finger < _heap_end) {
    // Since we always iterate over all regions, we might get a NULL HeapRegion
    // here.
    HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              "global finger: " PTR_FORMAT " region: " HR_FORMAT,
              p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
  }

  // Verify the task fingers
  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  for (uint i = 0; i < parallel_marking_threads(); ++i) {
    G1CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap_end) {
      // See above note on the global finger verification.
      HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                !task_hr->in_collection_set(),
                "task finger: " PTR_FORMAT " region: " HR_FORMAT,
                p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
    }
  }
}
#endif // PRODUCT
class G1CreateLiveDataTask: public AbstractGangTask {
  // Aggregate the counting data that was constructed concurrently
  // with marking.
  class G1CreateLiveDataHRClosure: public HeapRegionClosure {
    G1LiveDataHelper _helper;

    G1CMBitMap* _mark_bitmap;

    G1ConcurrentMark* _cm;
  public:
    G1CreateLiveDataHRClosure(G1ConcurrentMark* cm,
                              G1CMBitMap* mark_bitmap,
                              BitMap* cm_card_bm) :
      HeapRegionClosure(),
      _helper(NULL, cm_card_bm),
      _mark_bitmap(mark_bitmap),
      _cm(cm) { }

    bool doHeapRegion(HeapRegion* hr) {
      size_t marked_bytes = _helper.mark_marked_during_marking(_mark_bitmap, hr);
      if (marked_bytes > 0) {
        hr->add_to_marked_bytes(marked_bytes);
      }

      if (_cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      return false;
    }
  };

  G1CollectedHeap* _g1h;
  BitMap* _cm_card_bm;
  HeapRegionClaimer _hr_claimer;

public:
  G1CreateLiveDataTask(G1CollectedHeap* g1h,
                       BitMap* cm_card_bm,
                       uint n_workers) :
    AbstractGangTask("Create Live Data"),
    _g1h(g1h),
    _cm_card_bm(cm_card_bm),
    _hr_claimer(n_workers) {
  }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join;

    G1CreateLiveDataHRClosure cl(_g1h->concurrent_mark(), _g1h->concurrent_mark()->nextMarkBitMap(), _cm_card_bm);
    _g1h->heap_region_par_iterate(&cl, worker_id, &_hr_claimer);
  }
};

void G1ConcurrentMark::create_live_data() {
  uint n_workers = _parallel_workers->active_workers();

  G1CreateLiveDataTask cl(_g1h,
                          &_card_live_bm,
                          n_workers);
  _parallel_workers->run_task(&cl);
}

class G1ClearAllLiveDataTask : public AbstractGangTask {
  BitMap* _bitmap;
  size_t _num_tasks;
  size_t _cur_task;
public:
  G1ClearAllLiveDataTask(BitMap* bitmap, size_t num_tasks) :
    AbstractGangTask("Clear All Live Data"),
    _bitmap(bitmap),
    _num_tasks(num_tasks),
    _cur_task(0) {
  }

  virtual void work(uint worker_id) {
    while (true) {
      size_t to_process = Atomic::add(1, &_cur_task) - 1;
      if (to_process >= _num_tasks) {
        break;
      }

      BitMap::idx_t start = M * BitsPerByte * to_process;
      BitMap::idx_t end = MIN2(start + M * BitsPerByte, _bitmap->size());
      _bitmap->clear_range(start, end);
    }
  }
};
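// Worker chunking, for orientation: each claimed chunk covers
// M * BitsPerByte bits, i.e. one megabyte of bitmap memory, and chunks
// are handed out lock-free by bumping _cur_task with Atomic::add. As an
// illustrative (not normative) example, a card live bitmap of 8 MB
// (see allocate_internal_bitmaps() above) yields num_chunks == 8, so at
// most eight workers clear it in parallel.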
void G1ConcurrentMark::clear_all_live_data(WorkGang* workers) {
  double start_time = os::elapsedTime();

  guarantee(Universe::is_fully_initialized(), "Should not call this during initialization.");

  size_t const num_chunks = align_size_up(_card_live_bm.size_in_words() * HeapWordSize, M) / M;

  G1ClearAllLiveDataTask cl(&_card_live_bm, num_chunks);
  workers->run_task(&cl);

  // The region live bitmap is always very small, even for huge heaps.
  // Clear it directly.
  _region_live_bm.clear();

  log_debug(gc, marking)("Clear Live Data took %.3fms", (os::elapsedTime() - start_time) * 1000.0);
}

void G1ConcurrentMark::verify_all_live_data() {
  assert(_card_live_bm.count_one_bits() == 0, "Master card bitmap not clear");
  assert(_region_live_bm.count_one_bits() == 0, "Master region bitmap not clear");
}

void G1ConcurrentMark::print_stats() {
  if (!log_is_enabled(Debug, gc, stats)) {
    return;
  }
  log_debug(gc, stats)("---------------------------------------------------------------------");
  for (size_t i = 0; i < _active_tasks; ++i) {
    _tasks[i]->print_stats();
    log_debug(gc, stats)("---------------------------------------------------------------------");
  }
}

void G1ConcurrentMark::abort() {
  if (!cmThread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);

  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  clear_all_live_data(_g1h->workers());
  DEBUG_ONLY(verify_all_live_data());
  // Empty mark stack
  reset_marking_state();
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
                                 false, /* new active value */
                                 satb_mq_set.is_active() /* expected_active */);
}

static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

void G1ConcurrentMark::print_summary_info() {
  LogHandle(gc, marking) log;
  if (!log.is_trace()) {
    return;
  }

  log.trace(" Concurrent marking:");
  print_ms_time_info("  ", "init marks", _init_times);
  print_ms_time_info("  ", "remarks", _remark_times);
  {
    print_ms_time_info("     ", "final marks", _remark_mark_times);
    print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
  }
  print_ms_time_info("  ", "cleanups", _cleanup_times);
  log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
            _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  if (G1ScrubRemSets) {
    log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
              _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  }
  log.trace("  Total stop_world time = %8.2f s.",
            (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
  log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
            cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
}

void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  _parallel_workers->print_worker_threads_on(st);
}

void G1ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
}

// We take a break if someone is trying to stop the world.
bool G1ConcurrentMark::do_yield_check(uint worker_id) {
  if (SuspendibleThreadSet::should_yield()) {
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

// Closure for iteration over bitmaps
class G1CMBitMapClosure : public BitMapClosure {
private:
  // the bitmap that is being iterated over
  G1CMBitMap*       _nextMarkBitMap;
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;

public:
  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) :
    _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }

  bool do_bit(size_t offset) {
    HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
    assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert( addr < _cm->finger(), "invariant");
    assert(addr >= _task->finger(), "invariant");

    // We move this task's local finger along.
    _task->move_finger_to(addr);

    _task->scan_object(oop(addr));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);

    // if the has_aborted flag has been raised, we need to bail out of
    // the iteration
    return !_task->has_aborted();
  }
};

static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
  ReferenceProcessor* result = g1h->ref_processor_cm();
  assert(result != NULL, "CM reference processor should not be NULL");
  return result;
}

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               G1ConcurrentMark* cm,
                               G1CMTask* task)
  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
    _g1h(g1h), _cm(cm), _task(task)
{ }

void G1CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  _curr_region  = hr;
  _finger       = hr->bottom();
  update_region_limit();
}

void G1CMTask::update_region_limit() {
  HeapRegion* hr   = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit  = hr->next_top_at_mark_start();

  if (limit == bottom) {
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and we in fact do not need to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}

void G1CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  clear_region_fields();
}

void G1CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
  _curr_region  = NULL;
  _finger       = NULL;
  _region_limit = NULL;
}

void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  if (cm_oop_closure == NULL) {
    assert(_cm_oop_closure != NULL, "invariant");
  } else {
    assert(_cm_oop_closure == NULL, "invariant");
  }
  _cm_oop_closure = cm_oop_closure;
}

void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
  guarantee(nextMarkBitMap != NULL, "invariant");
  _nextMarkBitMap            = nextMarkBitMap;
  clear_region_fields();

  _calls                     = 0;
  _elapsed_time_ms           = 0.0;
  _termination_time_ms       = 0.0;
  _termination_start_time_ms = 0.0;
}

bool G1CMTask::should_exit_termination() {
  regular_clock_call();
  // This is called when we are in the termination protocol. We should
  // quit if, for some reason, this task wants to abort or the global
  // stack is not empty (this means that we can get work from it).
  return !_cm->mark_stack_empty() || has_aborted();
}

void G1CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
         "shouldn't have been called otherwise");
  regular_clock_call();
}
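// The work-based clock scheme in a nutshell (a sketch only; the actual
// counter increments live in the object-scanning code, assumed to be in
// g1ConcurrentMark.inline.hpp):
//
//   _words_scanned += obj->size();   // while scanning an object
//   _refs_reached++;                 // while visiting a reference
//   if (_words_scanned >= _words_scanned_limit ||
//       _refs_reached  >= _refs_reached_limit) {
//     reached_limit();               // -> regular_clock_call()
//   }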
void G1CMTask::regular_clock_call() {
  if (has_aborted()) return;

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    set_has_aborted();
    return;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!concurrent()) return;

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    set_has_aborted();
    return;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (3) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    return;
  }

  // (4) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    return;
  }

  // (5) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    // we do need to process SATB buffers, we'll abort and restart
    // the marking task to do so
    set_has_aborted();
    return;
  }
}

void G1CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit      = _real_words_scanned_limit;

  _real_refs_reached_limit  = _refs_reached + refs_reached_period;
  _refs_reached_limit       = _real_refs_reached_limit;
}

void G1CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit  = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}
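// Arithmetic, for orientation: if the scan period is P words, then after
// decrease_limits() the effective limit is the full limit minus 3P/4, so
// only about a quarter of the scanning budget remains before
// reached_limit() fires and the clock runs again. The refs-reached limit
// shrinks in the same proportion.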
void G1CMTask::move_entries_to_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the local queue
  oop buffer[global_stack_transfer_size];

  int n = 0;
  oop obj;
  while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
    buffer[n] = obj;
    ++n;
  }

  if (n > 0) {
    // we popped at least one entry from the local queue

    if (!_cm->mark_stack_push(buffer, n)) {
      set_has_aborted();
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void G1CMTask::get_entries_from_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the global stack.
  oop buffer[global_stack_transfer_size];
  int n;
  _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  assert(n <= global_stack_transfer_size,
         "we should not pop more than the given limit");
  if (n > 0) {
    // yes, we did actually pop at least one entry
    for (int i = 0; i < n; ++i) {
      bool success = _task_queue->push(buffer[i]);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void G1CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) return;

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    oop obj;
    bool ret = _task_queue->pop_local(obj);
    while (ret) {
      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
      assert(!_g1h->is_on_master_free_list(
                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");

      scan_object(obj);

      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(obj);
      }
    }
  }
}

void G1CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end). Notice that,
  // because we move entries from the global stack in chunks, or
  // because another task might be doing the same, we might in fact
  // drop below the target. But, this is not a problem.
  size_t target_size;
  if (partially) {
    target_size = _cm->partial_mark_stack_size_target();
  } else {
    target_size = 0;
  }

  if (_cm->mark_stack_size() > target_size) {
    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      get_entries_from_global_stack();
      drain_local_queue(partially);
    }
  }
}

// The SATB queue has several assumptions on whether to call the par or
// non-par versions of the methods. This is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    regular_clock_call();
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}

void G1CMTask::print_stats() {
  log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
                       _worker_id, _calls);
  log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                       _elapsed_time_ms, _termination_time_ms);
  log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                       _step_times_ms.num(), _step_times_ms.avg(),
                       _step_times_ms.sd());
  log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
                       _step_times_ms.maximum(), _step_times_ms.sum());
}

bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
  return _task_queues->steal(worker_id, hash_seed, obj);
}

/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework. It can be called in parallel
    with other invocations of do_marking_step() on different tasks
    (but only one per task, obviously) and concurrently with the
    mutator threads, or during remark, hence it eliminates the need
    for two versions of the code. When called during remark, it will
    pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
    it needs to yield.

    The data structures that it uses to do marking work are the
    following:

      (1) Marking Bitmap. If there are gray objects that appear only
      on the bitmap (this happens either when dealing with an overflow
      or when the initial marking phase has simply marked the roots
      and didn't push them on the stack), then tasks claim heap
      regions whose bitmap they then scan to find gray objects. A
      global finger indicates where the end of the last claimed region
      is. A local finger indicates how far into the region a task has
      scanned. The two fingers are used to determine how to gray an
      object (i.e. whether simply marking it is OK, as it will be
      visited by a task in the future, or whether it needs to be also
      pushed on a stack).

      (2) Local Queue. The local queue of the task which is accessed
      reasonably efficiently by the task. Other tasks can steal from
      it when they run out of work. Throughout the marking phase, a
      task attempts to keep its local queue short but not totally
      empty, so that entries are available for stealing by other
      tasks. Only when there is no more work will a task totally
      drain its local queue.

      (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and more fine-grained
      interaction with it which might cause contention. If it
      overflows, then the marking phase should restart and iterate
      over the bitmap to identify gray objects.
      Throughout the marking phase, tasks attempt to keep the global
      mark stack at a small length but not totally empty, so that
      entries are available for popping by other tasks. Only when
      there is no more work will tasks totally drain the global mark
      stack.

      (4) SATB Buffer Queue. This is where completed SATB buffers are
      made available. Buffers are regularly removed from this queue
      and scanned for roots, so that the queue doesn't get too
      long. During remark, all completed buffers are processed, as
      well as the filled-in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

      (1) When the marking phase has been aborted (after a Full GC).

      (2) When a global overflow (on the global stack) has been
      triggered. Before the task aborts, it will actually sync up with
      the other tasks to ensure that all the marking data structures
      (local queues, stacks, fingers etc.) are re-initialized so that
      when do_marking_step() completes, the marking phase can
      immediately restart.

      (3) When enough completed SATB buffers are available. The
      do_marking_step() method only tries to drain SATB buffers right
      at the beginning. So, if enough buffers are available, the
      marking step aborts and the SATB buffers are processed at
      the beginning of the next invocation.

      (4) To yield. When we have to yield then we abort and yield
      right at the end of do_marking_step(). This saves us from a lot
      of hassle as, by yielding, we might allow a Full GC. If this
      happens then objects will be compacted underneath our feet, the
      heap might shrink, etc. We save checking for this by just
      aborting and doing the yield right at the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (in
    sub-millisecond intervals) throughout marking. It is this clock
    method that checks all the abort conditions which were mentioned
    above and decides when the task should abort. A work-based scheme
    is used to trigger this clock method: when the number of object
    words the marking phase has scanned or the number of references
    the marking phase has visited reaches a given limit. Additional
    invocations to the clock method have been planted in a few other
    strategic places too. The initial reason for the clock method was
    to avoid calling vtime too regularly, as it is quite expensive. So,
    once it was in place, it was natural to piggy-back all the other
    conditions on it too and not constantly check them throughout the
    code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.

 *****************************************************************************/
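// Canonical calling pattern, as used by the closures and tasks above
// (e.g. G1CMDrainMarkingStackClosure, G1CMRemarkTask):
//
//   do {
//     task->do_marking_step(target_ms,
//                           do_termination,
//                           is_serial);
//   } while (task->has_aborted() && !cm->has_overflown());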

void G1CMTask::do_marking_step(double time_target_ms,
                               bool do_termination,
                               bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it is
  // possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other G1CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached = 0;
  recalculate_limits();
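
  // The work-based trigger, schematically (an illustrative sketch; the
  // real bookkeeping lives in recalculate_limits() and
  // regular_clock_call()):
  //
  //   words-scanned limit = _words_scanned + <words per period>
  //   refs-reached  limit = _refs_reached  + <refs per period>
  //
  // Once scanning or reference visiting pushes a counter past its
  // limit, regular_clock_call() re-checks the elapsed time and all of
  // the abort conditions described in the comment above.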

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region == NULL, "invariant");
      assert(_finger == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");
    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");
    while (!has_aborted()) {
      oop obj;
      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }
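
  // What follows is the termination handshake. Schematically (an
  // illustrative sketch -- offer_termination() below is the actual
  // mechanism): a task with no work left offers termination;
  // offer_termination() keeps peeking for new work (via this task's
  // should_exit_termination()) and returns true only once every task
  // has offered termination; if it instead returns false, more work
  // has appeared, so the task aborts and the caller re-invokes
  // do_marking_step().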

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The G1CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
    } else {
      // Apparently there's more work to do. Let's abort this task. The
      // caller will restart it and we can hopefully find more things to do.
      set_has_aborted();
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.
    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
  }
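
  // The two-barrier restart protocol above, schematically (an
  // illustrative timeline, not code):
  //
  //   all tasks:  enter_first_sync_barrier()   // everyone stops marking
  //   (task 0 resets the global data structures once all have entered)
  //   each task:  clear_region_fields()        // reset local state
  //   all tasks:  enter_second_sync_barrier()  // everyone may restart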

  _claimed = false;
}

G1CMTask::G1CMTask(uint worker_id,
                   G1ConcurrentMark* cm,
                   G1CMTaskQueue* task_queue,
                   G1CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX             "###"

#define G1PPRL_ADDR_BASE_FORMAT        " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT             " %-4s"
#define G1PPRL_TYPE_H_FORMAT           " %4s"
#define G1PPRL_BYTE_FORMAT             " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT           " %9s"
#define G1PPRL_DOUBLE_FORMAT           " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT         " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
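
// For example, with the definitions above,
//   G1PPRL_LINE_PREFIX G1PPRL_TYPE_H_FORMAT G1PPRL_BYTE_H_FORMAT
// relies on C string-literal concatenation and composes to the single
// format string "### %4s %9s" -- the leading space each macro carries
// is what separates the columns.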

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(const char* phase_name)
  : _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "(bytes)");
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type       = r->get_type_str();
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, strong_code_roots_bytes);

  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // add static memory usages to remembered set sizes
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          " SUMMARY"
                          G1PPRL_SUM_MB_FORMAT("capacity")
                          G1PPRL_SUM_MB_PERC_FORMAT("used")
                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                          G1PPRL_SUM_MB_FORMAT("remset")
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
                          perc(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
                          perc(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
                          perc(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
}
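
// With the formats above, a footer line looks roughly like this
// (values invented purely for illustration):
//   ### SUMMARY capacity: 2048.00 MB used: 1024.00 MB / 50.00 % ...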