/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/growableArray.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
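  // (A single bitmap bit covers HeapWordSize << _shifter bytes of heap,
  // so marks can only appear at addresses aligned to that granularity;
  // e.g. with _shifter == 0 there is one bit per heap word.)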
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clear_range(mr);
}

void G1CMBitMap::clear_range(MemRegion mr) {
  // Clip the range to the part of the heap this bitmap actually covers;
  // intersection() returns the clipped region rather than modifying mr.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack(G1ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
{}

bool G1CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    log_warning(gc)("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    log_warning(gc)("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of G1ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void G1CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
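  // Expansion happens only inside a safepoint (the remark pause), so no
  // concurrent pushes or pops can race with releasing and reallocating
  // the backing store.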
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    // Failed to double capacity, continue.
    log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  _capacity / K, new_capacity / K);
  }
}

void G1CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void G1CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool G1CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

void G1CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void G1CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            "saved index: %d index: %d", _saved_index, _index);
  _saved_index = -1;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = true;
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    log_warning(gc)("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
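  // (set_concurrency() later decides how many of these tasks are
  // actually activated for a given phase.)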
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty(); // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_parallel_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC or for an
 * evacuation pause to occur while it waits. This is actually safe,
 * since entering the sync barrier is one of the last things
 * do_marking_step() does, and it doesn't manipulate any data
 * structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      G1CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check();

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    G1CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
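    // scan_finished() below also notifies anyone blocked in
    // wait_until_scan_finished() that root region scanning is done.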
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
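    // (region counts first, then the freed bytes and the global cleanup
    // list under the ParGCRareEvent_lock below)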
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc)("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc)("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the rem sets before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  int _ref_counter_limit;
  int _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
    _is_serial(is_serial) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}

void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists. We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
1605     return;
1606   }
1607 
1608   ResourceMark rm;
1609   HandleMark   hm;
1610 
1611   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1612 
1613   // Is alive closure.
1614   G1CMIsAliveClosure g1_is_alive(g1h);
1615 
1616   // Inner scope to exclude the cleaning of the string and symbol
1617   // tables from the displayed time.
1618   {
1619     GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1620 
1621     ReferenceProcessor* rp = g1h->ref_processor_cm();
1622 
1623     // See the comment in G1CollectedHeap::ref_processing_init()
1624     // about how reference processing currently works in G1.
1625 
1626     // Set the soft reference policy
1627     rp->setup_policy(clear_all_soft_refs);
1628     assert(_markStack.isEmpty(), "mark stack should be empty");
1629 
1630     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1631     // in serial reference processing. Note these closures are also
1632     // used for serially processing (by the current thread) the
1633     // JNI references during parallel reference processing.
1634     //
1635     // These closures do not need to synchronize with the worker
1636     // threads involved in parallel reference processing as these
1637     // instances are executed serially by the current thread (e.g.
1638     // reference processing is not multi-threaded and is thus
1639     // performed by the current thread instead of a gang worker).
1640     //
1641     // The gang tasks involved in parallel reference processing create
1642     // their own instances of these closures, which do their own
1643     // synchronization among themselves.
1644     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1645     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1646 
1647     // We need at least one active thread. If reference processing
1648     // is not multi-threaded we use the current (VMThread) thread,
1649     // otherwise we use the work gang from the G1CollectedHeap and
1650     // we utilize all the worker threads we can.
1651     bool processing_is_mt = rp->processing_is_mt();
1652     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1653     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1654 
1655     // Parallel processing task executor.
1656     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1657                                               g1h->workers(), active_workers);
1658     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1659 
1660     // Set the concurrency level. The phase was already set prior to
1661     // executing the remark task.
1662     set_concurrency(active_workers);
1663 
1664     // Set the degree of MT processing here. If the discovery was done MT,
1665     // the number of threads involved during discovery could differ from
1666     // the number of active workers. This is OK as long as the discovered
1667     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1668     rp->set_active_mt_degree(active_workers);
1669 
1670     // Process the weak references.
1671     const ReferenceProcessorStats& stats =
1672         rp->process_discovered_references(&g1_is_alive,
1673                                           &g1_keep_alive,
1674                                           &g1_drain_mark_stack,
1675                                           executor,
1676                                           _gc_timer_cm);
1677     _gc_tracer_cm->report_gc_reference_stats(stats);
1678 
1679     // The do_oop work routines of the keep_alive and drain_marking_stack
1680     // oop closures will set the has_overflown flag if we overflow the
1681     // global marking stack.
1682 1683 assert(_markStack.overflow() || _markStack.isEmpty(), 1684 "mark stack should be empty (unless it overflowed)"); 1685 1686 if (_markStack.overflow()) { 1687 // This should have been done already when we tried to push an 1688 // entry on to the global mark stack. But let's do it again. 1689 set_has_overflown(); 1690 } 1691 1692 assert(rp->num_q() == active_workers, "why not"); 1693 1694 rp->enqueue_discovered_references(executor); 1695 1696 rp->verify_no_references_recorded(); 1697 assert(!rp->discovery_enabled(), "Post condition"); 1698 } 1699 1700 if (has_overflown()) { 1701 // We can not trust g1_is_alive if the marking stack overflowed 1702 return; 1703 } 1704 1705 assert(_markStack.isEmpty(), "Marking should have completed"); 1706 1707 // Unload Klasses, String, Symbols, Code Cache, etc. 1708 if (ClassUnloadingWithConcurrentMark) { 1709 bool purged_classes; 1710 1711 { 1712 GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm); 1713 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); 1714 } 1715 1716 { 1717 GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm); 1718 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 1719 } 1720 } 1721 1722 if (G1StringDedup::is_enabled()) { 1723 GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm); 1724 G1StringDedup::unlink(&g1_is_alive); 1725 } 1726 } 1727 1728 void G1ConcurrentMark::swapMarkBitMaps() { 1729 G1CMBitMapRO* temp = _prevMarkBitMap; 1730 _prevMarkBitMap = (G1CMBitMapRO*)_nextMarkBitMap; 1731 _nextMarkBitMap = (G1CMBitMap*) temp; 1732 } 1733 1734 // Closure for marking entries in SATB buffers. 1735 class G1CMSATBBufferClosure : public SATBBufferClosure { 1736 private: 1737 G1CMTask* _task; 1738 G1CollectedHeap* _g1h; 1739 1740 // This is very similar to G1CMTask::deal_with_reference, but with 1741 // more relaxed requirements for the argument, so this must be more 1742 // circumspect about treating the argument as an object. 1743 void do_entry(void* entry) const { 1744 _task->increment_refs_reached(); 1745 HeapRegion* hr = _g1h->heap_region_containing(entry); 1746 if (entry < hr->next_top_at_mark_start()) { 1747 // Until we get here, we don't know whether entry refers to a valid 1748 // object; it could instead have been a stale reference. 
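      // (Additional note, not part of the original comment and to be read
      // as a hedged summary: when a region is freed and reused during
      // marking, its next_top_at_mark_start() is reset to bottom(), so a
      // stale entry pointing into such a region fails the test above; an
      // entry at or above NTAMS refers to space allocated since marking
      // started, which is treated as implicitly live and need not be
      // traced here.)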
1749       oop obj = static_cast<oop>(entry);
1750       assert(obj->is_oop(true /* ignore mark word */),
1751              "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
1752       _task->make_reference_grey(obj);
1753     }
1754   }
1755 
1756  public:
1757   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1758     : _task(task), _g1h(g1h) { }
1759 
1760   virtual void do_buffer(void** buffer, size_t size) {
1761     for (size_t i = 0; i < size; ++i) {
1762       do_entry(buffer[i]);
1763     }
1764   }
1765 };
1766 
1767 class G1RemarkThreadsClosure : public ThreadClosure {
1768   G1CMSATBBufferClosure _cm_satb_cl;
1769   G1CMOopClosure _cm_cl;
1770   MarkingCodeBlobClosure _code_cl;
1771   int _thread_parity;
1772 
1773  public:
1774   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1775     _cm_satb_cl(task, g1h),
1776     _cm_cl(g1h, g1h->concurrent_mark(), task),
1777     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1778     _thread_parity(Threads::thread_claim_parity()) {}
1779 
1780   void do_thread(Thread* thread) {
1781     if (thread->is_Java_thread()) {
1782       if (thread->claim_oops_do(true, _thread_parity)) {
1783         JavaThread* jt = (JavaThread*)thread;
1784 
1785         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking,
1786         // however oops reachable from nmethods have very complex lifecycles:
1787         // * Alive if on the stack of an executing method
1788         // * Weakly reachable otherwise
1789         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1790         // live by the SATB invariant but other oops recorded in nmethods may behave differently.
1791         jt->nmethods_do(&_code_cl);
1792 
1793         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1794       }
1795     } else if (thread->is_VM_thread()) {
1796       if (thread->claim_oops_do(true, _thread_parity)) {
1797         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1798       }
1799     }
1800   }
1801 };
1802 
1803 class G1CMRemarkTask: public AbstractGangTask {
1804  private:
1805   G1ConcurrentMark* _cm;
1806  public:
1807   void work(uint worker_id) {
1808     // Since all available tasks are actually started, we should
1809     // only proceed if we're supposed to be active.
1810     if (worker_id < _cm->active_tasks()) {
1811       G1CMTask* task = _cm->task(worker_id);
1812       task->record_start_time();
1813       {
1814         ResourceMark rm;
1815         HandleMark hm;
1816 
1817         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1818         Threads::threads_do(&threads_f);
1819       }
1820 
1821       do {
1822         task->do_marking_step(1000000000.0 /* something very large */,
1823                               true         /* do_termination       */,
1824                               false        /* is_serial            */);
1825       } while (task->has_aborted() && !_cm->has_overflown());
1826       // If we overflow, then we do not want to restart. We instead
1827       // want to abort remark and do concurrent marking again.
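      // (Illustrative note, not from the original source: the loop
      // condition above distinguishes the two abort reasons -
      //   has_aborted() && !has_overflown()  -> transient abort, retry
      //   has_aborted() &&  has_overflown()  -> give up; remark is
      //                                         abandoned and marking
      //                                         restarts concurrently.)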
1828       task->record_end_time();
1829     }
1830   }
1831 
1832   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1833     AbstractGangTask("Par Remark"), _cm(cm) {
1834     _cm->terminator()->reset_for_reuse(active_workers);
1835   }
1836 };
1837 
1838 void G1ConcurrentMark::checkpointRootsFinalWork() {
1839   ResourceMark rm;
1840   HandleMark   hm;
1841   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1842 
1843   GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1844 
1845   g1h->ensure_parsability(false);
1846 
1847   // this is remark, so we'll use up all active threads
1848   uint active_workers = g1h->workers()->active_workers();
1849   set_concurrency_and_phase(active_workers, false /* concurrent */);
1850   // Leave _parallel_marking_threads at its
1851   // value originally calculated in the G1ConcurrentMark
1852   // constructor and pass values of the active workers
1853   // through the gang in the task.
1854 
1855   {
1856     StrongRootsScope srs(active_workers);
1857 
1858     G1CMRemarkTask remarkTask(this, active_workers);
1859     // We will start all available threads, even if we decide that the
1860     // active_workers will be fewer. The extra ones will just bail out
1861     // immediately.
1862     g1h->workers()->run_task(&remarkTask);
1863   }
1864 
1865   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1866   guarantee(has_overflown() ||
1867             satb_mq_set.completed_buffers_num() == 0,
1868             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1869             BOOL_TO_STR(has_overflown()),
1870             satb_mq_set.completed_buffers_num());
1871 
1872   print_stats();
1873 }
1874 
1875 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
1876   // Note we are overriding the read-only view of the prev map here, via
1877   // the cast.
1878   ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
1879 }
1880 
1881 HeapRegion*
1882 G1ConcurrentMark::claim_region(uint worker_id) {
1883   // "checkpoint" the finger
1884   HeapWord* finger = _finger;
1885 
1886   // _heap_end will not change underneath our feet; it only changes at
1887   // yield points.
1888   while (finger < _heap_end) {
1889     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1890 
1891     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1892 
1893     // heap_region_containing() above may return NULL as we always scan and
1894     // claim until the end of the heap. In this case, just jump to the next region.
1895     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1896 
1897     // Is the gap between reading the finger and doing the CAS too long?
1898     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
1899     if (res == finger && curr_region != NULL) {
1900       // we succeeded
1901       HeapWord*   bottom = curr_region->bottom();
1902       HeapWord*   limit  = curr_region->next_top_at_mark_start();
1903 
1904       // notice that _finger == end cannot be guaranteed here since
1905       // someone else might have moved the finger even further
1906       assert(_finger >= end, "the finger should have moved forward");
1907 
1908       if (limit > bottom) {
1909         return curr_region;
1910       } else {
1911         assert(limit == bottom,
1912                "the region limit should be at bottom");
1913         // we return NULL and the caller should try calling
1914         // claim_region() again.
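        // (A sketch of the claim protocol, for illustration only - it
        // restates the code above rather than adding new behavior:
        //
        //   HeapWord* observed = _finger;                  // snapshot
        //   HeapWord* end      = <end of observed's region>;
        //   if (Atomic::cmpxchg_ptr(end, &_finger, observed) == observed) {
        //     // this worker alone has advanced the finger over the
        //     // region starting at 'observed' and may scan it
        //   }
        //
        // so at most one worker claims any given region, with no lock.)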
1915 return NULL; 1916 } 1917 } else { 1918 assert(_finger > finger, "the finger should have moved forward"); 1919 // read it again 1920 finger = _finger; 1921 } 1922 } 1923 1924 return NULL; 1925 } 1926 1927 #ifndef PRODUCT 1928 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 1929 private: 1930 G1CollectedHeap* _g1h; 1931 const char* _phase; 1932 int _info; 1933 1934 public: 1935 VerifyNoCSetOops(const char* phase, int info = -1) : 1936 _g1h(G1CollectedHeap::heap()), 1937 _phase(phase), 1938 _info(info) 1939 { } 1940 1941 void operator()(oop obj) const { 1942 guarantee(obj->is_oop(), 1943 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1944 p2i(obj), _phase, _info); 1945 guarantee(!_g1h->obj_in_cs(obj), 1946 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1947 p2i(obj), _phase, _info); 1948 } 1949 }; 1950 1951 void G1ConcurrentMark::verify_no_cset_oops() { 1952 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1953 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 1954 return; 1955 } 1956 1957 // Verify entries on the global mark stack 1958 _markStack.iterate(VerifyNoCSetOops("Stack")); 1959 1960 // Verify entries on the task queues 1961 for (uint i = 0; i < _max_worker_id; ++i) { 1962 G1CMTaskQueue* queue = _task_queues->queue(i); 1963 queue->iterate(VerifyNoCSetOops("Queue", i)); 1964 } 1965 1966 // Verify the global finger 1967 HeapWord* global_finger = finger(); 1968 if (global_finger != NULL && global_finger < _heap_end) { 1969 // Since we always iterate over all regions, we might get a NULL HeapRegion 1970 // here. 1971 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1972 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1973 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1974 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1975 } 1976 1977 // Verify the task fingers 1978 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 1979 for (uint i = 0; i < parallel_marking_threads(); ++i) { 1980 G1CMTask* task = _tasks[i]; 1981 HeapWord* task_finger = task->finger(); 1982 if (task_finger != NULL && task_finger < _heap_end) { 1983 // See above note on the global finger verification. 
1984 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 1985 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 1986 !task_hr->in_collection_set(), 1987 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1988 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 1989 } 1990 } 1991 } 1992 #endif // PRODUCT 1993 void G1ConcurrentMark::create_live_data() { 1994 _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap); 1995 } 1996 1997 void G1ConcurrentMark::finalize_live_data() { 1998 _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap); 1999 } 2000 2001 void G1ConcurrentMark::verify_live_data() { 2002 _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap); 2003 } 2004 2005 void G1ConcurrentMark::clear_live_data(WorkGang* workers) { 2006 _g1h->g1_rem_set()->clear_card_live_data(workers); 2007 } 2008 2009 #ifdef ASSERT 2010 void G1ConcurrentMark::verify_live_data_clear() { 2011 _g1h->g1_rem_set()->verify_card_live_data_is_clear(); 2012 } 2013 #endif 2014 2015 void G1ConcurrentMark::print_stats() { 2016 if (!log_is_enabled(Debug, gc, stats)) { 2017 return; 2018 } 2019 log_debug(gc, stats)("---------------------------------------------------------------------"); 2020 for (size_t i = 0; i < _active_tasks; ++i) { 2021 _tasks[i]->print_stats(); 2022 log_debug(gc, stats)("---------------------------------------------------------------------"); 2023 } 2024 } 2025 2026 void G1ConcurrentMark::abort() { 2027 if (!cmThread()->during_cycle() || _has_aborted) { 2028 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2029 return; 2030 } 2031 2032 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2033 // concurrent bitmap clearing. 2034 { 2035 GCTraceTime(Debug, gc)("Clear Next Bitmap"); 2036 clear_bitmap(_nextMarkBitMap, _g1h->workers(), false); 2037 } 2038 // Note we cannot clear the previous marking bitmap here 2039 // since VerifyDuringGC verifies the objects marked during 2040 // a full GC against the previous bitmap. 2041 2042 { 2043 GCTraceTime(Debug, gc)("Clear Live Data"); 2044 clear_live_data(_g1h->workers()); 2045 } 2046 DEBUG_ONLY({ 2047 GCTraceTime(Debug, gc)("Verify Live Data Clear"); 2048 verify_live_data_clear(); 2049 }) 2050 // Empty mark stack 2051 reset_marking_state(); 2052 for (uint i = 0; i < _max_worker_id; ++i) { 2053 _tasks[i]->clear_region_fields(); 2054 } 2055 _first_overflow_barrier_sync.abort(); 2056 _second_overflow_barrier_sync.abort(); 2057 _has_aborted = true; 2058 2059 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2060 satb_mq_set.abandon_partial_marking(); 2061 // This can be called either during or outside marking, we'll read 2062 // the expected_active value from the SATB queue set. 2063 satb_mq_set.set_active_all_threads( 2064 false, /* new active value */ 2065 satb_mq_set.is_active() /* expected_active */); 2066 } 2067 2068 static void print_ms_time_info(const char* prefix, const char* name, 2069 NumberSeq& ns) { 2070 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2071 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2072 if (ns.num() > 0) { 2073 log_trace(gc, marking)("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 2074 prefix, ns.sd(), ns.maximum()); 2075 } 2076 } 2077 2078 void G1ConcurrentMark::print_summary_info() { 2079 Log(gc, marking) log; 2080 if (!log.is_trace()) { 2081 return; 2082 } 2083 2084 log.trace(" Concurrent marking:"); 2085 print_ms_time_info(" ", "init marks", _init_times); 2086 print_ms_time_info(" ", "remarks", _remark_times); 2087 { 2088 print_ms_time_info(" ", "final marks", _remark_mark_times); 2089 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2090 2091 } 2092 print_ms_time_info(" ", "cleanups", _cleanup_times); 2093 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2094 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2095 if (G1ScrubRemSets) { 2096 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 2097 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2098 } 2099 log.trace(" Total stop_world time = %8.2f s.", 2100 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2101 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2102 cmThread()->vtime_accum(), cmThread()->vtime_mark_accum()); 2103 } 2104 2105 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2106 _parallel_workers->print_worker_threads_on(st); 2107 } 2108 2109 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2110 _parallel_workers->threads_do(tc); 2111 } 2112 2113 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2114 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2115 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 2116 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 2117 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 2118 } 2119 2120 // Closure for iteration over bitmaps 2121 class G1CMBitMapClosure : public BitMapClosure { 2122 private: 2123 // the bitmap that is being iterated over 2124 G1CMBitMap* _nextMarkBitMap; 2125 G1ConcurrentMark* _cm; 2126 G1CMTask* _task; 2127 2128 public: 2129 G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) : 2130 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 2131 2132 bool do_bit(size_t offset) { 2133 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 2134 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 2135 assert( addr < _cm->finger(), "invariant"); 2136 assert(addr >= _task->finger(), "invariant"); 2137 2138 // We move that task's local finger along. 
2139     _task->move_finger_to(addr);
2140 
2141     _task->scan_object(oop(addr));
2142     // we only partially drain the local queue and global stack
2143     _task->drain_local_queue(true);
2144     _task->drain_global_stack(true);
2145 
2146     // if the has_aborted flag has been raised, we need to bail out of
2147     // the iteration
2148     return !_task->has_aborted();
2149   }
2150 };
2151 
2152 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2153   ReferenceProcessor* result = g1h->ref_processor_cm();
2154   assert(result != NULL, "CM reference processor should not be NULL");
2155   return result;
2156 }
2157 
2158 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2159                                G1ConcurrentMark* cm,
2160                                G1CMTask* task)
2161   : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
2162     _g1h(g1h), _cm(cm), _task(task)
2163 { }
2164 
2165 void G1CMTask::setup_for_region(HeapRegion* hr) {
2166   assert(hr != NULL,
2167          "claim_region() should have filtered out NULL regions");
2168   _curr_region  = hr;
2169   _finger       = hr->bottom();
2170   update_region_limit();
2171 }
2172 
2173 void G1CMTask::update_region_limit() {
2174   HeapRegion* hr   = _curr_region;
2175   HeapWord* bottom = hr->bottom();
2176   HeapWord* limit  = hr->next_top_at_mark_start();
2177 
2178   if (limit == bottom) {
2179     // The region was collected underneath our feet.
2180     // We set the finger to bottom to ensure that the bitmap
2181     // iteration that will follow this will not do anything.
2182     // (this is not a condition that holds when we set the region up,
2183     // as the region is not supposed to be empty in the first place)
2184     _finger = bottom;
2185   } else if (limit >= _region_limit) {
2186     assert(limit >= _finger, "peace of mind");
2187   } else {
2188     assert(limit < _region_limit, "only way to get here");
2189     // This can happen under some pretty unusual circumstances. An
2190     // evacuation pause empties the region underneath our feet (NTAMS
2191     // at bottom). We then do some allocation in the region (NTAMS
2192     // stays at bottom), followed by the region being used as a GC
2193     // alloc region (NTAMS will move to top() and the objects
2194     // originally below it will be grayed). All objects now marked in
2195     // the region are explicitly grayed, if below the global finger,
2196     // and in fact we do not need to scan anything else. So, we simply
2197     // set _finger to be limit to ensure that the bitmap iteration
2198     // doesn't do anything.
2199     _finger = limit;
2200   }
2201 
2202   _region_limit = limit;
2203 }
2204 
2205 void G1CMTask::giveup_current_region() {
2206   assert(_curr_region != NULL, "invariant");
2207   clear_region_fields();
2208 }
2209 
2210 void G1CMTask::clear_region_fields() {
2211   // Values for these three fields that indicate that we're not
2212   // holding on to a region.
2213   _curr_region  = NULL;
2214   _finger       = NULL;
2215   _region_limit = NULL;
2216 }
2217 
2218 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2219   if (cm_oop_closure == NULL) {
2220     assert(_cm_oop_closure != NULL, "invariant");
2221   } else {
2222     assert(_cm_oop_closure == NULL, "invariant");
2223   }
2224   _cm_oop_closure = cm_oop_closure;
2225 }
2226 
2227 void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
2228   guarantee(nextMarkBitMap != NULL, "invariant");
2229   _nextMarkBitMap            = nextMarkBitMap;
2230   clear_region_fields();
2231 
2232   _calls                     = 0;
2233   _elapsed_time_ms           = 0.0;
2234   _termination_time_ms       = 0.0;
2235   _termination_start_time_ms = 0.0;
2236 }
2237 
2238 bool G1CMTask::should_exit_termination() {
2239   regular_clock_call();
2240   // This is called when we are in the termination protocol. We should
2241   // quit if, for some reason, this task wants to abort or the global
2242   // stack is not empty (this means that we can get work from it).
2243   return !_cm->mark_stack_empty() || has_aborted();
2244 }
2245 
2246 void G1CMTask::reached_limit() {
2247   assert(_words_scanned >= _words_scanned_limit ||
2248          _refs_reached >= _refs_reached_limit,
2249          "shouldn't have been called otherwise");
2250   regular_clock_call();
2251 }
2252 
2253 void G1CMTask::regular_clock_call() {
2254   if (has_aborted()) return;
2255 
2256   // First, we need to recalculate the words scanned and refs reached
2257   // limits for the next clock call.
2258   recalculate_limits();
2259 
2260   // During the regular clock call we do the following
2261 
2262   // (1) If an overflow has been flagged, then we abort.
2263   if (_cm->has_overflown()) {
2264     set_has_aborted();
2265     return;
2266   }
2267 
2268   // If we are not concurrent (i.e. we're doing remark) we don't need
2269   // to check anything else. The other steps are only needed during
2270   // the concurrent marking phase.
2271   if (!concurrent()) return;
2272 
2273   // (2) If marking has been aborted for Full GC, then we also abort.
2274   if (_cm->has_aborted()) {
2275     set_has_aborted();
2276     return;
2277   }
2278 
2279   double curr_time_ms = os::elapsedVTime() * 1000.0;
2280 
2281   // (3) We check whether we should yield. If we have to, then we abort.
2282   if (SuspendibleThreadSet::should_yield()) {
2283     // We should yield. To do this we abort the task. The caller is
2284     // responsible for yielding.
2285     set_has_aborted();
2286     return;
2287   }
2288 
2289   // (4) We check whether we've reached our time quota. If we have,
2290   // then we abort.
2291   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2292   if (elapsed_time_ms > _time_target_ms) {
2293     set_has_aborted();
2294     _has_timed_out = true;
2295     return;
2296   }
2297 
2298   // (5) Finally, we check whether there are enough completed SATB
2299   // buffers available for processing. If there are, we abort.
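  // (Clarifying note, not from the original comment: aborting here does
  // not process the buffers itself - the caller is expected to re-invoke
  // do_marking_step(), whose prologue calls drain_satb_buffers() before
  // any bitmap scanning, which is when these buffers get consumed.)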
2300   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2301   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2302     // We do need to process SATB buffers; we'll abort and restart
2303     // the marking task to do so
2304     set_has_aborted();
2305     return;
2306   }
2307 }
2308 
2309 void G1CMTask::recalculate_limits() {
2310   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2311   _words_scanned_limit      = _real_words_scanned_limit;
2312 
2313   _real_refs_reached_limit  = _refs_reached + refs_reached_period;
2314   _refs_reached_limit       = _real_refs_reached_limit;
2315 }
2316 
2317 void G1CMTask::decrease_limits() {
2318   // This is called when we believe that we're going to do an infrequent
2319   // operation which will increase the per-byte scanned cost (i.e. move
2320   // entries to/from the global stack). It basically tries to decrease the
2321   // scanning limit so that the clock is called earlier.
2322 
2323   _words_scanned_limit = _real_words_scanned_limit -
2324     3 * words_scanned_period / 4;
2325   _refs_reached_limit  = _real_refs_reached_limit -
2326     3 * refs_reached_period / 4;
2327 }
2328 
2329 void G1CMTask::move_entries_to_global_stack() {
2330   // local array where we'll store the entries that will be popped
2331   // from the local queue
2332   oop buffer[global_stack_transfer_size];
2333 
2334   int n = 0;
2335   oop obj;
2336   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
2337     buffer[n] = obj;
2338     ++n;
2339   }
2340 
2341   if (n > 0) {
2342     // we popped at least one entry from the local queue
2343 
2344     if (!_cm->mark_stack_push(buffer, n)) {
2345       set_has_aborted();
2346     }
2347   }
2348 
2349   // this operation was quite expensive, so decrease the limits
2350   decrease_limits();
2351 }
2352 
2353 void G1CMTask::get_entries_from_global_stack() {
2354   // local array where we'll store the entries that will be popped
2355   // from the global stack.
2356   oop buffer[global_stack_transfer_size];
2357   int n;
2358   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
2359   assert(n <= global_stack_transfer_size,
2360          "we should not pop more than the given limit");
2361   if (n > 0) {
2362     // yes, we did actually pop at least one entry
2363     for (int i = 0; i < n; ++i) {
2364       bool success = _task_queue->push(buffer[i]);
2365       // We only call this when the local queue is empty or under a
2366       // given target limit. So, we do not expect this push to fail.
2367       assert(success, "invariant");
2368     }
2369   }
2370 
2371   // this operation was quite expensive, so decrease the limits
2372   decrease_limits();
2373 }
2374 
2375 void G1CMTask::drain_local_queue(bool partially) {
2376   if (has_aborted()) return;
2377 
2378   // Decide what the target size is, depending on whether we're going to
2379   // drain it partially (so that other tasks can steal if they run out
2380   // of things to do) or totally (at the very end).
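  // (Worked example, illustrative only: with a task queue capacity of,
  // say, 16K entries, a partial drain below targets
  // MIN2(16K / 3, GCDrainStackTargetSize), i.e. the flag value caps how
  // many entries are deliberately left behind for stealing; a total
  // drain targets 0 and empties the queue.)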
2381   size_t target_size;
2382   if (partially) {
2383     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2384   } else {
2385     target_size = 0;
2386   }
2387 
2388   if (_task_queue->size() > target_size) {
2389     oop obj;
2390     bool ret = _task_queue->pop_local(obj);
2391     while (ret) {
2392       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
2393       assert(!_g1h->is_on_master_free_list(
2394                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
2395 
2396       scan_object(obj);
2397 
2398       if (_task_queue->size() <= target_size || has_aborted()) {
2399         ret = false;
2400       } else {
2401         ret = _task_queue->pop_local(obj);
2402       }
2403     }
2404   }
2405 }
2406 
2407 void G1CMTask::drain_global_stack(bool partially) {
2408   if (has_aborted()) return;
2409 
2410   // We have a policy to drain the local queue before we attempt to
2411   // drain the global stack.
2412   assert(partially || _task_queue->size() == 0, "invariant");
2413 
2414   // Decide what the target size is, depending on whether we're going to
2415   // drain it partially (so that other tasks can steal if they run out
2416   // of things to do) or totally (at the very end). Notice that,
2417   // because we move entries from the global stack in chunks or
2418   // because another task might be doing the same, we might in fact
2419   // drop below the target. But, this is not a problem.
2420   size_t target_size;
2421   if (partially) {
2422     target_size = _cm->partial_mark_stack_size_target();
2423   } else {
2424     target_size = 0;
2425   }
2426 
2427   if (_cm->mark_stack_size() > target_size) {
2428     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2429       get_entries_from_global_stack();
2430       drain_local_queue(partially);
2431     }
2432   }
2433 }
2434 
2435 // The SATB queue code makes several assumptions about whether to call the
2436 // par or non-par versions of the methods. This is why some of the code is
2437 // replicated. We should really get rid of the single-threaded version
2438 // of the code to simplify things.
2439 void G1CMTask::drain_satb_buffers() {
2440   if (has_aborted()) return;
2441 
2442   // We set this so that the regular clock knows that we're in the
2443   // middle of draining buffers and doesn't set the abort flag when it
2444   // notices that SATB buffers are available for draining. It'd be
2445   // very counterproductive if it did that. :-)
2446   _draining_satb_buffers = true;
2447 
2448   G1CMSATBBufferClosure satb_cl(this, _g1h);
2449   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2450 
2451   // This keeps claiming and applying the closure to completed buffers
2452   // until we run out of buffers or we need to abort.
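  // (Hedged note, not from the original comment: as used here,
  // apply_closure_to_completed_buffer() is expected to claim one
  // completed buffer, apply satb_cl to its entries and return true, or
  // return false once no completed buffer is left - hence the loop below
  // terminates when the queue set is drained or we abort.)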
2453   while (!has_aborted() &&
2454          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2455     regular_clock_call();
2456   }
2457 
2458   _draining_satb_buffers = false;
2459 
2460   assert(has_aborted() ||
2461          concurrent() ||
2462          satb_mq_set.completed_buffers_num() == 0, "invariant");
2463 
2464   // again, this was a potentially expensive operation, decrease the
2465   // limits to get the regular clock call early
2466   decrease_limits();
2467 }
2468 
2469 void G1CMTask::print_stats() {
2470   log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
2471                        _worker_id, _calls);
2472   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2473                        _elapsed_time_ms, _termination_time_ms);
2474   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
2475                        _step_times_ms.num(), _step_times_ms.avg(),
2476                        _step_times_ms.sd());
2477   log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
2478                        _step_times_ms.maximum(), _step_times_ms.sum());
2479 }
2480 
2481 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
2482   return _task_queues->steal(worker_id, hash_seed, obj);
2483 }
2484 
2485 /*****************************************************************************
2486 
2487     The do_marking_step(time_target_ms, ...) method is the building
2488     block of the parallel marking framework. It can be called in parallel
2489     with other invocations of do_marking_step() on different tasks
2490     (but only one per task, obviously) and concurrently with the
2491     mutator threads, or during remark, hence it eliminates the need
2492     for two versions of the code. When called during remark, it will
2493     pick up from where the task left off during the concurrent marking
2494     phase. Interestingly, tasks are also claimable during evacuation
2495     pauses, since do_marking_step() ensures that it aborts before
2496     it needs to yield.
2497 
2498     The data structures that it uses to do marking work are the
2499     following:
2500 
2501       (1) Marking Bitmap. If there are gray objects that appear only
2502       on the bitmap (this happens either when dealing with an overflow
2503       or when the initial marking phase has simply marked the roots
2504       and didn't push them on the stack), then tasks claim heap
2505       regions whose bitmap they then scan to find gray objects. A
2506       global finger indicates where the end of the last claimed region
2507       is. A local finger indicates how far into the region a task has
2508       scanned. The two fingers are used to determine how to gray an
2509       object (i.e. whether simply marking it is OK, as it will be
2510       visited by a task in the future, or whether it needs to be also
2511       pushed on a stack).
2512 
2513       (2) Local Queue. The local queue of the task which is accessed
2514       reasonably efficiently by the task. Other tasks can steal from
2515       it when they run out of work. Throughout the marking phase, a
2516       task attempts to keep its local queue short but not totally
2517       empty, so that entries are available for stealing by other
2518       tasks. Only when there is no more work will a task totally
2519       drain its local queue.
2520 
2521       (3) Global Mark Stack. This handles local queue overflow. During
2522       marking only sets of entries are moved between it and the local
2523       queues, as access to it requires a mutex and more fine-grain
2524       interaction with it which might cause contention. If it
2525       overflows, then the marking phase should restart and iterate
2526       over the bitmap to identify gray objects. Throughout the marking
2527       phase, tasks attempt to keep the global mark stack at a small
2528       length but not totally empty, so that entries are available for
2529       popping by other tasks. Only when there is no more work will
2530       tasks totally drain the global mark stack.
2531 
2532       (4) SATB Buffer Queue. This is where completed SATB buffers are
2533       made available. Buffers are regularly removed from this queue
2534       and scanned for roots, so that the queue doesn't get too
2535       long. During remark, all completed buffers are processed, as
2536       well as the filled-in parts of any uncompleted buffers.
2537 
2538     The do_marking_step() method tries to abort when the time target
2539     has been reached. There are a few other cases when the
2540     do_marking_step() method also aborts:
2541 
2542       (1) When the marking phase has been aborted (after a Full GC).
2543 
2544       (2) When a global overflow (on the global stack) has been
2545       triggered. Before the task aborts, it will actually sync up with
2546       the other tasks to ensure that all the marking data structures
2547       (local queues, stacks, fingers etc.) are re-initialized so that
2548       when do_marking_step() completes, the marking phase can
2549       immediately restart.
2550 
2551       (3) When enough completed SATB buffers are available. The
2552       do_marking_step() method only tries to drain SATB buffers right
2553       at the beginning. So, if enough buffers are available, the
2554       marking step aborts and the SATB buffers are processed at
2555       the beginning of the next invocation.
2556 
2557       (4) To yield. When we have to yield, we abort and do the yield
2558       right at the end of do_marking_step(). This saves us from a lot
2559       of hassle as, by yielding, we might allow a Full GC. If this
2560       happens then objects will be compacted underneath our feet, the
2561       heap might shrink, etc. We save checking for this by just
2562       aborting and doing the yield right at the end.
2563 
2564     From the above it follows that the do_marking_step() method should
2565     be called in a loop (or, otherwise, regularly) until it completes.
2566 
2567     If a marking step completes without its has_aborted() flag being
2568     true, it means it has completed the current marking phase (and
2569     also all other marking tasks have done so and have all synced up).
2570 
2571     A method called regular_clock_call() is invoked "regularly" (in
2572     sub-ms intervals) throughout marking. It is this clock method that
2573     checks all the abort conditions which were mentioned above and
2574     decides when the task should abort. A work-based scheme is used to
2575     trigger this clock method: when the number of object words the
2576     marking phase has scanned or the number of references the marking
2577     phase has visited reach a given limit. Additional invocations of
2578     the clock method have been planted in a few other strategic places
2579     too. The initial reason for the clock method was to avoid calling
2580     vtime too regularly, as it is quite expensive. So, once it was in
2581     place, it was natural to piggy-back all the other conditions on it
2582     too and not constantly check them throughout the code.
2583 
2584     If do_termination is true then do_marking_step will enter its
2585     termination protocol.
2586 
2587     The value of is_serial must be true when do_marking_step is being
2588     called serially (i.e. by the VMThread) and do_marking_step should
2589     skip any synchronization in the termination and overflow code.
2590     Examples include the serial remark code and the serial reference
2591     processing closures.
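
    As a concrete illustration of the calling convention described
    above (a sketch mirroring the remark task in this file, not a
    normative API):

      do {
        task->do_marking_step(target_ms,
                              true  /* do_termination */,
                              false /* is_serial */);
      } while (task->has_aborted() && !cm->has_overflown());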
2592 
2593     The value of is_serial must be false when do_marking_step is
2594     being called by any of the worker threads in a work gang.
2595     Examples include the concurrent marking code (CMMarkingTask),
2596     the MT remark code, and the MT reference processing closures.
2597 
2598  *****************************************************************************/
2599 
2600 void G1CMTask::do_marking_step(double time_target_ms,
2601                                bool do_termination,
2602                                bool is_serial) {
2603   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2604   assert(concurrent() == _cm->concurrent(), "they should be the same");
2605 
2606   G1Policy* g1_policy = _g1h->g1_policy();
2607   assert(_task_queues != NULL, "invariant");
2608   assert(_task_queue != NULL, "invariant");
2609   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
2610 
2611   assert(!_claimed,
2612          "only one thread should claim this task at any one time");
2613 
2614   // OK, this doesn't safeguard against all possible scenarios, as it is
2615   // possible for two threads to set the _claimed flag at the same
2616   // time. But it is only for debugging purposes anyway and it will
2617   // catch most problems.
2618   _claimed = true;
2619 
2620   _start_time_ms = os::elapsedVTime() * 1000.0;
2621 
2622   // If do_stealing is true then do_marking_step will attempt to
2623   // steal work from the other G1CMTasks. It only makes sense to
2624   // enable stealing when the termination protocol is enabled
2625   // and do_marking_step() is not being called serially.
2626   bool do_stealing = do_termination && !is_serial;
2627 
2628   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2629   _time_target_ms = time_target_ms - diff_prediction_ms;
2630 
2631   // set up the variables that are used in the work-based scheme to
2632   // call the regular clock method
2633   _words_scanned = 0;
2634   _refs_reached  = 0;
2635   recalculate_limits();
2636 
2637   // clear all flags
2638   clear_has_aborted();
2639   _has_timed_out = false;
2640   _draining_satb_buffers = false;
2641 
2642   ++_calls;
2643 
2644   // Set up the bitmap and oop closures. Anything that uses them is
2645   // eventually called from this method, so it is OK to allocate these
2646   // statically.
2647   G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
2648   G1CMOopClosure    cm_oop_closure(_g1h, _cm, this);
2649   set_cm_oop_closure(&cm_oop_closure);
2650 
2651   if (_cm->has_overflown()) {
2652     // This can happen if the mark stack overflows during a GC pause
2653     // and this task, after a yield point, restarts. We have to abort
2654     // as we need to get into the overflow protocol which happens
2655     // right at the end of this task.
2656     set_has_aborted();
2657   }
2658 
2659   // First drain any available SATB buffers. After this, we will not
2660   // look at SATB buffers before the next invocation of this method.
2661   // If enough completed SATB buffers are queued up, the regular clock
2662   // will abort this task so that it restarts.
2663   drain_satb_buffers();
2664   // ...then partially drain the local queue and the global stack
2665   drain_local_queue(true);
2666   drain_global_stack(true);
2667 
2668   do {
2669     if (!has_aborted() && _curr_region != NULL) {
2670       // This means that we're already holding on to a region.
2671       assert(_finger != NULL, "if region is not NULL, then the finger "
2672              "should not be NULL either");
2673 
2674       // We might have restarted this task after an evacuation pause
2675       // which might have evacuated the region we're holding on to
2676       // underneath our feet.
Let's read its limit again to make sure 2677 // that we do not iterate over a region of the heap that 2678 // contains garbage (update_region_limit() will also move 2679 // _finger to the start of the region if it is found empty). 2680 update_region_limit(); 2681 // We will start from _finger not from the start of the region, 2682 // as we might be restarting this task after aborting half-way 2683 // through scanning this region. In this case, _finger points to 2684 // the address where we last found a marked object. If this is a 2685 // fresh region, _finger points to start(). 2686 MemRegion mr = MemRegion(_finger, _region_limit); 2687 2688 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2689 "humongous regions should go around loop once only"); 2690 2691 // Some special cases: 2692 // If the memory region is empty, we can just give up the region. 2693 // If the current region is humongous then we only need to check 2694 // the bitmap for the bit associated with the start of the object, 2695 // scan the object if it's live, and give up the region. 2696 // Otherwise, let's iterate over the bitmap of the part of the region 2697 // that is left. 2698 // If the iteration is successful, give up the region. 2699 if (mr.is_empty()) { 2700 giveup_current_region(); 2701 regular_clock_call(); 2702 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2703 if (_nextMarkBitMap->isMarked(mr.start())) { 2704 // The object is marked - apply the closure 2705 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 2706 bitmap_closure.do_bit(offset); 2707 } 2708 // Even if this task aborted while scanning the humongous object 2709 // we can (and should) give up the current region. 2710 giveup_current_region(); 2711 regular_clock_call(); 2712 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 2713 giveup_current_region(); 2714 regular_clock_call(); 2715 } else { 2716 assert(has_aborted(), "currently the only way to do so"); 2717 // The only way to abort the bitmap iteration is to return 2718 // false from the do_bit() method. However, inside the 2719 // do_bit() method we move the _finger to point to the 2720 // object currently being looked at. So, if we bail out, we 2721 // have definitely set _finger to something non-null. 2722 assert(_finger != NULL, "invariant"); 2723 2724 // Region iteration was actually aborted. So now _finger 2725 // points to the address of the object we last scanned. If we 2726 // leave it there, when we restart this task, we will rescan 2727 // the object. It is easy to avoid this. We move the finger by 2728 // enough to point to the next possible object header (the 2729 // bitmap knows by how much we need to move it as it knows its 2730 // granularity). 2731 assert(_finger < _region_limit, "invariant"); 2732 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 2733 // Check if bitmap iteration was aborted while scanning the last object 2734 if (new_finger >= _region_limit) { 2735 giveup_current_region(); 2736 } else { 2737 move_finger_to(new_finger); 2738 } 2739 } 2740 } 2741 // At this point we have either completed iterating over the 2742 // region we were holding on to, or we have aborted. 2743 2744 // We then partially drain the local queue and the global stack. 2745 // (Do we really need this?) 
2746     drain_local_queue(true);
2747     drain_global_stack(true);
2748 
2749     // Read the note on the claim_region() method for why it might
2750     // return NULL with potentially more regions available for
2751     // claiming and why we have to check out_of_regions() to determine
2752     // whether we're done or not.
2753     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2754       // We are going to try to claim a new region. We should have
2755       // given up on the previous one.
2756       // Separated the asserts so that we know which one fires.
2757       assert(_curr_region  == NULL, "invariant");
2758       assert(_finger       == NULL, "invariant");
2759       assert(_region_limit == NULL, "invariant");
2760       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2761       if (claimed_region != NULL) {
2762         // Yes, we managed to claim one
2763         setup_for_region(claimed_region);
2764         assert(_curr_region == claimed_region, "invariant");
2765       }
2766       // It is important to call the regular clock here. It might take
2767       // a while to claim a region if, for example, we hit a large
2768       // block of empty regions. So we need to call the regular clock
2769       // method once round the loop to make sure it's called
2770       // frequently enough.
2771       regular_clock_call();
2772     }
2773 
2774     if (!has_aborted() && _curr_region == NULL) {
2775       assert(_cm->out_of_regions(),
2776              "at this point we should be out of regions");
2777     }
2778   } while (_curr_region != NULL && !has_aborted());
2779 
2780   if (!has_aborted()) {
2781     // We cannot check whether the global stack is empty, since other
2782     // tasks might be pushing objects to it concurrently.
2783     assert(_cm->out_of_regions(),
2784            "at this point we should be out of regions");
2785     // Try to reduce the number of available SATB buffers so that
2786     // remark has less work to do.
2787     drain_satb_buffers();
2788   }
2789 
2790   // Since we've done everything else, we can now totally drain the
2791   // local queue and global stack.
2792   drain_local_queue(false);
2793   drain_global_stack(false);
2794 
2795   // Attempt at work stealing from other tasks' queues.
2796   if (do_stealing && !has_aborted()) {
2797     // We have not aborted. This means that we have finished all that
2798     // we could. Let's try to do some stealing...
2799 
2800     // We cannot check whether the global stack is empty, since other
2801     // tasks might be pushing objects to it concurrently.
2802     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2803            "only way to reach here");
2804     while (!has_aborted()) {
2805       oop obj;
2806       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
2807         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
2808                "any stolen object should be marked");
2809         scan_object(obj);
2810 
2811         // And since we're towards the end, let's totally drain the
2812         // local queue and global stack.
2813         drain_local_queue(false);
2814         drain_global_stack(false);
2815       } else {
2816         break;
2817       }
2818     }
2819   }
2820 
2821   // We still haven't aborted. Now, let's try to get into the
2822   // termination protocol.
2823   if (do_termination && !has_aborted()) {
2824     // We cannot check whether the global stack is empty, since other
2825     // tasks might be concurrently pushing objects on it.
2826     // Separated the asserts so that we know which one fires.
2827 assert(_cm->out_of_regions(), "only way to reach here"); 2828 assert(_task_queue->size() == 0, "only way to reach here"); 2829 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 2830 2831 // The G1CMTask class also extends the TerminatorTerminator class, 2832 // hence its should_exit_termination() method will also decide 2833 // whether to exit the termination protocol or not. 2834 bool finished = (is_serial || 2835 _cm->terminator()->offer_termination(this)); 2836 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 2837 _termination_time_ms += 2838 termination_end_time_ms - _termination_start_time_ms; 2839 2840 if (finished) { 2841 // We're all done. 2842 2843 if (_worker_id == 0) { 2844 // let's allow task 0 to do this 2845 if (concurrent()) { 2846 assert(_cm->concurrent_marking_in_progress(), "invariant"); 2847 // we need to set this to false before the next 2848 // safepoint. This way we ensure that the marking phase 2849 // doesn't observe any more heap expansions. 2850 _cm->clear_concurrent_marking_in_progress(); 2851 } 2852 } 2853 2854 // We can now guarantee that the global stack is empty, since 2855 // all other tasks have finished. We separated the guarantees so 2856 // that, if a condition is false, we can immediately find out 2857 // which one. 2858 guarantee(_cm->out_of_regions(), "only way to reach here"); 2859 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 2860 guarantee(_task_queue->size() == 0, "only way to reach here"); 2861 guarantee(!_cm->has_overflown(), "only way to reach here"); 2862 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 2863 } else { 2864 // Apparently there's more work to do. Let's abort this task. It 2865 // will restart it and we can hopefully find more things to do. 2866 set_has_aborted(); 2867 } 2868 } 2869 2870 // Mainly for debugging purposes to make sure that a pointer to the 2871 // closure which was statically allocated in this frame doesn't 2872 // escape it by accident. 2873 set_cm_oop_closure(NULL); 2874 double end_time_ms = os::elapsedVTime() * 1000.0; 2875 double elapsed_time_ms = end_time_ms - _start_time_ms; 2876 // Update the step history. 2877 _step_times_ms.add(elapsed_time_ms); 2878 2879 if (has_aborted()) { 2880 // The task was aborted for some reason. 2881 if (_has_timed_out) { 2882 double diff_ms = elapsed_time_ms - _time_target_ms; 2883 // Keep statistics of how well we did with respect to hitting 2884 // our target only if we actually timed out (if we aborted for 2885 // other reasons, then the results might get skewed). 2886 _marking_step_diffs_ms.add(diff_ms); 2887 } 2888 2889 if (_cm->has_overflown()) { 2890 // This is the interesting one. We aborted because a global 2891 // overflow was raised. This means we have to restart the 2892 // marking phase and start iterating over regions. However, in 2893 // order to do this we have to make sure that all tasks stop 2894 // what they are doing and re-initialize in a safe manner. We 2895 // will achieve this with the use of two barrier sync points. 2896 2897 if (!is_serial) { 2898 // We only need to enter the sync barrier if being called 2899 // from a parallel context 2900 _cm->enter_first_sync_barrier(_worker_id); 2901 2902 // When we exit this sync barrier we know that all tasks have 2903 // stopped doing marking work. So, it's now safe to 2904 // re-initialize our data structures. At the end of this method, 2905 // task 0 will clear the global data structures. 2906 } 2907 2908 // We clear the local state of this task... 
2909 clear_region_fields(); 2910 2911 if (!is_serial) { 2912 // ...and enter the second barrier. 2913 _cm->enter_second_sync_barrier(_worker_id); 2914 } 2915 // At this point, if we're during the concurrent phase of 2916 // marking, everything has been re-initialized and we're 2917 // ready to restart. 2918 } 2919 } 2920 2921 _claimed = false; 2922 } 2923 2924 G1CMTask::G1CMTask(uint worker_id, 2925 G1ConcurrentMark* cm, 2926 G1CMTaskQueue* task_queue, 2927 G1CMTaskQueueSet* task_queues) 2928 : _g1h(G1CollectedHeap::heap()), 2929 _worker_id(worker_id), _cm(cm), 2930 _claimed(false), 2931 _nextMarkBitMap(NULL), _hash_seed(17), 2932 _task_queue(task_queue), 2933 _task_queues(task_queues), 2934 _cm_oop_closure(NULL) { 2935 guarantee(task_queue != NULL, "invariant"); 2936 guarantee(task_queues != NULL, "invariant"); 2937 2938 _marking_step_diffs_ms.add(0.5); 2939 } 2940 2941 // These are formatting macros that are used below to ensure 2942 // consistent formatting. The *_H_* versions are used to format the 2943 // header for a particular value and they should be kept consistent 2944 // with the corresponding macro. Also note that most of the macros add 2945 // the necessary white space (as a prefix) which makes them a bit 2946 // easier to compose. 2947 2948 // All the output lines are prefixed with this string to be able to 2949 // identify them easily in a large log file. 2950 #define G1PPRL_LINE_PREFIX "###" 2951 2952 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2953 #ifdef _LP64 2954 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2955 #else // _LP64 2956 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2957 #endif // _LP64 2958 2959 // For per-region info 2960 #define G1PPRL_TYPE_FORMAT " %-4s" 2961 #define G1PPRL_TYPE_H_FORMAT " %4s" 2962 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2963 #define G1PPRL_BYTE_H_FORMAT " %9s" 2964 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2965 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2966 2967 // For summary info 2968 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2969 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2970 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2971 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2972 2973 G1PrintRegionLivenessInfoClosure:: 2974 G1PrintRegionLivenessInfoClosure(const char* phase_name) 2975 : _total_used_bytes(0), _total_capacity_bytes(0), 2976 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2977 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 2978 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2979 MemRegion g1_reserved = g1h->g1_reserved(); 2980 double now = os::elapsedTime(); 2981 2982 // Print the header of the output. 
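  // (Illustrative only - the header emitted below has roughly this shape;
  // the phase name, addresses and sizes vary by run:
  //   ### PHASE Post-Marking @ 12.345
  //   ### HEAP reserved: 0x...-0x... region-size: 1048576
  //   ###
  //   ###   type         address-range    used  prev-live  next-live ...
  //   ###                                (bytes)   (bytes)    (bytes) ...
  // )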
2983 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2984 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2985 G1PPRL_SUM_ADDR_FORMAT("reserved") 2986 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2987 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2988 HeapRegion::GrainBytes); 2989 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2990 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2991 G1PPRL_TYPE_H_FORMAT 2992 G1PPRL_ADDR_BASE_H_FORMAT 2993 G1PPRL_BYTE_H_FORMAT 2994 G1PPRL_BYTE_H_FORMAT 2995 G1PPRL_BYTE_H_FORMAT 2996 G1PPRL_DOUBLE_H_FORMAT 2997 G1PPRL_BYTE_H_FORMAT 2998 G1PPRL_BYTE_H_FORMAT, 2999 "type", "address-range", 3000 "used", "prev-live", "next-live", "gc-eff", 3001 "remset", "code-roots"); 3002 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3003 G1PPRL_TYPE_H_FORMAT 3004 G1PPRL_ADDR_BASE_H_FORMAT 3005 G1PPRL_BYTE_H_FORMAT 3006 G1PPRL_BYTE_H_FORMAT 3007 G1PPRL_BYTE_H_FORMAT 3008 G1PPRL_DOUBLE_H_FORMAT 3009 G1PPRL_BYTE_H_FORMAT 3010 G1PPRL_BYTE_H_FORMAT, 3011 "", "", 3012 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 3013 "(bytes)", "(bytes)"); 3014 } 3015 3016 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 3017 const char* type = r->get_type_str(); 3018 HeapWord* bottom = r->bottom(); 3019 HeapWord* end = r->end(); 3020 size_t capacity_bytes = r->capacity(); 3021 size_t used_bytes = r->used(); 3022 size_t prev_live_bytes = r->live_bytes(); 3023 size_t next_live_bytes = r->next_live_bytes(); 3024 double gc_eff = r->gc_efficiency(); 3025 size_t remset_bytes = r->rem_set()->mem_size(); 3026 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3027 3028 _total_used_bytes += used_bytes; 3029 _total_capacity_bytes += capacity_bytes; 3030 _total_prev_live_bytes += prev_live_bytes; 3031 _total_next_live_bytes += next_live_bytes; 3032 _total_remset_bytes += remset_bytes; 3033 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3034 3035 // Print a line for this particular region. 3036 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3037 G1PPRL_TYPE_FORMAT 3038 G1PPRL_ADDR_BASE_FORMAT 3039 G1PPRL_BYTE_FORMAT 3040 G1PPRL_BYTE_FORMAT 3041 G1PPRL_BYTE_FORMAT 3042 G1PPRL_DOUBLE_FORMAT 3043 G1PPRL_BYTE_FORMAT 3044 G1PPRL_BYTE_FORMAT, 3045 type, p2i(bottom), p2i(end), 3046 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3047 remset_bytes, strong_code_roots_bytes); 3048 3049 return false; 3050 } 3051 3052 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3053 // add static memory usages to remembered set sizes 3054 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3055 // Print the footer of the output. 3056 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3057 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3058 " SUMMARY" 3059 G1PPRL_SUM_MB_FORMAT("capacity") 3060 G1PPRL_SUM_MB_PERC_FORMAT("used") 3061 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3062 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3063 G1PPRL_SUM_MB_FORMAT("remset") 3064 G1PPRL_SUM_MB_FORMAT("code-roots"), 3065 bytes_to_mb(_total_capacity_bytes), 3066 bytes_to_mb(_total_used_bytes), 3067 perc(_total_used_bytes, _total_capacity_bytes), 3068 bytes_to_mb(_total_prev_live_bytes), 3069 perc(_total_prev_live_bytes, _total_capacity_bytes), 3070 bytes_to_mb(_total_next_live_bytes), 3071 perc(_total_next_live_bytes, _total_capacity_bytes), 3072 bytes_to_mb(_total_remset_bytes), 3073 bytes_to_mb(_total_strong_code_roots_bytes)); 3074 }