/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
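  // (Bits in the bitmap correspond to addresses that are multiples of
  // HeapWordSize << _shifter, so rounding up cannot skip a marked address.)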
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clear_range(mr);
}

void G1CMBitMap::clear_range(MemRegion mr) {
  // intersection() returns the clipped region; the result must be assigned
  // back, otherwise mr would be used unclipped below.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack(G1ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
{}

bool G1CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    log_warning(gc)("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    log_warning(gc)("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of G1ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void G1CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
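  // The stack was drained while the overflow was being handled, so no
  // contents need to be preserved; we just try to re-reserve a backing
  // store of twice the current capacity, capped at MarkStackSizeMax.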
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    // Failed to double capacity, continue;
    log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  _capacity / K, new_capacity / K);
  }
}

void G1CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void G1CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool G1CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

void G1CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void G1CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            "saved index: %d index: %d", _saved_index, _index);
  _saved_index = -1;
}

G1CMRootRegions::G1CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void G1CMRootRegions::init(G1CollectedHeap* g1h, G1ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
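  // If the scan was aborted we may not have claimed every survivor,
  // so only insist on full consumption in the non-aborted case; the
  // cursor itself is dropped unconditionally below.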
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
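  // The thread is created once and lives for the remainder of the VM's
  // lifetime, sleeping between marking cycles.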
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor          = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
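    // We only range-check values the user specified explicitly: if
    // MarkStackSizeMax was left at its default we check against that
    // default, and if both flags came from the command line we check
    // that they are mutually consistent.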
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    log_warning(gc)("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
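  // The terminator and both overflow barriers must agree on the worker
  // count, otherwise termination or barrier synchronization could hang.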
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = M / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("Parallel Clear Bitmap Task"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  G1ClearBitMapTask task(bitmap, this, workers->active_workers(), may_yield);
  workers->run_task(&task);
  guarantee(!may_yield || task.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_parallel_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
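  // note_start_of_marking() records each region's current top as its
  // next-top-at-mark-start (NTAMS); objects allocated above NTAMS during
  // the cycle are treated as implicitly live.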
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC or for an
 * evacuation pause to occur. This is actually safe, since entering
 * the sync barrier is one of the last things do_marking_step() does,
 * and it doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark*     _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      G1CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check();

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
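  // Survivor regions are filled during the initial-mark pause, after
  // marking has started, so their NTAMS is bottom(): everything in them
  // must be scanned explicitly here.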
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    G1CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
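  // The request was latched when the overflow happened; the expansion
  // itself is done here, while the world is still stopped.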
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc)("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc)("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // This must happen before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  int               _ref_counter_limit;
  int               _ref_counter;
  bool              _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMtask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.
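//
// Unlike the 'keep alive' closure above, this closure enables termination
// (do_termination is true), so do_marking_step() only returns once the
// local and global stacks have been completely drained, or marking has
// overflowed.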

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  bool              _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;
  WorkGang*         _workers;
  uint              _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask&      _proc_task;
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}

void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists. We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
1609     return;
1610   }
1611
1612   ResourceMark rm;
1613   HandleMark hm;
1614
1615   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1616
1617   // Is alive closure.
1618   G1CMIsAliveClosure g1_is_alive(g1h);
1619
1620   // Inner scope to exclude the cleaning of the string and symbol
1621   // tables from the displayed time.
1622   {
1623     GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1624
1625     ReferenceProcessor* rp = g1h->ref_processor_cm();
1626
1627     // See the comment in G1CollectedHeap::ref_processing_init()
1628     // about how reference processing currently works in G1.
1629
1630     // Set the soft reference policy
1631     rp->setup_policy(clear_all_soft_refs);
1632     assert(_markStack.isEmpty(), "mark stack should be empty");
1633
1634     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1635     // in serial reference processing. Note these closures are also
1636     // used for serially processing (by the current thread) the
1637     // JNI references during parallel reference processing.
1638     //
1639     // These closures do not need to synchronize with the worker
1640     // threads involved in parallel reference processing as these
1641     // instances are executed serially by the current thread (i.e.
1642     // reference processing is not multi-threaded and is thus
1643     // performed by the current thread instead of a gang worker).
1644     //
1645     // The gang tasks involved in parallel reference processing create
1646     // their own instances of these closures, which do their own
1647     // synchronization among themselves.
1648     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1649     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1650
1651     // We need at least one active thread. If reference processing
1652     // is not multi-threaded we use the current (VMThread) thread,
1653     // otherwise we use the work gang from the G1CollectedHeap and
1654     // we utilize all the worker threads we can.
1655     bool processing_is_mt = rp->processing_is_mt();
1656     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1657     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1658
1659     // Parallel processing task executor.
1660     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1661                                               g1h->workers(), active_workers);
1662     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1663
1664     // Set the concurrency level. The phase was already set prior to
1665     // executing the remark task.
1666     set_concurrency(active_workers);
1667
1668     // Set the degree of MT processing here. If the discovery was done MT,
1669     // the number of threads involved during discovery could differ from
1670     // the number of active workers. This is OK as long as the discovered
1671     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1672     rp->set_active_mt_degree(active_workers);
1673
1674     // Process the weak references.
1675     const ReferenceProcessorStats& stats =
1676         rp->process_discovered_references(&g1_is_alive,
1677                                           &g1_keep_alive,
1678                                           &g1_drain_mark_stack,
1679                                           executor,
1680                                           _gc_timer_cm);
1681     _gc_tracer_cm->report_gc_reference_stats(stats);
1682
1683     // The do_oop work routines of the keep_alive and drain_marking_stack
1684     // oop closures will set the has_overflown flag if we overflow the
1685     // global marking stack.
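    // As a minimal sketch of that failure path (mirroring
    // G1CMTask::move_entries_to_global_stack() further down in this file):
    //
    //   if (!_cm->mark_stack_push(buffer, n)) {
    //     set_has_aborted(); // a failed bulk push means the global stack overflowed
    //   }
    //
    // so the overflow flag and the mark stack's own overflow() state are
    // expected to agree here, which the assertions below verify.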
1686
1687     assert(_markStack.overflow() || _markStack.isEmpty(),
1688            "mark stack should be empty (unless it overflowed)");
1689
1690     if (_markStack.overflow()) {
1691       // This should have been done already when we tried to push an
1692       // entry on to the global mark stack. But let's do it again.
1693       set_has_overflown();
1694     }
1695
1696     assert(rp->num_q() == active_workers, "why not");
1697
1698     rp->enqueue_discovered_references(executor);
1699
1700     rp->verify_no_references_recorded();
1701     assert(!rp->discovery_enabled(), "Post condition");
1702   }
1703
1704   if (has_overflown()) {
1705     // We cannot trust g1_is_alive if the marking stack overflowed
1706     return;
1707   }
1708
1709   assert(_markStack.isEmpty(), "Marking should have completed");
1710
1711   // Unload Klasses, String, Symbols, Code Cache, etc.
1712   if (ClassUnloadingWithConcurrentMark) {
1713     bool purged_classes;
1714
1715     {
1716       GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
1717       purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
1718     }
1719
1720     {
1721       GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
1722       weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
1723     }
1724   }
1725
1726   if (G1StringDedup::is_enabled()) {
1727     GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
1728     G1StringDedup::unlink(&g1_is_alive);
1729   }
1730 }
1731
1732 void G1ConcurrentMark::swapMarkBitMaps() {
1733   G1CMBitMapRO* temp = _prevMarkBitMap;
1734   _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
1735   _nextMarkBitMap    = (G1CMBitMap*)  temp;
1736 }
1737
1738 // Closure for marking entries in SATB buffers.
1739 class G1CMSATBBufferClosure : public SATBBufferClosure {
1740 private:
1741   G1CMTask* _task;
1742   G1CollectedHeap* _g1h;
1743
1744   // This is very similar to G1CMTask::deal_with_reference, but with
1745   // more relaxed requirements for the argument, so this must be more
1746   // circumspect about treating the argument as an object.
1747   void do_entry(void* entry) const {
1748     _task->increment_refs_reached();
1749     HeapRegion* hr = _g1h->heap_region_containing(entry);
1750     if (entry < hr->next_top_at_mark_start()) {
1751       // Until we get here, we don't know whether entry refers to a valid
1752       // object; it could instead have been a stale reference.
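      // At this point entry is below the region's NTAMS, so it should be
      // the start of a real object; the assert just below re-checks that
      // in debug builds. (Entries at or above NTAMS are skipped: they are
      // either stale or refer to objects allocated during marking, which
      // are implicitly live and do not need to be traced.)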
1753       oop obj = static_cast<oop>(entry);
1754       assert(obj->is_oop(true /* ignore mark word */),
1755              "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
1756       _task->make_reference_grey(obj);
1757     }
1758   }
1759
1760 public:
1761   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1762     : _task(task), _g1h(g1h) { }
1763
1764   virtual void do_buffer(void** buffer, size_t size) {
1765     for (size_t i = 0; i < size; ++i) {
1766       do_entry(buffer[i]);
1767     }
1768   }
1769 };
1770
1771 class G1RemarkThreadsClosure : public ThreadClosure {
1772   G1CMSATBBufferClosure _cm_satb_cl;
1773   G1CMOopClosure _cm_cl;
1774   MarkingCodeBlobClosure _code_cl;
1775   int _thread_parity;
1776
1777  public:
1778   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1779     _cm_satb_cl(task, g1h),
1780     _cm_cl(g1h, g1h->concurrent_mark(), task),
1781     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1782     _thread_parity(Threads::thread_claim_parity()) {}
1783
1784   void do_thread(Thread* thread) {
1785     if (thread->is_Java_thread()) {
1786       if (thread->claim_oops_do(true, _thread_parity)) {
1787         JavaThread* jt = (JavaThread*)thread;
1788
1789         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1790         // however, oops reachable from nmethods have very complex lifecycles:
1791         // * Alive if on the stack of an executing method
1792         // * Weakly reachable otherwise
1793         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1794         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1795         jt->nmethods_do(&_code_cl);
1796
1797         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1798       }
1799     } else if (thread->is_VM_thread()) {
1800       if (thread->claim_oops_do(true, _thread_parity)) {
1801         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1802       }
1803     }
1804   }
1805 };
1806
1807 class G1CMRemarkTask: public AbstractGangTask {
1808 private:
1809   G1ConcurrentMark* _cm;
1810 public:
1811   void work(uint worker_id) {
1812     // Since all available tasks are actually started, we should
1813     // only proceed if we're supposed to be active.
1814     if (worker_id < _cm->active_tasks()) {
1815       G1CMTask* task = _cm->task(worker_id);
1816       task->record_start_time();
1817       {
1818         ResourceMark rm;
1819         HandleMark hm;
1820
1821         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1822         Threads::threads_do(&threads_f);
1823       }
1824
1825       do {
1826         task->do_marking_step(1000000000.0 /* something very large */,
1827                               true         /* do_termination */,
1828                               false        /* is_serial */);
1829       } while (task->has_aborted() && !_cm->has_overflown());
1830       // If we overflow, then we do not want to restart. We instead
1831       // want to abort remark and do concurrent marking again.
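      // (On overflow the loop condition above goes false and this worker
      // simply finishes: remark as a whole is then abandoned, the caller
      // notices has_overflown(), and concurrent marking is re-run instead
      // of restarting inside the stop-the-world pause.)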
1832       task->record_end_time();
1833     }
1834   }
1835
1836   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1837     AbstractGangTask("Par Remark"), _cm(cm) {
1838     _cm->terminator()->reset_for_reuse(active_workers);
1839   }
1840 };
1841
1842 void G1ConcurrentMark::checkpointRootsFinalWork() {
1843   ResourceMark rm;
1844   HandleMark hm;
1845   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1846
1847   GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1848
1849   g1h->ensure_parsability(false);
1850
1851   // this is remark, so we'll use up all active threads
1852   uint active_workers = g1h->workers()->active_workers();
1853   set_concurrency_and_phase(active_workers, false /* concurrent */);
1854   // Leave _parallel_marking_threads at its
1855   // value originally calculated in the G1ConcurrentMark
1856   // constructor and pass values of the active workers
1857   // through the gang in the task.
1858
1859   {
1860     StrongRootsScope srs(active_workers);
1861
1862     G1CMRemarkTask remarkTask(this, active_workers);
1863     // We will start all available threads, even if we decide that the
1864     // active_workers will be fewer. The extra ones will just bail out
1865     // immediately.
1866     g1h->workers()->run_task(&remarkTask);
1867   }
1868
1869   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1870   guarantee(has_overflown() ||
1871             satb_mq_set.completed_buffers_num() == 0,
1872             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1873             BOOL_TO_STR(has_overflown()),
1874             satb_mq_set.completed_buffers_num());
1875
1876   print_stats();
1877 }
1878
1879 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
1880   // Note we are overriding the read-only view of the prev map here, via
1881   // the cast.
1882   ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
1883 }
1884
1885 HeapRegion*
1886 G1ConcurrentMark::claim_region(uint worker_id) {
1887   // "checkpoint" the finger
1888   HeapWord* finger = _finger;
1889
1890   // _heap_end will not change underneath our feet; it only changes at
1891   // yield points.
1892   while (finger < _heap_end) {
1893     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1894
1895     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1896
1897     // heap_region_containing() above may return NULL as we always claim
1898     // until the end of the heap. In this case, just jump to the next region.
1899     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1900
1901     // Is the gap between reading the finger and doing the CAS too long?
1902     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
1903     if (res == finger && curr_region != NULL) {
1904       // we succeeded
1905       HeapWord* bottom = curr_region->bottom();
1906       HeapWord* limit  = curr_region->next_top_at_mark_start();
1907
1908       // notice that _finger == end cannot be guaranteed here since
1909       // someone else might have moved the finger even further
1910       assert(_finger >= end, "the finger should have moved forward");
1911
1912       if (limit > bottom) {
1913         return curr_region;
1914       } else {
1915         assert(limit == bottom,
1916                "the region limit should be at bottom");
1917         // we return NULL and the caller should try calling
1918         // claim_region() again.
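        // The caller side (a condensed sketch of the claiming loop in
        // G1CMTask::do_marking_step() below) retries roughly like this:
        //
        //   while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
        //     HeapRegion* claimed_region = _cm->claim_region(_worker_id);
        //     if (claimed_region != NULL) {
        //       setup_for_region(claimed_region);
        //     }
        //     regular_clock_call();
        //   }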
1919 return NULL; 1920 } 1921 } else { 1922 assert(_finger > finger, "the finger should have moved forward"); 1923 // read it again 1924 finger = _finger; 1925 } 1926 } 1927 1928 return NULL; 1929 } 1930 1931 #ifndef PRODUCT 1932 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 1933 private: 1934 G1CollectedHeap* _g1h; 1935 const char* _phase; 1936 int _info; 1937 1938 public: 1939 VerifyNoCSetOops(const char* phase, int info = -1) : 1940 _g1h(G1CollectedHeap::heap()), 1941 _phase(phase), 1942 _info(info) 1943 { } 1944 1945 void operator()(oop obj) const { 1946 guarantee(obj->is_oop(), 1947 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1948 p2i(obj), _phase, _info); 1949 guarantee(!_g1h->obj_in_cs(obj), 1950 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1951 p2i(obj), _phase, _info); 1952 } 1953 }; 1954 1955 void G1ConcurrentMark::verify_no_cset_oops() { 1956 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1957 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 1958 return; 1959 } 1960 1961 // Verify entries on the global mark stack 1962 _markStack.iterate(VerifyNoCSetOops("Stack")); 1963 1964 // Verify entries on the task queues 1965 for (uint i = 0; i < _max_worker_id; ++i) { 1966 G1CMTaskQueue* queue = _task_queues->queue(i); 1967 queue->iterate(VerifyNoCSetOops("Queue", i)); 1968 } 1969 1970 // Verify the global finger 1971 HeapWord* global_finger = finger(); 1972 if (global_finger != NULL && global_finger < _heap_end) { 1973 // Since we always iterate over all regions, we might get a NULL HeapRegion 1974 // here. 1975 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1976 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1977 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1978 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1979 } 1980 1981 // Verify the task fingers 1982 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 1983 for (uint i = 0; i < parallel_marking_threads(); ++i) { 1984 G1CMTask* task = _tasks[i]; 1985 HeapWord* task_finger = task->finger(); 1986 if (task_finger != NULL && task_finger < _heap_end) { 1987 // See above note on the global finger verification. 
1988       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
1989       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
1990                 !task_hr->in_collection_set(),
1991                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1992                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
1993     }
1994   }
1995 }
1996 #endif // PRODUCT
1997 void G1ConcurrentMark::create_live_data() {
1998   _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap);
1999 }
2000
2001 void G1ConcurrentMark::finalize_live_data() {
2002   _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap);
2003 }
2004
2005 void G1ConcurrentMark::verify_live_data() {
2006   _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap);
2007 }
2008
2009 void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
2010   _g1h->g1_rem_set()->clear_card_live_data(workers);
2011 }
2012
2013 #ifdef ASSERT
2014 void G1ConcurrentMark::verify_live_data_clear() {
2015   _g1h->g1_rem_set()->verify_card_live_data_is_clear();
2016 }
2017 #endif
2018
2019 void G1ConcurrentMark::print_stats() {
2020   if (!log_is_enabled(Debug, gc, stats)) {
2021     return;
2022   }
2023   log_debug(gc, stats)("---------------------------------------------------------------------");
2024   for (size_t i = 0; i < _active_tasks; ++i) {
2025     _tasks[i]->print_stats();
2026     log_debug(gc, stats)("---------------------------------------------------------------------");
2027   }
2028 }
2029
2030 void G1ConcurrentMark::abort() {
2031   if (!cmThread()->during_cycle() || _has_aborted) {
2032     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2033     return;
2034   }
2035
2036   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2037   // concurrent bitmap clearing.
2038   {
2039     GCTraceTime(Debug, gc)("Clear Next Bitmap");
2040     clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
2041   }
2042   // Note we cannot clear the previous marking bitmap here
2043   // since VerifyDuringGC verifies the objects marked during
2044   // a full GC against the previous bitmap.
2045
2046   {
2047     GCTraceTime(Debug, gc)("Clear Live Data");
2048     clear_live_data(_g1h->workers());
2049   }
2050   DEBUG_ONLY({
2051     GCTraceTime(Debug, gc)("Verify Live Data Clear");
2052     verify_live_data_clear();
2053   })
2054   // Empty mark stack
2055   reset_marking_state();
2056   for (uint i = 0; i < _max_worker_id; ++i) {
2057     _tasks[i]->clear_region_fields();
2058   }
2059   _first_overflow_barrier_sync.abort();
2060   _second_overflow_barrier_sync.abort();
2061   _has_aborted = true;
2062
2063   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2064   satb_mq_set.abandon_partial_marking();
2065   // This can be called either during or outside marking; we'll read
2066   // the expected_active value from the SATB queue set.
2067   satb_mq_set.set_active_all_threads(
2068                                  false, /* new active value */
2069                                  satb_mq_set.is_active() /* expected_active */);
2070 }
2071
2072 static void print_ms_time_info(const char* prefix, const char* name,
2073                                NumberSeq& ns) {
2074   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2075                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2076   if (ns.num() > 0) {
2077     log_trace(gc, marking)("%s         [std.
dev = %8.2f ms, max = %8.2f ms]", 2078 prefix, ns.sd(), ns.maximum()); 2079 } 2080 } 2081 2082 void G1ConcurrentMark::print_summary_info() { 2083 Log(gc, marking) log; 2084 if (!log.is_trace()) { 2085 return; 2086 } 2087 2088 log.trace(" Concurrent marking:"); 2089 print_ms_time_info(" ", "init marks", _init_times); 2090 print_ms_time_info(" ", "remarks", _remark_times); 2091 { 2092 print_ms_time_info(" ", "final marks", _remark_mark_times); 2093 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2094 2095 } 2096 print_ms_time_info(" ", "cleanups", _cleanup_times); 2097 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2098 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2099 if (G1ScrubRemSets) { 2100 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 2101 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2102 } 2103 log.trace(" Total stop_world time = %8.2f s.", 2104 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2105 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2106 cmThread()->vtime_accum(), cmThread()->vtime_mark_accum()); 2107 } 2108 2109 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2110 _parallel_workers->print_worker_threads_on(st); 2111 } 2112 2113 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2114 _parallel_workers->threads_do(tc); 2115 } 2116 2117 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2118 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2119 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 2120 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 2121 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 2122 } 2123 2124 // Closure for iteration over bitmaps 2125 class G1CMBitMapClosure : public BitMapClosure { 2126 private: 2127 // the bitmap that is being iterated over 2128 G1CMBitMap* _nextMarkBitMap; 2129 G1ConcurrentMark* _cm; 2130 G1CMTask* _task; 2131 2132 public: 2133 G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) : 2134 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 2135 2136 bool do_bit(size_t offset) { 2137 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 2138 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 2139 assert( addr < _cm->finger(), "invariant"); 2140 assert(addr >= _task->finger(), "invariant"); 2141 2142 // We move that task's local finger along. 
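    // (The local finger is moved to the current object so that, should the
    // iteration abort below, do_marking_step() can resume just past it --
    // see the nextObject() adjustment there.)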
2143     _task->move_finger_to(addr);
2144
2145     _task->scan_object(oop(addr));
2146     // we only partially drain the local queue and global stack
2147     _task->drain_local_queue(true);
2148     _task->drain_global_stack(true);
2149
2150     // if the has_aborted flag has been raised, we need to bail out of
2151     // the iteration
2152     return !_task->has_aborted();
2153   }
2154 };
2155
2156 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2157   ReferenceProcessor* result = g1h->ref_processor_cm();
2158   assert(result != NULL, "CM reference processor should not be NULL");
2159   return result;
2160 }
2161
2162 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2163                                G1ConcurrentMark* cm,
2164                                G1CMTask* task)
2165   : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
2166     _g1h(g1h), _cm(cm), _task(task)
2167 { }
2168
2169 void G1CMTask::setup_for_region(HeapRegion* hr) {
2170   assert(hr != NULL,
2171          "claim_region() should have filtered out NULL regions");
2172   _curr_region = hr;
2173   _finger      = hr->bottom();
2174   update_region_limit();
2175 }
2176
2177 void G1CMTask::update_region_limit() {
2178   HeapRegion* hr  = _curr_region;
2179   HeapWord* bottom = hr->bottom();
2180   HeapWord* limit  = hr->next_top_at_mark_start();
2181
2182   if (limit == bottom) {
2183     // The region was collected underneath our feet.
2184     // We set the finger to bottom to ensure that the bitmap
2185     // iteration that will follow this will not do anything.
2186     // (this is not a condition that holds when we set the region up,
2187     // as the region is not supposed to be empty in the first place)
2188     _finger = bottom;
2189   } else if (limit >= _region_limit) {
2190     assert(limit >= _finger, "peace of mind");
2191   } else {
2192     assert(limit < _region_limit, "only way to get here");
2193     // This can happen under some pretty unusual circumstances. An
2194     // evacuation pause empties the region underneath our feet (NTAMS
2195     // at bottom). We then do some allocation in the region (NTAMS
2196     // stays at bottom), followed by the region being used as a GC
2197     // alloc region (NTAMS will move to top() and the objects
2198     // originally below it will be grayed). All objects now marked in
2199     // the region are explicitly grayed, if below the global finger,
2200     // and in fact we do not need to scan anything else. So, we simply
2201     // set _finger to be limit to ensure that the bitmap iteration
2202     // doesn't do anything.
2203     _finger = limit;
2204   }
2205
2206   _region_limit = limit;
2207 }
2208
2209 void G1CMTask::giveup_current_region() {
2210   assert(_curr_region != NULL, "invariant");
2211   clear_region_fields();
2212 }
2213
2214 void G1CMTask::clear_region_fields() {
2215   // Values for these three fields that indicate that we're not
2216   // holding on to a region.
2217   _curr_region  = NULL;
2218   _finger       = NULL;
2219   _region_limit = NULL;
2220 }
2221
2222 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2223   if (cm_oop_closure == NULL) {
2224     assert(_cm_oop_closure != NULL, "invariant");
2225   } else {
2226     assert(_cm_oop_closure == NULL, "invariant");
2227   }
2228   _cm_oop_closure = cm_oop_closure;
2229 }
2230
2231 void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
2232   guarantee(nextMarkBitMap != NULL, "invariant");
2233   _nextMarkBitMap = nextMarkBitMap;
2234   clear_region_fields();
2235
2236   _calls                     = 0;
2237   _elapsed_time_ms           = 0.0;
2238   _termination_time_ms       = 0.0;
2239   _termination_start_time_ms = 0.0;
2240 }
2241
2242 bool G1CMTask::should_exit_termination() {
2243   regular_clock_call();
2244   // This is called when we are in the termination protocol. We should
2245   // quit if, for some reason, this task wants to abort or the global
2246   // stack is not empty (this means that we can get work from it).
2247   return !_cm->mark_stack_empty() || has_aborted();
2248 }
2249
2250 void G1CMTask::reached_limit() {
2251   assert(_words_scanned >= _words_scanned_limit ||
2252          _refs_reached >= _refs_reached_limit,
2253          "shouldn't have been called otherwise");
2254   regular_clock_call();
2255 }
2256
2257 void G1CMTask::regular_clock_call() {
2258   if (has_aborted()) return;
2259
2260   // First, we need to recalculate the words scanned and refs reached
2261   // limits for the next clock call.
2262   recalculate_limits();
2263
2264   // During the regular clock call we do the following:
2265
2266   // (1) If an overflow has been flagged, then we abort.
2267   if (_cm->has_overflown()) {
2268     set_has_aborted();
2269     return;
2270   }
2271
2272   // If we are not concurrent (i.e. we're doing remark) we don't need
2273   // to check anything else. The other steps are only needed during
2274   // the concurrent marking phase.
2275   if (!concurrent()) return;
2276
2277   // (2) If marking has been aborted for Full GC, then we also abort.
2278   if (_cm->has_aborted()) {
2279     set_has_aborted();
2280     return;
2281   }
2282
2283   double curr_time_ms = os::elapsedVTime() * 1000.0;
2284
2285   // (3) We check whether we should yield. If we have to, then we abort.
2286   if (SuspendibleThreadSet::should_yield()) {
2287     // We should yield. To do this we abort the task. The caller is
2288     // responsible for yielding.
2289     set_has_aborted();
2290     return;
2291   }
2292
2293   // (4) We check whether we've reached our time quota. If we have,
2294   // then we abort.
2295   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2296   if (elapsed_time_ms > _time_target_ms) {
2297     set_has_aborted();
2298     _has_timed_out = true;
2299     return;
2300   }
2301
2302   // (5) Finally, we check whether there are enough completed SATB
2303   // buffers available for processing. If there are, we abort.
2304   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2305   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2306     // we do need to process SATB buffers; we'll abort and restart
2307     // the marking task to do so
2308     set_has_aborted();
2309     return;
2310   }
2311 }
2312
2313 void G1CMTask::recalculate_limits() {
2314   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2315   _words_scanned_limit      = _real_words_scanned_limit;
2316
2317   _real_refs_reached_limit  = _refs_reached + refs_reached_period;
2318   _refs_reached_limit       = _real_refs_reached_limit;
2319 }
2320
2321 void G1CMTask::decrease_limits() {
2322   // This is called when we believe that we're going to do an infrequent
2323   // operation which will increase the per-byte scanned cost (i.e. move
2324   // entries to/from the global stack). It basically tries to decrease the
2325   // scanning limit so that the clock is called earlier.
2326
2327   _words_scanned_limit = _real_words_scanned_limit -
2328     3 * words_scanned_period / 4;
2329   _refs_reached_limit  = _real_refs_reached_limit -
2330     3 * refs_reached_period / 4;
2331 }
2332
2333 void G1CMTask::move_entries_to_global_stack() {
2334   // local array where we'll store the entries that will be popped
2335   // from the local queue
2336   oop buffer[global_stack_transfer_size];
2337
2338   int n = 0;
2339   oop obj;
2340   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
2341     buffer[n] = obj;
2342     ++n;
2343   }
2344
2345   if (n > 0) {
2346     // we popped at least one entry from the local queue
2347
2348     if (!_cm->mark_stack_push(buffer, n)) {
2349       set_has_aborted();
2350     }
2351   }
2352
2353   // this operation was quite expensive, so decrease the limits
2354   decrease_limits();
2355 }
2356
2357 void G1CMTask::get_entries_from_global_stack() {
2358   // local array where we'll store the entries that will be popped
2359   // from the global stack.
2360   oop buffer[global_stack_transfer_size];
2361   int n;
2362   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
2363   assert(n <= global_stack_transfer_size,
2364          "we should not pop more than the given limit");
2365   if (n > 0) {
2366     // yes, we did actually pop at least one entry
2367     for (int i = 0; i < n; ++i) {
2368       bool success = _task_queue->push(buffer[i]);
2369       // We only call this when the local queue is empty or under a
2370       // given target limit. So, we do not expect this push to fail.
2371       assert(success, "invariant");
2372     }
2373   }
2374
2375   // this operation was quite expensive, so decrease the limits
2376   decrease_limits();
2377 }
2378
2379 void G1CMTask::drain_local_queue(bool partially) {
2380   if (has_aborted()) return;
2381
2382   // Decide what the target size is, depending on whether we're going to
2383   // drain it partially (so that other tasks can steal if they run out
2384   // of things to do) or totally (at the very end).
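  // For example (illustrative numbers only): with GCDrainStackTargetSize at
  // its usual default of 64 and a task queue whose max_elems() is in the
  // thousands, a partial drain stops once the queue is back down to 64
  // entries, leaving the rest available for other tasks to steal.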
2385   size_t target_size;
2386   if (partially) {
2387     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2388   } else {
2389     target_size = 0;
2390   }
2391
2392   if (_task_queue->size() > target_size) {
2393     oop obj;
2394     bool ret = _task_queue->pop_local(obj);
2395     while (ret) {
2396       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
2397       assert(!_g1h->is_on_master_free_list(
2398                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
2399
2400       scan_object(obj);
2401
2402       if (_task_queue->size() <= target_size || has_aborted()) {
2403         ret = false;
2404       } else {
2405         ret = _task_queue->pop_local(obj);
2406       }
2407     }
2408   }
2409 }
2410
2411 void G1CMTask::drain_global_stack(bool partially) {
2412   if (has_aborted()) return;
2413
2414   // We have a policy to drain the local queue before we attempt to
2415   // drain the global stack.
2416   assert(partially || _task_queue->size() == 0, "invariant");
2417
2418   // Decide what the target size is, depending on whether we're going to
2419   // drain it partially (so that other tasks can steal if they run out
2420   // of things to do) or totally (at the very end). Notice that,
2421   // because we move entries from the global stack in chunks or
2422   // because another task might be doing the same, we might in fact
2423   // drop below the target. But, this is not a problem.
2424   size_t target_size;
2425   if (partially) {
2426     target_size = _cm->partial_mark_stack_size_target();
2427   } else {
2428     target_size = 0;
2429   }
2430
2431   if (_cm->mark_stack_size() > target_size) {
2432     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2433       get_entries_from_global_stack();
2434       drain_local_queue(partially);
2435     }
2436   }
2437 }
2438
2439 // The SATB queue has several assumptions about whether to call the par
2440 // or non-par versions of the methods. This is why some of the code is
2441 // replicated. We should really get rid of the single-threaded version
2442 // of the code to simplify things.
2443 void G1CMTask::drain_satb_buffers() {
2444   if (has_aborted()) return;
2445
2446   // We set this so that the regular clock knows that we're in the
2447   // middle of draining buffers and doesn't set the abort flag when it
2448   // notices that SATB buffers are available for draining. It'd be
2449   // very counterproductive if it did that. :-)
2450   _draining_satb_buffers = true;
2451
2452   G1CMSATBBufferClosure satb_cl(this, _g1h);
2453   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2454
2455   // This keeps claiming and applying the closure to completed buffers
2456   // until we run out of buffers or we need to abort.
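  // Note that regular_clock_call() inside the loop below still lets the
  // task abort between buffers (e.g. on a yield request or time-out);
  // _draining_satb_buffers only suppresses the one abort condition that
  // would otherwise fire *because* buffers are available.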
2457   while (!has_aborted() &&
2458          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2459     regular_clock_call();
2460   }
2461
2462   _draining_satb_buffers = false;
2463
2464   assert(has_aborted() ||
2465          concurrent() ||
2466          satb_mq_set.completed_buffers_num() == 0, "invariant");
2467
2468   // Again, this was a potentially expensive operation; decrease the
2469   // limits to get the regular clock call early
2470   decrease_limits();
2471 }
2472
2473 void G1CMTask::print_stats() {
2474   log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
2475                        _worker_id, _calls);
2476   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2477                        _elapsed_time_ms, _termination_time_ms);
2478   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
2479                        _step_times_ms.num(), _step_times_ms.avg(),
2480                        _step_times_ms.sd());
2481   log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
2482                        _step_times_ms.maximum(), _step_times_ms.sum());
2483 }
2484
2485 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
2486   return _task_queues->steal(worker_id, hash_seed, obj);
2487 }
2488
2489 /*****************************************************************************
2490
2491     The do_marking_step(time_target_ms, ...) method is the building
2492     block of the parallel marking framework. It can be called in parallel
2493     with other invocations of do_marking_step() on different tasks
2494     (but only one per task, obviously) and concurrently with the
2495     mutator threads, or during remark; hence it eliminates the need
2496     for two versions of the code. When called during remark, it will
2497     pick up from where the task left off during the concurrent marking
2498     phase. Interestingly, tasks are claimable during evacuation
2499     pauses too, since do_marking_step() ensures that it aborts before
2500     it needs to yield.
2501
2502     The data structures that it uses to do marking work are the
2503     following:
2504
2505     (1) Marking Bitmap. If there are gray objects that appear only
2506     on the bitmap (this happens either when dealing with an overflow
2507     or when the initial marking phase has simply marked the roots
2508     and didn't push them on the stack), then tasks claim heap
2509     regions whose bitmap they then scan to find gray objects. A
2510     global finger indicates where the end of the last claimed region
2511     is. A local finger indicates how far into the region a task has
2512     scanned. The two fingers are used to determine how to gray an
2513     object (i.e. whether simply marking it is OK, as it will be
2514     visited by a task in the future, or whether it also needs to be
2515     pushed on a stack).
2516
2517     (2) Local Queue. The task's local queue, which it can access
2518     reasonably efficiently. Other tasks can steal from it when they
2519     run out of work. Throughout the marking phase, a
2520     task attempts to keep its local queue short but not totally
2521     empty, so that entries are available for stealing by other
2522     tasks. Only when there is no more work will a task totally
2523     drain its local queue.
2524
2525     (3) Global Mark Stack. This handles local queue overflow. During
2526     marking only sets of entries are moved between it and the local
2527     queues, as access to it requires a mutex and more fine-grained
2528     interaction with it might cause contention. If it
2529     overflows, then the marking phase should restart and iterate
2530     over the bitmap to identify gray objects. Throughout the marking
    phase, tasks attempt to keep the global mark stack at a small
2532     length but not totally empty, so that entries are available for
2533     popping by other tasks. Only when there is no more work will tasks
2534     totally drain the global mark stack.
2535
2536     (4) SATB Buffer Queue. This is where completed SATB buffers are
2537     made available. Buffers are regularly removed from this queue
2538     and scanned for roots, so that the queue doesn't get too
2539     long. During remark, all completed buffers are processed, as
2540     well as the filled-in parts of any uncompleted buffers.
2541
2542     The do_marking_step() method tries to abort when the time target
2543     has been reached. There are a few other cases when the
2544     do_marking_step() method also aborts:
2545
2546     (1) When the marking phase has been aborted (after a Full GC).
2547
2548     (2) When a global overflow (on the global stack) has been
2549     triggered. Before the task aborts, it will actually sync up with
2550     the other tasks to ensure that all the marking data structures
2551     (local queues, stacks, fingers etc.) are re-initialized so that
2552     when do_marking_step() completes, the marking phase can
2553     immediately restart.
2554
2555     (3) When enough completed SATB buffers are available. The
2556     do_marking_step() method only tries to drain SATB buffers right
2557     at the beginning. So, if enough buffers are available, the
2558     marking step aborts and the SATB buffers are processed at
2559     the beginning of the next invocation.
2560
2561     (4) To yield. When we have to yield, we abort and yield
2562     right at the end of do_marking_step(). This saves us from a lot
2563     of hassle as, by yielding, we might allow a Full GC. If this
2564     happens then objects will be compacted underneath our feet, the
2565     heap might shrink, etc. We save checking for this by just
2566     aborting and doing the yield right at the end.
2567
2568     From the above it follows that the do_marking_step() method should
2569     be called in a loop (or, otherwise, regularly) until it completes.
2570
2571     If a marking step completes without its has_aborted() flag being
2572     true, it means it has completed the current marking phase (and
2573     also all other marking tasks have done so and have all synced up).
2574
2575     A method called regular_clock_call() is invoked "regularly" (in
2576     sub-millisecond intervals) throughout marking. It is this clock method
2577     that checks all the abort conditions which were mentioned above and
2578     decides when the task should abort. A work-based scheme is used to
2579     trigger this clock method: when the number of object words the
2580     marking phase has scanned or the number of references the marking
2581     phase has visited reaches a given limit. Additional invocations of
2582     the clock method have been planted in a few other strategic places
2583     too. The initial reason for the clock method was to avoid calling
2584     vtime too regularly, as it is quite expensive. So, once it was in
2585     place, it was natural to piggy-back all the other conditions on it
2586     too and not constantly check them throughout the code.
2587
2588     If do_termination is true then do_marking_step will enter its
2589     termination protocol.
2590
2591     The value of is_serial must be true when do_marking_step is being
2592     called serially (i.e. by the VMThread) and do_marking_step should
2593     skip any synchronization in the termination and overflow code.
2594     Examples include the serial remark code and the serial reference
2595     processing closures.
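    As a purely illustrative sketch (it condenses the actual callers in
    this file, e.g. G1CMDrainMarkingStackClosure::do_void() and
    G1CMRemarkTask::work()), both the serial and the parallel callers
    drive this method with the same loop shape:

      do {
        task->do_marking_step(target_ms,
                              true /* do_termination */,
                              is_serial);
      } while (task->has_aborted() && !cm->has_overflown());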
2596
2597     The value of is_serial must be false when do_marking_step is
2598     being called by any of the worker threads in a work gang.
2599     Examples include the concurrent marking code (CMMarkingTask),
2600     the MT remark code, and the MT reference processing closures.
2601
2602  *****************************************************************************/
2603
2604 void G1CMTask::do_marking_step(double time_target_ms,
2605                                bool do_termination,
2606                                bool is_serial) {
2607   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2608   assert(concurrent() == _cm->concurrent(), "they should be the same");
2609
2610   G1Policy* g1_policy = _g1h->g1_policy();
2611   assert(_task_queues != NULL, "invariant");
2612   assert(_task_queue != NULL, "invariant");
2613   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
2614
2615   assert(!_claimed,
2616          "only one thread should claim this task at any one time");
2617
2618   // OK, this doesn't safeguard against all possible scenarios, as it is
2619   // possible for two threads to set the _claimed flag at the same
2620   // time. But it is only for debugging purposes anyway and it will
2621   // catch most problems.
2622   _claimed = true;
2623
2624   _start_time_ms = os::elapsedVTime() * 1000.0;
2625
2626   // If do_stealing is true then do_marking_step will attempt to
2627   // steal work from the other G1CMTasks. It only makes sense to
2628   // enable stealing when the termination protocol is enabled
2629   // and do_marking_step() is not being called serially.
2630   bool do_stealing = do_termination && !is_serial;
2631
2632   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2633   _time_target_ms = time_target_ms - diff_prediction_ms;
2634
2635   // set up the variables that are used in the work-based scheme to
2636   // call the regular clock method
2637   _words_scanned = 0;
2638   _refs_reached  = 0;
2639   recalculate_limits();
2640
2641   // clear all flags
2642   clear_has_aborted();
2643   _has_timed_out = false;
2644   _draining_satb_buffers = false;
2645
2646   ++_calls;
2647
2648   // Set up the bitmap and oop closures. Anything that uses them is
2649   // eventually called from this method, so it is OK to allocate these
2650   // statically.
2651   G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
2652   G1CMOopClosure    cm_oop_closure(_g1h, _cm, this);
2653   set_cm_oop_closure(&cm_oop_closure);
2654
2655   if (_cm->has_overflown()) {
2656     // This can happen if the mark stack overflows during a GC pause
2657     // and this task, after a yield point, restarts. We have to abort
2658     // as we need to get into the overflow protocol which happens
2659     // right at the end of this task.
2660     set_has_aborted();
2661   }
2662
2663   // First drain any available SATB buffers. After this, we will not
2664   // look at SATB buffers before the next invocation of this method.
2665   // If enough completed SATB buffers are queued up, the regular clock
2666   // will abort this task so that it restarts.
2667   drain_satb_buffers();
2668   // ...then partially drain the local queue and the global stack
2669   drain_local_queue(true);
2670   drain_global_stack(true);
2671
2672   do {
2673     if (!has_aborted() && _curr_region != NULL) {
2674       // This means that we're already holding on to a region.
2675       assert(_finger != NULL, "if region is not NULL, then the finger "
2676              "should not be NULL either");
2677
2678       // We might have restarted this task after an evacuation pause
2679       // which might have evacuated the region we're holding on to
2680       // underneath our feet.
Let's read its limit again to make sure 2681 // that we do not iterate over a region of the heap that 2682 // contains garbage (update_region_limit() will also move 2683 // _finger to the start of the region if it is found empty). 2684 update_region_limit(); 2685 // We will start from _finger not from the start of the region, 2686 // as we might be restarting this task after aborting half-way 2687 // through scanning this region. In this case, _finger points to 2688 // the address where we last found a marked object. If this is a 2689 // fresh region, _finger points to start(). 2690 MemRegion mr = MemRegion(_finger, _region_limit); 2691 2692 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2693 "humongous regions should go around loop once only"); 2694 2695 // Some special cases: 2696 // If the memory region is empty, we can just give up the region. 2697 // If the current region is humongous then we only need to check 2698 // the bitmap for the bit associated with the start of the object, 2699 // scan the object if it's live, and give up the region. 2700 // Otherwise, let's iterate over the bitmap of the part of the region 2701 // that is left. 2702 // If the iteration is successful, give up the region. 2703 if (mr.is_empty()) { 2704 giveup_current_region(); 2705 regular_clock_call(); 2706 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2707 if (_nextMarkBitMap->isMarked(mr.start())) { 2708 // The object is marked - apply the closure 2709 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 2710 bitmap_closure.do_bit(offset); 2711 } 2712 // Even if this task aborted while scanning the humongous object 2713 // we can (and should) give up the current region. 2714 giveup_current_region(); 2715 regular_clock_call(); 2716 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 2717 giveup_current_region(); 2718 regular_clock_call(); 2719 } else { 2720 assert(has_aborted(), "currently the only way to do so"); 2721 // The only way to abort the bitmap iteration is to return 2722 // false from the do_bit() method. However, inside the 2723 // do_bit() method we move the _finger to point to the 2724 // object currently being looked at. So, if we bail out, we 2725 // have definitely set _finger to something non-null. 2726 assert(_finger != NULL, "invariant"); 2727 2728 // Region iteration was actually aborted. So now _finger 2729 // points to the address of the object we last scanned. If we 2730 // leave it there, when we restart this task, we will rescan 2731 // the object. It is easy to avoid this. We move the finger by 2732 // enough to point to the next possible object header (the 2733 // bitmap knows by how much we need to move it as it knows its 2734 // granularity). 2735 assert(_finger < _region_limit, "invariant"); 2736 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 2737 // Check if bitmap iteration was aborted while scanning the last object 2738 if (new_finger >= _region_limit) { 2739 giveup_current_region(); 2740 } else { 2741 move_finger_to(new_finger); 2742 } 2743 } 2744 } 2745 // At this point we have either completed iterating over the 2746 // region we were holding on to, or we have aborted. 2747 2748 // We then partially drain the local queue and the global stack. 2749 // (Do we really need this?) 
2750     drain_local_queue(true);
2751     drain_global_stack(true);
2752
2753     // Read the note on the claim_region() method on why it might
2754     // return NULL with potentially more regions available for
2755     // claiming and why we have to check out_of_regions() to determine
2756     // whether we're done or not.
2757     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2758       // We are going to try to claim a new region. We should have
2759       // given up on the previous one.
2760       // Separated the asserts so that we know which one fires.
2761       assert(_curr_region  == NULL, "invariant");
2762       assert(_finger       == NULL, "invariant");
2763       assert(_region_limit == NULL, "invariant");
2764       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2765       if (claimed_region != NULL) {
2766         // Yes, we managed to claim one
2767         setup_for_region(claimed_region);
2768         assert(_curr_region == claimed_region, "invariant");
2769       }
2770       // It is important to call the regular clock here. It might take
2771       // a while to claim a region if, for example, we hit a large
2772       // block of empty regions. So we need to call the regular clock
2773       // method once round the loop to make sure it's called
2774       // frequently enough.
2775       regular_clock_call();
2776     }
2777
2778     if (!has_aborted() && _curr_region == NULL) {
2779       assert(_cm->out_of_regions(),
2780              "at this point we should be out of regions");
2781     }
2782   } while (_curr_region != NULL && !has_aborted());
2783
2784   if (!has_aborted()) {
2785     // We cannot check whether the global stack is empty, since other
2786     // tasks might be pushing objects to it concurrently.
2787     assert(_cm->out_of_regions(),
2788            "at this point we should be out of regions");
2789     // Try to reduce the number of available SATB buffers so that
2790     // remark has less work to do.
2791     drain_satb_buffers();
2792   }
2793
2794   // Since we've done everything else, we can now totally drain the
2795   // local queue and global stack.
2796   drain_local_queue(false);
2797   drain_global_stack(false);
2798
2799   // Attempt at work stealing from other tasks' queues.
2800   if (do_stealing && !has_aborted()) {
2801     // We have not aborted. This means that we have finished all that
2802     // we could. Let's try to do some stealing...
2803
2804     // We cannot check whether the global stack is empty, since other
2805     // tasks might be pushing objects to it concurrently.
2806     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2807            "only way to reach here");
2808     while (!has_aborted()) {
2809       oop obj;
2810       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
2811         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
2812                "any stolen object should be marked");
2813         scan_object(obj);
2814
2815         // And since we're towards the end, let's totally drain the
2816         // local queue and global stack.
2817         drain_local_queue(false);
2818         drain_global_stack(false);
2819       } else {
2820         break;
2821       }
2822     }
2823   }
2824
2825   // We still haven't aborted. Now, let's try to get into the
2826   // termination protocol.
2827   if (do_termination && !has_aborted()) {
2828     // We cannot check whether the global stack is empty, since other
2829     // tasks might be concurrently pushing objects on it.
2830     // Separated the asserts so that we know which one fires.
2831     assert(_cm->out_of_regions(), "only way to reach here");
2832     assert(_task_queue->size() == 0, "only way to reach here");
2833     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2834
2835     // The G1CMTask class also extends the TerminatorTerminator class,
2836     // hence its should_exit_termination() method will also decide
2837     // whether to exit the termination protocol or not.
2838     bool finished = (is_serial ||
2839                      _cm->terminator()->offer_termination(this));
2840     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2841     _termination_time_ms +=
2842       termination_end_time_ms - _termination_start_time_ms;
2843
2844     if (finished) {
2845       // We're all done.
2846
2847       if (_worker_id == 0) {
2848         // let's allow task 0 to do this
2849         if (concurrent()) {
2850           assert(_cm->concurrent_marking_in_progress(), "invariant");
2851           // we need to set this to false before the next
2852           // safepoint. This way we ensure that the marking phase
2853           // doesn't observe any more heap expansions.
2854           _cm->clear_concurrent_marking_in_progress();
2855         }
2856       }
2857
2858       // We can now guarantee that the global stack is empty, since
2859       // all other tasks have finished. We separated the guarantees so
2860       // that, if a condition is false, we can immediately find out
2861       // which one.
2862       guarantee(_cm->out_of_regions(), "only way to reach here");
2863       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2864       guarantee(_task_queue->size() == 0, "only way to reach here");
2865       guarantee(!_cm->has_overflown(), "only way to reach here");
2866       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
2867     } else {
2868       // Apparently there's more work to do. Let's abort this task. The
2869       // caller will restart it and we can hopefully find more things to do.
2870       set_has_aborted();
2871     }
2872   }
2873
2874   // Mainly for debugging purposes to make sure that a pointer to the
2875   // closure which was statically allocated in this frame doesn't
2876   // escape it by accident.
2877   set_cm_oop_closure(NULL);
2878   double end_time_ms = os::elapsedVTime() * 1000.0;
2879   double elapsed_time_ms = end_time_ms - _start_time_ms;
2880   // Update the step history.
2881   _step_times_ms.add(elapsed_time_ms);
2882
2883   if (has_aborted()) {
2884     // The task was aborted for some reason.
2885     if (_has_timed_out) {
2886       double diff_ms = elapsed_time_ms - _time_target_ms;
2887       // Keep statistics of how well we did with respect to hitting
2888       // our target only if we actually timed out (if we aborted for
2889       // other reasons, then the results might get skewed).
2890       _marking_step_diffs_ms.add(diff_ms);
2891     }
2892
2893     if (_cm->has_overflown()) {
2894       // This is the interesting one. We aborted because a global
2895       // overflow was raised. This means we have to restart the
2896       // marking phase and start iterating over regions. However, in
2897       // order to do this we have to make sure that all tasks stop
2898       // what they are doing and re-initialize in a safe manner. We
2899       // will achieve this with the use of two barrier sync points.
2900
2901       if (!is_serial) {
2902         // We only need to enter the sync barrier if being called
2903         // from a parallel context
2904         _cm->enter_first_sync_barrier(_worker_id);
2905
2906         // When we exit this sync barrier we know that all tasks have
2907         // stopped doing marking work. So, it's now safe to
2908         // re-initialize our data structures. At the end of this method,
2909         // task 0 will clear the global data structures.
2910       }
2911
2912       // We clear the local state of this task...
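      // (In the serial case the barriers are skipped: with a single task
      // there is nobody to synchronize with before clearing the local
      // state below.)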
2913 clear_region_fields(); 2914 2915 if (!is_serial) { 2916 // ...and enter the second barrier. 2917 _cm->enter_second_sync_barrier(_worker_id); 2918 } 2919 // At this point, if we're during the concurrent phase of 2920 // marking, everything has been re-initialized and we're 2921 // ready to restart. 2922 } 2923 } 2924 2925 _claimed = false; 2926 } 2927 2928 G1CMTask::G1CMTask(uint worker_id, 2929 G1ConcurrentMark* cm, 2930 G1CMTaskQueue* task_queue, 2931 G1CMTaskQueueSet* task_queues) 2932 : _g1h(G1CollectedHeap::heap()), 2933 _worker_id(worker_id), _cm(cm), 2934 _claimed(false), 2935 _nextMarkBitMap(NULL), _hash_seed(17), 2936 _task_queue(task_queue), 2937 _task_queues(task_queues), 2938 _cm_oop_closure(NULL) { 2939 guarantee(task_queue != NULL, "invariant"); 2940 guarantee(task_queues != NULL, "invariant"); 2941 2942 _marking_step_diffs_ms.add(0.5); 2943 } 2944 2945 // These are formatting macros that are used below to ensure 2946 // consistent formatting. The *_H_* versions are used to format the 2947 // header for a particular value and they should be kept consistent 2948 // with the corresponding macro. Also note that most of the macros add 2949 // the necessary white space (as a prefix) which makes them a bit 2950 // easier to compose. 2951 2952 // All the output lines are prefixed with this string to be able to 2953 // identify them easily in a large log file. 2954 #define G1PPRL_LINE_PREFIX "###" 2955 2956 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2957 #ifdef _LP64 2958 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2959 #else // _LP64 2960 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2961 #endif // _LP64 2962 2963 // For per-region info 2964 #define G1PPRL_TYPE_FORMAT " %-4s" 2965 #define G1PPRL_TYPE_H_FORMAT " %4s" 2966 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2967 #define G1PPRL_BYTE_H_FORMAT " %9s" 2968 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2969 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2970 2971 // For summary info 2972 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2973 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2974 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2975 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2976 2977 G1PrintRegionLivenessInfoClosure:: 2978 G1PrintRegionLivenessInfoClosure(const char* phase_name) 2979 : _total_used_bytes(0), _total_capacity_bytes(0), 2980 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2981 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 2982 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2983 MemRegion g1_reserved = g1h->g1_reserved(); 2984 double now = os::elapsedTime(); 2985 2986 // Print the header of the output. 
2987 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2988 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2989 G1PPRL_SUM_ADDR_FORMAT("reserved") 2990 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2991 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2992 HeapRegion::GrainBytes); 2993 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2994 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2995 G1PPRL_TYPE_H_FORMAT 2996 G1PPRL_ADDR_BASE_H_FORMAT 2997 G1PPRL_BYTE_H_FORMAT 2998 G1PPRL_BYTE_H_FORMAT 2999 G1PPRL_BYTE_H_FORMAT 3000 G1PPRL_DOUBLE_H_FORMAT 3001 G1PPRL_BYTE_H_FORMAT 3002 G1PPRL_BYTE_H_FORMAT, 3003 "type", "address-range", 3004 "used", "prev-live", "next-live", "gc-eff", 3005 "remset", "code-roots"); 3006 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3007 G1PPRL_TYPE_H_FORMAT 3008 G1PPRL_ADDR_BASE_H_FORMAT 3009 G1PPRL_BYTE_H_FORMAT 3010 G1PPRL_BYTE_H_FORMAT 3011 G1PPRL_BYTE_H_FORMAT 3012 G1PPRL_DOUBLE_H_FORMAT 3013 G1PPRL_BYTE_H_FORMAT 3014 G1PPRL_BYTE_H_FORMAT, 3015 "", "", 3016 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 3017 "(bytes)", "(bytes)"); 3018 } 3019 3020 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 3021 const char* type = r->get_type_str(); 3022 HeapWord* bottom = r->bottom(); 3023 HeapWord* end = r->end(); 3024 size_t capacity_bytes = r->capacity(); 3025 size_t used_bytes = r->used(); 3026 size_t prev_live_bytes = r->live_bytes(); 3027 size_t next_live_bytes = r->next_live_bytes(); 3028 double gc_eff = r->gc_efficiency(); 3029 size_t remset_bytes = r->rem_set()->mem_size(); 3030 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3031 3032 _total_used_bytes += used_bytes; 3033 _total_capacity_bytes += capacity_bytes; 3034 _total_prev_live_bytes += prev_live_bytes; 3035 _total_next_live_bytes += next_live_bytes; 3036 _total_remset_bytes += remset_bytes; 3037 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3038 3039 // Print a line for this particular region. 3040 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3041 G1PPRL_TYPE_FORMAT 3042 G1PPRL_ADDR_BASE_FORMAT 3043 G1PPRL_BYTE_FORMAT 3044 G1PPRL_BYTE_FORMAT 3045 G1PPRL_BYTE_FORMAT 3046 G1PPRL_DOUBLE_FORMAT 3047 G1PPRL_BYTE_FORMAT 3048 G1PPRL_BYTE_FORMAT, 3049 type, p2i(bottom), p2i(end), 3050 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3051 remset_bytes, strong_code_roots_bytes); 3052 3053 return false; 3054 } 3055 3056 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3057 // add static memory usages to remembered set sizes 3058 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3059 // Print the footer of the output. 3060 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3061 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3062 " SUMMARY" 3063 G1PPRL_SUM_MB_FORMAT("capacity") 3064 G1PPRL_SUM_MB_PERC_FORMAT("used") 3065 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3066 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3067 G1PPRL_SUM_MB_FORMAT("remset") 3068 G1PPRL_SUM_MB_FORMAT("code-roots"), 3069 bytes_to_mb(_total_capacity_bytes), 3070 bytes_to_mb(_total_used_bytes), 3071 perc(_total_used_bytes, _total_capacity_bytes), 3072 bytes_to_mb(_total_prev_live_bytes), 3073 perc(_total_prev_live_bytes, _total_capacity_bytes), 3074 bytes_to_mb(_total_next_live_bytes), 3075 perc(_total_next_live_bytes, _total_capacity_bytes), 3076 bytes_to_mb(_total_remset_bytes), 3077 bytes_to_mb(_total_strong_code_roots_bytes)); 3078 }