/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/growableArray.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clear_range(mr);
}

void G1CMBitMap::clear_range(MemRegion mr) {
  // Clip the given range to the portion of the heap this bitmap covers.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack(G1ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
{}

bool G1CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    log_warning(gc)("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    log_warning(gc)("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of G1ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}
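// A worked example of the sizing above (illustrative numbers only,
// assuming 64-bit oops, i.e. sizeof(oop) == 8): a requested capacity
// of 4M entries reserves 4M * 8 bytes = 32MB of virtual memory for
// the stack, rounded up to the reservation alignment. expand() below
// at most doubles the capacity, capped at MarkStackSizeMax entries.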
void G1CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    // Failed to double capacity, continue;
    log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  _capacity / K, new_capacity / K);
  }
}

void G1CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void G1CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool G1CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}
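// A sketch of the caller's contract for the two batched operations
// above (hypothetical caller code, not taken from this file): a push
// that would exceed _capacity sets _overflow instead of growing the
// stack, so callers must check the overflow flag afterwards, e.g.:
//
//   stack->par_push_arr(buffer, n);
//   if (stack->overflow()) {
//     // signal global overflow; marking will be restarted
//   }
//
// par_pop_arr() returns false once the stack is empty, which makes it
// directly usable as the condition of a drain loop.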
void G1CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void G1CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            "saved index: %d index: %d", _saved_index, _index);
  _saved_index = -1;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}
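// The claiming protocol above is lock-free: each worker atomically
// bumps _claimed_survivor_index and uses the previous value to index
// into the survivor array, so every root region is handed out exactly
// once. The typical worker loop (this is the pattern used by
// G1CMRootRegionScanTask further down) is:
//
//   HeapRegion* hr = root_regions->claim_next();
//   while (hr != NULL) {
//     // ... scan the claimed region ...
//     hr = root_regions->claim_next();
//   }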
void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor          = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  }
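  // A worked example of the G1MarkingOverheadPercent branch above
  // (illustrative numbers only): with G1MarkingOverheadPercent = 10,
  // MaxGCPauseMillis = 200, GCPauseIntervalMillis = 1000 and 8
  // processors:
  //
  //   marking_overhead      = 10 / 100.0         = 0.10
  //   overall_cm_overhead   = 200 * 0.10 / 1000  = 0.02
  //   cpu_ratio             = 1.0 / 8            = 0.125
  //   marking_thread_num    = ceil(0.02 / 0.125) = 1
  //   marking_task_overhead = 0.02 / 1 * 8       = 0.16
  //   sleep_factor          = (1 - 0.16) / 0.16  = 5.25
  //
  // i.e. a single marking thread that sleeps 5.25x as long as it runs.
  // In the default branch, scale_parallel_threads() instead yields
  // MAX2((8 + 2) / 4, 1) = 2 marking threads and no sleeping.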
  assert(ConcGCThreads > 0, "Should have been set");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    log_warning(gc)("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}
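// A note on the ergonomic MarkStackSize computation in the constructor
// above (illustrative numbers only): when MarkStackSize is not set on
// the command line it becomes
//
//   MIN2(MarkStackSizeMax,
//        MAX2(MarkStackSize, parallel_marking_threads() * TASKQUEUE_SIZE))
//
// i.e. at least one task queue's worth of entries per marking thread,
// subject to the MarkStackSizeMax cap. With 2 marking threads and the
// usual 64-bit TASKQUEUE_SIZE of 128K entries, that is a minimum of
// 256K global mark stack entries.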
void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}
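// A worked example of the work division above (illustrative numbers
// only): clearing is handed out in G1ClearBitMapTask::chunk_size() =
// 1M byte chunks of bitmap. With a 1GB heap and heap_map_factor() = 64
// (one bitmap bit per 64 heap bytes at the default 8-byte object
// alignment), num_bytes_to_clear = 1G / 64 = 16M of bitmap, giving
// num_chunks = 16 work units, so at most 16 workers can usefully
// participate in the clearing.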
void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_parallel_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended by a Full GC or for an evacuation
 * pause to occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}
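// Putting the two barriers together, the overflow handshake is roughly
// the following (a condensed sketch of the logic above and of
// do_marking_step(), not additional behavior):
//
//   a task overflows the global mark stack
//     -> all tasks abort their current marking step
//     -> enter_first_sync_barrier():  all tasks sync; worker 0 resets
//        the global marking state (clearing the overflow flag only in
//        the concurrent case)
//     -> each task resets its own local data structures
//     -> enter_second_sync_barrier(): all tasks sync again
//     -> marking restarts (or, during remark, the pause is aborted and
//        concurrent marking is restarted)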
class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark*     _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      G1CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial */);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check();

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
         "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
         max_parallel_marking_threads(), n_conc_workers);
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _parallel_marking_threads = MIN2(calc_parallel_marking_threads(),
                                     // We distribute work on a per-region basis, so starting
                                     // more threads than that is useless.
                                     root_regions()->num_root_regions());
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _parallel_marking_threads, root_regions()->num_root_regions());
    _parallel_workers->run_task(&task, _parallel_marking_threads);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  _parallel_workers->run_task(&markingTask);
  print_stats();
}
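// A note on how the sleep factor computed in the constructor is
// consumed: after an aborted marking step,
// G1CMConcurrentMarkingTask::work() (above) sleeps for
// elapsed_vtime_sec * sleep_factor() * 1000.0 milliseconds. For
// example (illustrative numbers only), with sleep_factor = 5.25 a
// 10ms marking step is followed by a ~52ms sleep, which keeps the
// marking threads close to the requested CPU overhead.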
void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc)("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc)("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}
void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}
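// An illustrative reading of the predicate above (a paraphrase, based
// on is_obj_ill()): an object is treated as live here if it lies
// outside the G1 reserved heap (so non-heap addresses are never judged
// dead), or if it is not "ill" with respect to the marking in
// progress - roughly, it has either been marked in the next bitmap or
// was allocated since the current marking cycle started. A NULL
// reference is never live.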
// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the G1CMTask associated with a worker thread (for
// serial reference processing the G1CMTask for worker 0 is used) to
// preserve (mark) and trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// state. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  int               _ref_counter_limit;
  int               _ref_counter;
  bool              _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false      /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMtask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  bool              _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;
  WorkGang*         _workers;
  uint              _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask&      _proc_task;
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}
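// Note the pattern in both execute() overloads above: the concurrency
// level is re-established with set_concurrency() before every proxy
// task is run. The reason (paraphrasing the comments above): the
// termination protocol in G1CMTask::do_marking_step() waits for
// exactly _active_tasks workers, so if reference processing runs with
// a different worker count than the preceding marking phase, the
// terminator and the two overflow barriers must be resized first;
// otherwise workers would wait for peers that never arrive.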
    return;
  }

  ResourceMark rm;
  HandleMark hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          _gc_timer_cm);
    _gc_tracer_cm->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
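    // If the flag was set, the stack contents are no longer guaranteed to
    // cover all grey objects, so marking will eventually have to restart;
    // the overflow check below re-raises the flag just in case.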

    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed
    return;
  }

  assert(_markStack.isEmpty(), "Marking should have completed");

  // Unload Klasses, String, Symbols, Code Cache, etc.
  if (ClassUnloadingWithConcurrentMark) {
    bool purged_classes;

    {
      GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
      purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
    }

    {
      GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
      weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
    }
  }

  if (G1StringDedup::is_enabled()) {
    GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
    G1StringDedup::unlink(&g1_is_alive);
  }
}

void G1ConcurrentMark::swapMarkBitMaps() {
  G1CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap    = (G1CMBitMap*) temp;
}

// Closure for marking entries in SATB buffers.
class G1CMSATBBufferClosure : public SATBBufferClosure {
 private:
  G1CMTask* _task;
  G1CollectedHeap* _g1h;

  // This is very similar to G1CMTask::deal_with_reference, but with
  // more relaxed requirements for the argument, so this must be more
  // circumspect about treating the argument as an object.
  void do_entry(void* entry) const {
    _task->increment_refs_reached();
    HeapRegion* hr = _g1h->heap_region_containing(entry);
    if (entry < hr->next_top_at_mark_start()) {
      // Until we get here, we don't know whether entry refers to a valid
      // object; it could instead have been a stale reference.
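      // Entries at or above NTAMS refer to objects allocated since marking
      // started; those are implicitly live and need no explicit marking.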
      oop obj = static_cast<oop>(entry);
      assert(obj->is_oop(true /* ignore mark word */),
             "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
      _task->make_reference_grey(obj);
    }
  }

 public:
  G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  G1CMSATBBufferClosure _cm_satb_cl;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

 public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
    _cm_satb_cl(task, g1h),
    _cm_cl(g1h, g1h->concurrent_mark(), task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods
        // to find roots for concurrent marking; however, oops reachable from
        // nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or
        // klass_holder) of the receiver, should be live by the SATB invariant,
        // but other oops recorded in nmethods may behave differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
      }
    }
  }
};

class G1CMRemarkTask: public AbstractGangTask {
 private:
  G1ConcurrentMark* _cm;
 public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true         /* do_termination */,
                              false        /* is_serial */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
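      // That is what the !_cm->has_overflown() term in the loop condition
      // above achieves: on overflow we fall out of the loop even though
      // the task has aborted.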
      task->record_end_time();
    }
  }

  G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void G1ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);

  g1h->ensure_parsability(false);

  // this is remark, so we'll use up all active threads
  uint active_workers = g1h->workers()->active_workers();
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the G1ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  {
    StrongRootsScope srs(active_workers);

    G1CMRemarkTask remarkTask(this, active_workers);
    // We will start all available threads, even if we decide that the
    // active_workers will be fewer. The extra ones will just bail out
    // immediately.
    g1h->workers()->run_task(&remarkTask);
  }

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
            BOOL_TO_STR(has_overflown()),
            satb_mq_set.completed_buffers_num());

  print_stats();
}

void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
}

HeapRegion*
G1ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    HeapRegion* curr_region = _g1h->heap_region_containing(finger);

    // Above heap_region_containing may return NULL as we always scan and
    // claim regions until the end of the heap. In this case, just jump to
    // the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit  = curr_region->next_top_at_mark_start();

      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
      assert(_finger >= end, "the finger should have moved forward");

      if (limit > bottom) {
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        // we return NULL and the caller should try calling
        // claim_region() again.
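        // (the CAS above has already moved _finger past this empty region,
        // so the retry will inspect the next region)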
        return NULL;
      }
    } else {
      assert(_finger > finger, "the finger should have moved forward");
      // read it again
      finger = _finger;
    }
  }

  return NULL;
}

#ifndef PRODUCT
class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
 private:
  G1CollectedHeap* _g1h;
  const char* _phase;
  int _info;

 public:
  VerifyNoCSetOops(const char* phase, int info = -1) :
    _g1h(G1CollectedHeap::heap()),
    _phase(phase),
    _info(info)
  { }

  void operator()(oop obj) const {
    guarantee(obj->is_oop(),
              "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
              p2i(obj), _phase, _info);
    guarantee(!_g1h->obj_in_cs(obj),
              "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
              p2i(obj), _phase, _info);
  }
};

void G1ConcurrentMark::verify_no_cset_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
    return;
  }

  // Verify entries on the global mark stack
  _markStack.iterate(VerifyNoCSetOops("Stack"));

  // Verify entries on the task queues
  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->iterate(VerifyNoCSetOops("Queue", i));
  }

  // Verify the global finger
  HeapWord* global_finger = finger();
  if (global_finger != NULL && global_finger < _heap_end) {
    // Since we always iterate over all regions, we might get a NULL HeapRegion
    // here.
    HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              "global finger: " PTR_FORMAT " region: " HR_FORMAT,
              p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
  }

  // Verify the task fingers
  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  for (uint i = 0; i < parallel_marking_threads(); ++i) {
    G1CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap_end) {
      // See above note on the global finger verification.
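      // A task finger, unlike the global finger, may point into a collection
      // set region, but only while it is still at that region's bottom.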
      HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                !task_hr->in_collection_set(),
                "task finger: " PTR_FORMAT " region: " HR_FORMAT,
                p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
    }
  }
}
#endif // PRODUCT

void G1ConcurrentMark::create_live_data() {
  _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap);
}

void G1ConcurrentMark::finalize_live_data() {
  _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap);
}

void G1ConcurrentMark::verify_live_data() {
  _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap);
}

void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
  _g1h->g1_rem_set()->clear_card_live_data(workers);
}

#ifdef ASSERT
void G1ConcurrentMark::verify_live_data_clear() {
  _g1h->g1_rem_set()->verify_card_live_data_is_clear();
}
#endif

void G1ConcurrentMark::print_stats() {
  if (!log_is_enabled(Debug, gc, stats)) {
    return;
  }
  log_debug(gc, stats)("---------------------------------------------------------------------");
  for (size_t i = 0; i < _active_tasks; ++i) {
    _tasks[i]->print_stats();
    log_debug(gc, stats)("---------------------------------------------------------------------");
  }
}

void G1ConcurrentMark::abort() {
  if (!cmThread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  {
    GCTraceTime(Debug, gc)("Clear Next Bitmap");
    clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
  }
  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  {
    GCTraceTime(Debug, gc)("Clear Live Data");
    clear_live_data(_g1h->workers());
  }
  DEBUG_ONLY({
    GCTraceTime(Debug, gc)("Verify Live Data Clear");
    verify_live_data_clear();
  })
  // Empty mark stack
  reset_marking_state();
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking; we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
                                 false, /* new active value */
                                 satb_mq_set.is_active() /* expected_active */);
}

static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

void G1ConcurrentMark::print_summary_info() {
  Log(gc, marking) log;
  if (!log.is_trace()) {
    return;
  }

  log.trace(" Concurrent marking:");
  print_ms_time_info(" ", "init marks", _init_times);
  print_ms_time_info(" ", "remarks", _remark_times);
  {
    print_ms_time_info(" ", "final marks", _remark_mark_times);
    print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
  }
  print_ms_time_info(" ", "cleanups", _cleanup_times);
  log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).",
            _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  if (G1ScrubRemSets) {
    log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
              _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  }
  log.trace(" Total stop_world time = %8.2f s.",
            (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
  log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).",
            cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
}

void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  _parallel_workers->print_worker_threads_on(st);
}

void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
  _parallel_workers->threads_do(tc);
}

void G1ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
}

// Closure for iteration over bitmaps
class G1CMBitMapClosure : public BitMapClosure {
 private:
  // the bitmap that is being iterated over
  G1CMBitMap*       _nextMarkBitMap;
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;

 public:
  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) :
    _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }

  bool do_bit(size_t offset) {
    HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
    assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert(addr < _cm->finger(), "invariant");
    assert(addr >= _task->finger(), "invariant");

    // We move this task's local finger along.
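    // Recording the finger before scanning means that, if do_marking_step()
    // aborts mid-region, a later restart can resume at this object rather
    // than rescanning the whole region.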
    _task->move_finger_to(addr);

    _task->scan_object(oop(addr));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);

    // if the has_aborted flag has been raised, we need to bail out of
    // the iteration
    return !_task->has_aborted();
  }
};

static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
  ReferenceProcessor* result = g1h->ref_processor_cm();
  assert(result != NULL, "CM reference processor should not be NULL");
  return result;
}

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               G1ConcurrentMark* cm,
                               G1CMTask* task)
  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
    _g1h(g1h), _cm(cm), _task(task)
{ }

void G1CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  _curr_region = hr;
  _finger      = hr->bottom();
  update_region_limit();
}

void G1CMTask::update_region_limit() {
  HeapRegion* hr   = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit  = hr->next_top_at_mark_start();

  if (limit == bottom) {
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}

void G1CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  clear_region_fields();
}

void G1CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
  _curr_region  = NULL;
  _finger       = NULL;
  _region_limit = NULL;
}

void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  if (cm_oop_closure == NULL) {
    assert(_cm_oop_closure != NULL, "invariant");
  } else {
    assert(_cm_oop_closure == NULL, "invariant");
  }
  _cm_oop_closure = cm_oop_closure;
}

void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
  guarantee(nextMarkBitMap != NULL, "invariant");
  _nextMarkBitMap = nextMarkBitMap;
  clear_region_fields();

  _calls                     = 0;
  _elapsed_time_ms           = 0.0;
  _termination_time_ms       = 0.0;
  _termination_start_time_ms = 0.0;
}

bool G1CMTask::should_exit_termination() {
  regular_clock_call();
  // This is called when we are in the termination protocol. We should
  // quit if, for some reason, this task wants to abort or the global
  // stack is not empty (this means that we can get work from it).
  return !_cm->mark_stack_empty() || has_aborted();
}

void G1CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
         "shouldn't have been called otherwise");
  regular_clock_call();
}

void G1CMTask::regular_clock_call() {
  if (has_aborted()) return;

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    set_has_aborted();
    return;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!concurrent()) return;

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    set_has_aborted();
    return;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (3) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    return;
  }

  // (4) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    return;
  }

  // (5) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
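  // (unless this task is itself in the middle of draining them; see the
  // _draining_satb_buffers check below and drain_satb_buffers())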
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    // we do need to process SATB buffers, we'll abort and restart
    // the marking task to do so
    set_has_aborted();
    return;
  }
}

void G1CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit      = _real_words_scanned_limit;

  _real_refs_reached_limit  = _refs_reached + refs_reached_period;
  _refs_reached_limit       = _real_refs_reached_limit;
}

void G1CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per-byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit  = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}

void G1CMTask::move_entries_to_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the local queue
  oop buffer[global_stack_transfer_size];

  int n = 0;
  oop obj;
  while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
    buffer[n] = obj;
    ++n;
  }

  if (n > 0) {
    // we popped at least one entry from the local queue

    if (!_cm->mark_stack_push(buffer, n)) {
      set_has_aborted();
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void G1CMTask::get_entries_from_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the global stack.
  oop buffer[global_stack_transfer_size];
  int n;
  _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  assert(n <= global_stack_transfer_size,
         "we should not pop more than the given limit");
  if (n > 0) {
    // yes, we did actually pop at least one entry
    for (int i = 0; i < n; ++i) {
      bool success = _task_queue->push(buffer[i]);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void G1CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) return;

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
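  // For a partial drain we leave up to a third of the queue's capacity
  // (capped at GCDrainStackTargetSize) so that other tasks have entries
  // left to steal; a total drain empties the queue completely.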
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    oop obj;
    bool ret = _task_queue->pop_local(obj);
    while (ret) {
      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
      assert(!_g1h->is_on_master_free_list(
                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");

      scan_object(obj);

      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(obj);
      }
    }
  }
}

void G1CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end). Notice that,
  // because we move entries from the global stack in chunks or
  // because another task might be doing the same, we might in fact
  // drop below the target. But, this is not a problem.
  size_t target_size;
  if (partially) {
    target_size = _cm->partial_mark_stack_size_target();
  } else {
    target_size = 0;
  }

  if (_cm->mark_stack_size() > target_size) {
    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      get_entries_from_global_stack();
      drain_local_queue(partially);
    }
  }
}

// SATB Queue has several assumptions on whether to call the par or
// non-par versions of the methods. This is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
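  // Calling regular_clock_call() between buffers keeps the time-target
  // and yield checks responsive even when many buffers are queued up.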
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    regular_clock_call();
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}

void G1CMTask::print_stats() {
  log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
                       _worker_id, _calls);
  log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                       _elapsed_time_ms, _termination_time_ms);
  log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                       _step_times_ms.num(), _step_times_ms.avg(),
                       _step_times_ms.sd());
  log_debug(gc, stats)(" max = %1.2lfms, total = %1.2lfms",
                       _step_times_ms.maximum(), _step_times_ms.sum());
}

bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
  return _task_queues->steal(worker_id, hash_seed, obj);
}

/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework. It can be called in parallel
    with other invocations of do_marking_step() on different tasks
    (but only one per task, obviously) and concurrently with the
    mutator threads, or during remark, hence it eliminates the need
    for two versions of the code. When called during remark, it will
    pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
    it needs to yield.

    The data structures that it uses to do marking work are the
    following:

    (1) Marking Bitmap. If there are gray objects that appear only
    on the bitmap (this happens either when dealing with an overflow
    or when the initial marking phase has simply marked the roots
    and didn't push them on the stack), then tasks claim heap
    regions whose bitmap they then scan to find gray objects. A
    global finger indicates where the end of the last claimed region
    is. A local finger indicates how far into the region a task has
    scanned. The two fingers are used to determine how to gray an
    object (i.e. whether simply marking it is OK, as it will be
    visited by a task in the future, or whether it needs to be also
    pushed on a stack).

    (2) Local Queue. The local queue of the task which is accessed
    reasonably efficiently by the task. Other tasks can steal from
    it when they run out of work. Throughout the marking phase, a
    task attempts to keep its local queue short but not totally
    empty, so that entries are available for stealing by other
    tasks. Only when there is no more work will a task totally
    drain its local queue.

    (3) Global Mark Stack. This handles local queue overflow. During
    marking only sets of entries are moved between it and the local
    queues, as access to it requires a mutex and finer-grained
    interaction with it might cause contention. If it
    overflows, then the marking phase should restart and iterate
    over the bitmap to identify gray objects.
    Throughout the marking
    phase, tasks attempt to keep the global mark stack at a small
    length but not totally empty, so that entries are available for
    popping by other tasks. Only when there is no more work will tasks
    totally drain the global mark stack.

    (4) SATB Buffer Queue. This is where completed SATB buffers are
    made available. Buffers are regularly removed from this queue
    and scanned for roots, so that the queue doesn't get too
    long. During remark, all completed buffers are processed, as
    well as the filled in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

    (1) When the marking phase has been aborted (after a Full GC).

    (2) When a global overflow (on the global stack) has been
    triggered. Before the task aborts, it will actually sync up with
    the other tasks to ensure that all the marking data structures
    (local queues, stacks, fingers etc.) are re-initialized so that
    when do_marking_step() completes, the marking phase can
    immediately restart.

    (3) When enough completed SATB buffers are available. The
    do_marking_step() method only tries to drain SATB buffers right
    at the beginning. So, if enough buffers are available, the
    marking step aborts and the SATB buffers are processed at
    the beginning of the next invocation.

    (4) To yield. When we have to yield then we abort and yield
    right at the end of do_marking_step(). This saves us from a lot
    of hassle as, by yielding, we might allow a Full GC. If this
    happens then objects will be compacted underneath our feet, the
    heap might shrink, etc. We save checking for this by just
    aborting and doing the yield right at the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (in
    sub-ms intervals) throughout marking. It is this clock method that
    checks all the abort conditions which were mentioned above and
    decides when the task should abort. A work-based scheme is used to
    trigger this clock method: when the number of object words the
    marking phase has scanned or the number of references the marking
    phase has visited reaches a given limit. Additional calls to the
    clock method have been planted in a few other strategic places
    too. The initial reason for the clock method was to avoid calling
    vtime too regularly, as it is quite expensive. So, once it was in
    place, it was natural to piggy-back all the other conditions on it
    too and not constantly check them throughout the code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.

 *****************************************************************************/

void G1CMTask::do_marking_step(double time_target_ms,
                               bool do_termination,
                               bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1Policy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it is
  // possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other G1CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms = g1_policy->predictor().get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached  = 0;
  recalculate_limits();

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure    cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet.
      // Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
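    // The partial drains keep the local queue short enough for stealing
    // and cap the global stack before we go and claim another region.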
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region  == NULL, "invariant");
      assert(_finger       == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");
    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");
    while (!has_aborted()) {
      oop obj;
      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The G1CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
    } else {
      // Apparently there's more work to do. Let's abort this task;
      // it will be restarted and we can hopefully find more things to do.
      set_has_aborted();
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.
    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
  }

  _claimed = false;
}

G1CMTask::G1CMTask(uint worker_id,
                   G1ConcurrentMark* cm,
                   G1CMTaskQueue* task_queue,
                   G1CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX "###"

#define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT " %-4s"
#define G1PPRL_TYPE_H_FORMAT " %4s"
#define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT " %9s"
#define G1PPRL_DOUBLE_FORMAT " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(const char* phase_name)
  : _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "(bytes)");
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type       = r->get_type_str();
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, strong_code_roots_bytes);

  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // add static memory usage to remembered set sizes
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          " SUMMARY"
                          G1PPRL_SUM_MB_FORMAT("capacity")
                          G1PPRL_SUM_MB_PERC_FORMAT("used")
                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                          G1PPRL_SUM_MB_FORMAT("remset")
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
                          perc(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
                          perc(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
                          perc(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
}