/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/growableArray.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
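  // (Illustrative note: with the usual 64-bit defaults, HeapWordSize is 8 and
  // _shifter is 0, i.e. one bitmap bit per heap word, so this rounds addr up
  // to the next 8-byte boundary, the smallest address an object could start at.)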
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clear_range(mr);
}

void G1CMBitMap::clear_range(MemRegion mr) {
  // Clip the range to the part of the heap the bitmap actually covers.
  // (Note: the original statement discarded the result of intersection(),
  // which is a no-op; the assignment below applies the clipping.)
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack() :
  _reserved_space(),
  _base(NULL),
  _capacity(0),
  _saved_index((size_t)AllBits),
  _should_expand(false) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= MarkStackSizeMax,
         "Trying to resize stack to " SIZE_FORMAT " elements when the maximum is " SIZE_FORMAT, new_capacity, MarkStackSizeMax);

  size_t reservation_size = ReservedSpace::allocation_align_size_up(new_capacity * sizeof(oop));

  ReservedSpace rs(reservation_size);
  if (!rs.is_reserved()) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " elements and size " SIZE_FORMAT "B.", new_capacity, reservation_size);
    return false;
  }

  VirtualSpace vs;

  if (!vs.initialize(rs, rs.size())) {
    rs.release();
    log_warning(gc)("Failed to commit memory for new overflow mark stack of size " SIZE_FORMAT "B.", rs.size());
    return false;
  }

  assert(vs.committed_size() == rs.size(), "Failed to commit all of the mark stack.");

  // Release old mapping.
  _reserved_space.release();

  // Save new mapping for future unmapping.
  _reserved_space = rs;

  MemTracker::record_virtual_memory_type((address)_reserved_space.base(), mtGC);

  _base = (oop*) vs.low();
  _capacity = new_capacity;
  set_empty();
  _should_expand = false;

  return true;
}
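
// Note on sizing (an illustrative example, not taken from the code above):
// capacities passed to resize()/allocate() are counted in oop entries, not
// bytes. A capacity of, say, 1M entries on a 64-bit VM therefore reserves
// 1M * sizeof(oop) = 8M, rounded up to the reservation granularity by
// allocation_align_size_up().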
bool G1CMMarkStack::allocate(size_t capacity) {
  return resize(capacity);
}

void G1CMMarkStack::expand() {
  // Clear expansion flag
  _should_expand = false;

  if (_capacity == MarkStackSizeMax) {
    log_debug(gc)("Cannot expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " elements.", _capacity);
    return;
  }
  size_t old_capacity = _capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, MarkStackSizeMax);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded marking stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " elements",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand marking stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " elements",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _reserved_space.release();
  }
}

void G1CMMarkStack::par_push_arr(oop* buffer, size_t n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  size_t start = _index;
  size_t next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (size_t i = 0; i < n; i++) {
    size_t ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = buffer[i];
  }
}

bool G1CMMarkStack::par_pop_arr(oop* buffer, size_t max, size_t* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  size_t index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    size_t k = MIN2(max, index);
    size_t new_ind = index - k;
    for (size_t j = 0; j < k; j++) {
      buffer[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

void G1CMMarkStack::note_start_of_gc() {
  assert(_saved_index == (size_t)AllBits, "note_start_of_gc()/end_of_gc() calls bracketed incorrectly");
  _saved_index = _index;
}

void G1CMMarkStack::note_end_of_gc() {
  guarantee(!stack_modified(), "Saved index " SIZE_FORMAT " must be the same as " SIZE_FORMAT, _saved_index, _index);

  _saved_index = (size_t)AllBits;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
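  // Claiming is lock-free: each marking thread atomically increments
  // _claimed_survivor_index, and the value before the increment (hence the
  // "- 1" below, since Atomic::add returns the new value) is the index of
  // the region that thread now owns. Indices past the end of the survivor
  // list simply yield NULL.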
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _global_mark_stack(),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
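  // (The thread starts running immediately, but parks itself in its run
  // loop until the first concurrent cycle is actually started.)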
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / os::initial_active_processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor          = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
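    // (Illustrative note: TASKQUEUE_SIZE is the capacity of each task's local
    // queue, typically 128K entries on 64-bit builds. With, say, 4 marking
    // threads the ergonomic value above would be
    // MIN2(MarkStackSizeMax, MAX2(MarkStackSize, 4 * 128K)) entries.)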
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.allocate(MarkStackSize)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}
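
// Resets the global marking data structures. When called with
// clear_overflow == false (only legal while has_overflown() is set), the
// overflow flag is deliberately preserved so that callers can still observe
// the overflow and restart marking.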
void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _global_mark_stack.set_should_expand(has_overflown());
  _global_mark_stack.set_empty();        // Also clears the overflow stack's overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    // (Initializer order matches the declaration order above.)
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_parallel_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}
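
// Unlike cleanup_for_next_mark() above, which clears the next bitmap
// concurrently and may yield, this clears the whole prev bitmap in one
// non-yielding pass, which is why it is only allowed at a safepoint.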
void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently with the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
794 * 795 * Because the thread that does the sync barrier has left the STS, it 796 * is possible to be suspended for a Full GC or an evacuation pause 797 * could occur. This is actually safe, since the entering the sync 798 * barrier is one of the last things do_marking_step() does, and it 799 * doesn't manipulate any data structures afterwards. 800 */ 801 802 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) { 803 bool barrier_aborted; 804 { 805 SuspendibleThreadSetLeaver sts_leave(concurrent()); 806 barrier_aborted = !_first_overflow_barrier_sync.enter(); 807 } 808 809 // at this point everyone should have synced up and not be doing any 810 // more work 811 812 if (barrier_aborted) { 813 // If the barrier aborted we ignore the overflow condition and 814 // just abort the whole marking phase as quickly as possible. 815 return; 816 } 817 818 // If we're executing the concurrent phase of marking, reset the marking 819 // state; otherwise the marking state is reset after reference processing, 820 // during the remark pause. 821 // If we reset here as a result of an overflow during the remark we will 822 // see assertion failures from any subsequent set_concurrency_and_phase() 823 // calls. 824 if (concurrent()) { 825 // let the task associated with with worker 0 do this 826 if (worker_id == 0) { 827 // task 0 is responsible for clearing the global data structures 828 // We should be here because of an overflow. During STW we should 829 // not clear the overflow flag since we rely on it being true when 830 // we exit this method to abort the pause and restart concurrent 831 // marking. 832 reset_marking_state(true /* clear_overflow */); 833 834 log_info(gc, marking)("Concurrent Mark reset for overflow"); 835 } 836 } 837 838 // after this, each task should reset its own data structures then 839 // then go into the second barrier 840 } 841 842 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) { 843 SuspendibleThreadSetLeaver sts_leave(concurrent()); 844 _second_overflow_barrier_sync.enter(); 845 846 // at this point everything should be re-initialized and ready to go 847 } 848 849 class G1CMConcurrentMarkingTask: public AbstractGangTask { 850 private: 851 G1ConcurrentMark* _cm; 852 ConcurrentMarkThread* _cmt; 853 854 public: 855 void work(uint worker_id) { 856 assert(Thread::current()->is_ConcurrentGC_thread(), 857 "this should only be done by a conc GC thread"); 858 ResourceMark rm; 859 860 double start_vtime = os::elapsedVTime(); 861 862 { 863 SuspendibleThreadSetJoiner sts_join; 864 865 assert(worker_id < _cm->active_tasks(), "invariant"); 866 G1CMTask* the_task = _cm->task(worker_id); 867 the_task->record_start_time(); 868 if (!_cm->has_aborted()) { 869 do { 870 double start_vtime_sec = os::elapsedVTime(); 871 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 872 873 the_task->do_marking_step(mark_step_duration_ms, 874 true /* do_termination */, 875 false /* is_serial*/); 876 877 double end_vtime_sec = os::elapsedVTime(); 878 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; 879 _cm->clear_has_overflown(); 880 881 _cm->do_yield_check(); 882 883 jlong sleep_time_ms; 884 if (!_cm->has_aborted() && the_task->has_aborted()) { 885 sleep_time_ms = 886 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0); 887 { 888 SuspendibleThreadSetLeaver sts_leave; 889 os::sleep(Thread::current(), sleep_time_ms, false); 890 } 891 } 892 } while (!_cm->has_aborted() && the_task->has_aborted()); 893 } 894 the_task->record_end_time(); 895 
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
         "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
         max_parallel_marking_threads(), n_conc_workers);
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _parallel_marking_threads = MIN2(calc_parallel_marking_threads(),
                                     // We distribute work on a per-region basis, so starting
                                     // more threads than that is useless.
                                     root_regions()->num_root_regions());
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _parallel_marking_threads, root_regions()->num_root_regions());
    _parallel_workers->run_task(&task, _parallel_marking_threads);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _parallel_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_global_mark_stack.should_expand()) {
    _global_mark_stack.expand();
  }
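  // (should_expand() was latched in reset_marking_state() if the stack
  // overflowed during this cycle; expand() doubles the capacity, capped at
  // MarkStackSizeMax.)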

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc)("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc)("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitmap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Do this before the call below, since it affects the metric by which
  // we sort the heap regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the G1CMTask associated with a worker thread (for serial
// reference processing the G1CMTask for worker 0 is used) to preserve
// (mark) and trace referent objects.
1409 // 1410 // Using the G1CMTask and embedded local queues avoids having the worker 1411 // threads operating on the global mark stack. This reduces the risk 1412 // of overflowing the stack - which we would rather avoid at this late 1413 // state. Also using the tasks' local queues removes the potential 1414 // of the workers interfering with each other that could occur if 1415 // operating on the global stack. 1416 1417 class G1CMKeepAliveAndDrainClosure: public OopClosure { 1418 G1ConcurrentMark* _cm; 1419 G1CMTask* _task; 1420 int _ref_counter_limit; 1421 int _ref_counter; 1422 bool _is_serial; 1423 public: 1424 G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) : 1425 _cm(cm), _task(task), _is_serial(is_serial), 1426 _ref_counter_limit(G1RefProcDrainInterval) { 1427 assert(_ref_counter_limit > 0, "sanity"); 1428 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 1429 _ref_counter = _ref_counter_limit; 1430 } 1431 1432 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 1433 virtual void do_oop( oop* p) { do_oop_work(p); } 1434 1435 template <class T> void do_oop_work(T* p) { 1436 if (!_cm->has_overflown()) { 1437 oop obj = oopDesc::load_decode_heap_oop(p); 1438 _task->deal_with_reference(obj); 1439 _ref_counter--; 1440 1441 if (_ref_counter == 0) { 1442 // We have dealt with _ref_counter_limit references, pushing them 1443 // and objects reachable from them on to the local stack (and 1444 // possibly the global stack). Call G1CMTask::do_marking_step() to 1445 // process these entries. 1446 // 1447 // We call G1CMTask::do_marking_step() in a loop, which we'll exit if 1448 // there's nothing more to do (i.e. we're done with the entries that 1449 // were pushed as a result of the G1CMTask::deal_with_reference() calls 1450 // above) or we overflow. 1451 // 1452 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted() 1453 // flag while there may still be some work to do. (See the comment at 1454 // the beginning of G1CMTask::do_marking_step() for those conditions - 1455 // one of which is reaching the specified time target.) It is only 1456 // when G1CMTask::do_marking_step() returns without setting the 1457 // has_aborted() flag that the marking step has completed. 1458 do { 1459 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 1460 _task->do_marking_step(mark_step_duration_ms, 1461 false /* do_termination */, 1462 _is_serial); 1463 } while (_task->has_aborted() && !_cm->has_overflown()); 1464 _ref_counter = _ref_counter_limit; 1465 } 1466 } 1467 } 1468 }; 1469 1470 // 'Drain' oop closure used by both serial and parallel reference processing. 1471 // Uses the G1CMTask associated with a given worker thread (for serial 1472 // reference processing the G1CMtask for worker 0 is used). Calls the 1473 // do_marking_step routine, with an unbelievably large timeout value, 1474 // to drain the marking data structures of the remaining entries 1475 // added by the 'keep alive' oop closure above. 

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  bool              _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;
  WorkGang*         _workers;
  uint              _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask&      _proc_task;
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}

void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists. We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_global_mark_stack.is_empty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the JNI
    // references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // serial reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          _gc_timer_cm);
    _gc_tracer_cm->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
    assert(_global_mark_stack.overflow() || _global_mark_stack.is_empty(),
           "mark stack should be empty (unless it overflowed)");

    if (_global_mark_stack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed
    return;
  }

  assert(_global_mark_stack.is_empty(), "Marking should have completed");

  // Unload Klasses, Strings, Symbols, Code Cache, etc.
  if (ClassUnloadingWithConcurrentMark) {
    bool purged_classes;

    {
      GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
      purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
    }

    {
      GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
      weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
    }
  }

  if (G1StringDedup::is_enabled()) {
    GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
    G1StringDedup::unlink(&g1_is_alive);
  }
}

void G1ConcurrentMark::swapMarkBitMaps() {
  G1CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap    = (G1CMBitMap*)  temp;
}

// Closure for marking entries in SATB buffers.
class G1CMSATBBufferClosure : public SATBBufferClosure {
 private:
  G1CMTask* _task;
  G1CollectedHeap* _g1h;

  // This is very similar to G1CMTask::deal_with_reference, but with
  // more relaxed requirements for the argument, so this must be more
  // circumspect about treating the argument as an object.
  void do_entry(void* entry) const {
    _task->increment_refs_reached();
    HeapRegion* hr = _g1h->heap_region_containing(entry);
    if (entry < hr->next_top_at_mark_start()) {
      // Until we get here, we don't know whether entry refers to a valid
      // object; it could instead have been a stale reference.
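      // (Entries at or above the region's next_top_at_mark_start() were
      // allocated during the current marking cycle and are treated as
      // implicitly live, so only entries below nTAMS are examined and
      // greyed here.)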
      oop obj = static_cast<oop>(entry);
      assert(obj->is_oop(true /* ignore mark word */),
             "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
      _task->make_reference_grey(obj);
    }
  }

 public:
  G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  G1CMSATBBufferClosure _cm_satb_cl;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

 public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
    _cm_satb_cl(task, g1h),
    _cm_cl(g1h, g1h->concurrent_mark(), task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the
        // nmethods to find roots for concurrent marking; however, the oops
        // reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader
        // (or klass_holder) of the receiver, should be live by the SATB
        // invariant, but other oops recorded in nmethods may behave
        // differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
      }
    }
  }
};

class G1CMRemarkTask: public AbstractGangTask {
 private:
  G1ConcurrentMark* _cm;
 public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true         /* do_termination */,
                              false        /* is_serial */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
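      // (On overflow, do_marking_step() will already have synced up with
      // the other tasks via the two overflow barriers before returning;
      // see the overflow handling at the end of do_marking_step() below.)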
      task->record_end_time();
    }
  }

  G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void G1ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);

  g1h->ensure_parsability(false);

  // this is remark, so we'll use up all active threads
  uint active_workers = g1h->workers()->active_workers();
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the G1ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  {
    StrongRootsScope srs(active_workers);

    G1CMRemarkTask remarkTask(this, active_workers);
    // We will start all available threads, even if we decide that the
    // active_workers will be fewer. The extra ones will just bail out
    // immediately.
    g1h->workers()->run_task(&remarkTask);
  }

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
            BOOL_TO_STR(has_overflown()),
            satb_mq_set.completed_buffers_num());

  print_stats();
}

void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
}

HeapRegion*
G1ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    HeapRegion* curr_region = _g1h->heap_region_containing(finger);

    // Above, heap_region_containing may return NULL as we always scan
    // until the end of the heap. In this case, just jump to the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit  = curr_region->next_top_at_mark_start();

      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
      assert(_finger >= end, "the finger should have moved forward");

      if (limit > bottom) {
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        // we return NULL and the caller should try calling
        // claim_region() again.
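        // (The claiming loop in G1CMTask::do_marking_step() below does
        // exactly this: it keeps calling claim_region() until it either
        // gets a region back or out_of_regions() becomes true.)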
        return NULL;
      }
    } else {
      assert(_finger > finger, "the finger should have moved forward");
      // read it again
      finger = _finger;
    }
  }

  return NULL;
}

#ifndef PRODUCT
class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
 private:
  G1CollectedHeap* _g1h;
  const char* _phase;
  int _info;

 public:
  VerifyNoCSetOops(const char* phase, int info = -1) :
    _g1h(G1CollectedHeap::heap()),
    _phase(phase),
    _info(info)
  { }

  void operator()(oop obj) const {
    guarantee(obj->is_oop(),
              "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
              p2i(obj), _phase, _info);
    guarantee(!_g1h->obj_in_cs(obj),
              "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
              p2i(obj), _phase, _info);
  }
};

void G1ConcurrentMark::verify_no_cset_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
    return;
  }

  // Verify entries on the global mark stack
  _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));

  // Verify entries on the task queues
  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->iterate(VerifyNoCSetOops("Queue", i));
  }

  // Verify the global finger
  HeapWord* global_finger = finger();
  if (global_finger != NULL && global_finger < _heap_end) {
    // Since we always iterate over all regions, we might get a NULL HeapRegion
    // here.
    HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              "global finger: " PTR_FORMAT " region: " HR_FORMAT,
              p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
  }

  // Verify the task fingers
  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  for (uint i = 0; i < parallel_marking_threads(); ++i) {
    G1CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap_end) {
      // See above note on the global finger verification.
      HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                !task_hr->in_collection_set(),
                "task finger: " PTR_FORMAT " region: " HR_FORMAT,
                p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
    }
  }
}
#endif // PRODUCT

void G1ConcurrentMark::create_live_data() {
  _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap);
}

void G1ConcurrentMark::finalize_live_data() {
  _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap);
}

void G1ConcurrentMark::verify_live_data() {
  _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap);
}

void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
  _g1h->g1_rem_set()->clear_card_live_data(workers);
}

#ifdef ASSERT
void G1ConcurrentMark::verify_live_data_clear() {
  _g1h->g1_rem_set()->verify_card_live_data_is_clear();
}
#endif

void G1ConcurrentMark::print_stats() {
  if (!log_is_enabled(Debug, gc, stats)) {
    return;
  }
  log_debug(gc, stats)("---------------------------------------------------------------------");
  for (size_t i = 0; i < _active_tasks; ++i) {
    _tasks[i]->print_stats();
    log_debug(gc, stats)("---------------------------------------------------------------------");
  }
}

void G1ConcurrentMark::abort() {
  if (!cmThread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  {
    GCTraceTime(Debug, gc)("Clear Next Bitmap");
    clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
  }
  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  {
    GCTraceTime(Debug, gc)("Clear Live Data");
    clear_live_data(_g1h->workers());
  }
  DEBUG_ONLY({
    GCTraceTime(Debug, gc)("Verify Live Data Clear");
    verify_live_data_clear();
  })
  // Empty mark stack
  reset_marking_state();
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(false, /* new active value */
                                     satb_mq_set.is_active() /* expected_active */);
}

static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    log_trace(gc, marking)("%s  [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

void G1ConcurrentMark::print_summary_info() {
  Log(gc, marking) log;
  if (!log.is_trace()) {
    return;
  }

  log.trace(" Concurrent marking:");
  print_ms_time_info("  ", "init marks", _init_times);
  print_ms_time_info("  ", "remarks", _remark_times);
  {
    print_ms_time_info("     ", "final marks", _remark_mark_times);
    print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
  }
  print_ms_time_info("  ", "cleanups", _cleanup_times);
  log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
            _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  if (G1ScrubRemSets) {
    log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
              _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  }
  log.trace("  Total stop_world time = %8.2f s.",
            (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
  log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
            cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
}

void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  _parallel_workers->print_worker_threads_on(st);
}

void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
  _parallel_workers->threads_do(tc);
}

void G1ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
}

// Closure for iteration over bitmaps
class G1CMBitMapClosure : public BitMapClosure {
 private:
  // the bitmap that is being iterated over
  G1CMBitMap*       _nextMarkBitMap;
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;

 public:
  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) :
    _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }

  bool do_bit(size_t offset) {
    HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
    assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert(addr < _cm->finger(), "invariant");
    assert(addr >= _task->finger(), "invariant");

    // We move this task's local finger along.
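    // (The local finger records how far into the claimed region this task
    // has scanned; if the iteration aborts, do_marking_step() below uses it
    // to resume from the last marked object instead of rescanning.)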
    _task->move_finger_to(addr);

    _task->scan_object(oop(addr));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);

    // if the has_aborted flag has been raised, we need to bail out of
    // the iteration
    return !_task->has_aborted();
  }
};

static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
  ReferenceProcessor* result = g1h->ref_processor_cm();
  assert(result != NULL, "CM reference processor should not be NULL");
  return result;
}

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               G1ConcurrentMark* cm,
                               G1CMTask* task)
  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
    _g1h(g1h), _cm(cm), _task(task)
{ }

void G1CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  _curr_region = hr;
  _finger      = hr->bottom();
  update_region_limit();
}

void G1CMTask::update_region_limit() {
  HeapRegion* hr = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit  = hr->next_top_at_mark_start();

  if (limit == bottom) {
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}

void G1CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  clear_region_fields();
}

void G1CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
  _curr_region  = NULL;
  _finger       = NULL;
  _region_limit = NULL;
}

void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  if (cm_oop_closure == NULL) {
    assert(_cm_oop_closure != NULL, "invariant");
  } else {
    assert(_cm_oop_closure == NULL, "invariant");
  }
  _cm_oop_closure = cm_oop_closure;
}

void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
  guarantee(nextMarkBitMap != NULL, "invariant");
  _nextMarkBitMap = nextMarkBitMap;
  clear_region_fields();

  _calls                     = 0;
  _elapsed_time_ms           = 0.0;
  _termination_time_ms       = 0.0;
  _termination_start_time_ms = 0.0;
}

bool G1CMTask::should_exit_termination() {
  regular_clock_call();
  // This is called when we are in the termination protocol. We should
  // quit if, for some reason, this task wants to abort or the global
  // stack is not empty (this means that we can get work from it).
  return !_cm->mark_stack_empty() || has_aborted();
}

void G1CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
         "shouldn't have been called otherwise");
  regular_clock_call();
}

void G1CMTask::regular_clock_call() {
  if (has_aborted()) return;

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following:

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    set_has_aborted();
    return;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!concurrent()) return;

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    set_has_aborted();
    return;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (3) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    return;
  }

  // (4) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    return;
  }

  // (5) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
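  // (Completed SATB buffers are drained by drain_satb_buffers() below;
  // aborting here lets the caller restart do_marking_step(), which drains
  // them first thing on the next invocation.)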
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    // we do need to process SATB buffers, we'll abort and restart
    // the marking task to do so
    set_has_aborted();
    return;
  }
}

void G1CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit      = _real_words_scanned_limit;

  _real_refs_reached_limit  = _refs_reached + refs_reached_period;
  _refs_reached_limit       = _real_refs_reached_limit;
}

void G1CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier: by knocking 3/4 of
  // a period off each limit, the next clock call comes after at most a
  // quarter of the usual scanning period.

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit  = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}

void G1CMTask::move_entries_to_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the local queue
  oop buffer[global_stack_transfer_size];

  int n = 0;
  oop obj;
  while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
    buffer[n] = obj;
    ++n;
  }

  if (n > 0) {
    // we popped at least one entry from the local queue

    if (!_cm->mark_stack_push(buffer, n)) {
      set_has_aborted();
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void G1CMTask::get_entries_from_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the global stack.
  oop buffer[global_stack_transfer_size];
  size_t n;
  _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  assert(n <= global_stack_transfer_size,
         "we should not pop more than the given limit");
  if (n > 0) {
    // yes, we did actually pop at least one entry
    for (size_t i = 0; i < n; ++i) {
      bool success = _task_queue->push(buffer[i]);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void G1CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) return;

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
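  // For example, with a task queue capacity of N entries, the partial
  // target below works out to MIN2(N / 3, GCDrainStackTargetSize); the
  // actual capacity and flag value depend on build and configuration.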
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    oop obj;
    bool ret = _task_queue->pop_local(obj);
    while (ret) {
      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
      assert(!_g1h->is_on_master_free_list(
                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");

      scan_object(obj);

      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(obj);
      }
    }
  }
}

void G1CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end). Notice that,
  // because we move entries from the global stack in chunks or
  // because another task might be doing the same, we might in fact
  // drop below the target. But, this is not a problem.
  size_t target_size;
  if (partially) {
    target_size = _cm->partial_mark_stack_size_target();
  } else {
    target_size = 0;
  }

  if (_cm->mark_stack_size() > target_size) {
    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      get_entries_from_global_stack();
      drain_local_queue(partially);
    }
  }
}

// SATB Queue has several assumptions on whether to call the par or
// non-par versions of the methods. This is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
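  // Note that regular_clock_call() runs after each buffer, so the task can
  // still abort between buffers (e.g. on yield or time-out) rather than
  // only after the whole queue has been drained.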
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    regular_clock_call();
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}

void G1CMTask::print_stats() {
  log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
                       _worker_id, _calls);
  log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                       _elapsed_time_ms, _termination_time_ms);
  log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                       _step_times_ms.num(), _step_times_ms.avg(),
                       _step_times_ms.sd());
  log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
                       _step_times_ms.maximum(), _step_times_ms.sum());
}

bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
  return _task_queues->steal(worker_id, hash_seed, obj);
}

/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework. It can be called in parallel
    with other invocations of do_marking_step() on different tasks
    (but only one per task, obviously) and concurrently with the
    mutator threads, or during remark, hence it eliminates the need
    for two versions of the code. When called during remark, it will
    pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
    it needs to yield.

    The data structures that it uses to do marking work are the
    following:

    (1) Marking Bitmap. If there are gray objects that appear only
    on the bitmap (this happens either when dealing with an overflow
    or when the initial marking phase has simply marked the roots
    and didn't push them on the stack), then tasks claim heap
    regions whose bitmap they then scan to find gray objects. A
    global finger indicates where the end of the last claimed region
    is. A local finger indicates how far into the region a task has
    scanned. The two fingers are used to determine how to gray an
    object (i.e. whether simply marking it is OK, as it will be
    visited by a task in the future, or whether it needs to be also
    pushed on a stack).

    (2) Local Queue. The local queue of the task which is accessed
    reasonably efficiently by the task. Other tasks can steal from
    it when they run out of work. Throughout the marking phase, a
    task attempts to keep its local queue short but not totally
    empty, so that entries are available for stealing by other
    tasks. Only when there is no more work, a task will totally
    drain its local queue.

    (3) Global Mark Stack. This handles local queue overflow. During
    marking only sets of entries are moved between it and the local
    queues, as access to it requires a mutex and more fine-grain
    interaction with it which might cause contention. If it
    overflows, then the marking phase should restart and iterate
    over the bitmap to identify gray objects.
    Throughout the marking phase, tasks attempt to keep the global
    mark stack at a small length but not totally empty, so that
    entries are available for popping by other tasks. Only when
    there is no more work, tasks will totally drain the global
    mark stack.

    (4) SATB Buffer Queue. This is where completed SATB buffers are
    made available. Buffers are regularly removed from this queue
    and scanned for roots, so that the queue doesn't get too
    long. During remark, all completed buffers are processed, as
    well as the filled-in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

    (1) When the marking phase has been aborted (after a Full GC).

    (2) When a global overflow (on the global stack) has been
    triggered. Before the task aborts, it will actually sync up with
    the other tasks to ensure that all the marking data structures
    (local queues, stacks, fingers etc.) are re-initialized so that
    when do_marking_step() completes, the marking phase can
    immediately restart.

    (3) When enough completed SATB buffers are available. The
    do_marking_step() method only tries to drain SATB buffers right
    at the beginning. So, if enough buffers are available, the
    marking step aborts and the SATB buffers are processed at
    the beginning of the next invocation.

    (4) To yield. When we have to yield, we abort and do the yield
    right at the end of do_marking_step(). This saves us from a lot
    of hassle as, by yielding, we might allow a Full GC. If this
    happens then objects will be compacted underneath our feet, the
    heap might shrink, etc. We save checking for this by just
    aborting and doing the yield right at the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (in
    sub-millisecond intervals) throughout marking. It is this clock
    method that checks all the abort conditions which were mentioned
    above and decides when the task should abort. A work-based scheme
    is used to trigger this clock method: when the number of object
    words the marking phase has scanned or the number of references
    the marking phase has visited reaches a given limit. Additional
    invocations of the clock method have been planted in a few other
    strategic places too. The initial reason for the clock method was
    to avoid calling os::elapsedVTime() too frequently, as it is quite
    expensive. So, once it was in place, it was natural to piggy-back
    all the other conditions on it too and not constantly check them
    throughout the code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.
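
    For example, the serial pattern used by G1CMDrainMarkingStackClosure
    above (a sketch of the existing call site, not new behavior) is:

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true         /* do_termination */,
                              true         /* is_serial */);
      } while (task->has_aborted() && !cm->has_overflown());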

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.

 *****************************************************************************/

void G1CMTask::do_marking_step(double time_target_ms,
                               bool do_termination,
                               bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1Policy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it is
  // possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other G1CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached  = 0;
  recalculate_limits();

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure    cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet.
      // Let's read its limit again to make sure that we do not iterate
      // over a region of the heap that contains garbage
      // (update_region_limit() will also move _finger to the start of
      // the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
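    // (The partial drains keep the local queue short but not empty, so
    // other tasks can still steal; see the scheme described in the large
    // comment above do_marking_step().)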
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region  == NULL, "invariant");
      assert(_finger       == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");
    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");
    while (!has_aborted()) {
      oop obj;
      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The G1CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
    } else {
      // Apparently there's more work to do. Let's abort this task; it
      // will be restarted and we can hopefully find more things to do.
      set_has_aborted();
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.
    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're during the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
  }

  _claimed = false;
}

G1CMTask::G1CMTask(uint worker_id,
                   G1ConcurrentMark* cm,
                   G1CMTaskQueue* task_queue,
                   G1CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX             "###"

#define G1PPRL_ADDR_BASE_FORMAT        " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT             " %-4s"
#define G1PPRL_TYPE_H_FORMAT           " %4s"
#define G1PPRL_BYTE_FORMAT             " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT           " %9s"
#define G1PPRL_DOUBLE_FORMAT           " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT         " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(const char* phase_name)
  : _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
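  // (Because every line carries G1PPRL_LINE_PREFIX, the liveness output can
  // be pulled out of a large log with, for example, a simple `grep "###"`.)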
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "(bytes)");
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type       = r->get_type_str();
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, strong_code_roots_bytes);

  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // add static memory usage to remembered set sizes
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          " SUMMARY"
                          G1PPRL_SUM_MB_FORMAT("capacity")
                          G1PPRL_SUM_MB_PERC_FORMAT("used")
                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                          G1PPRL_SUM_MB_FORMAT("remset")
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
                          perc(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
                          perc(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
                          perc(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
}