1 /* 2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/metadataOnStackMark.hpp" 27 #include "classfile/symbolTable.hpp" 28 #include "code/codeCache.hpp" 29 #include "gc/g1/concurrentMarkThread.inline.hpp" 30 #include "gc/g1/g1CollectedHeap.inline.hpp" 31 #include "gc/g1/g1CollectorPolicy.hpp" 32 #include "gc/g1/g1CollectorState.hpp" 33 #include "gc/g1/g1ConcurrentMark.inline.hpp" 34 #include "gc/g1/g1HeapVerifier.hpp" 35 #include "gc/g1/g1OopClosures.inline.hpp" 36 #include "gc/g1/g1StringDedup.hpp" 37 #include "gc/g1/heapRegion.inline.hpp" 38 #include "gc/g1/heapRegionRemSet.hpp" 39 #include "gc/g1/heapRegionSet.inline.hpp" 40 #include "gc/g1/suspendibleThreadSet.hpp" 41 #include "gc/shared/gcId.hpp" 42 #include "gc/shared/gcTimer.hpp" 43 #include "gc/shared/gcTrace.hpp" 44 #include "gc/shared/gcTraceTime.inline.hpp" 45 #include "gc/shared/genOopClosures.inline.hpp" 46 #include "gc/shared/referencePolicy.hpp" 47 #include "gc/shared/strongRootsScope.hpp" 48 #include "gc/shared/taskqueue.inline.hpp" 49 #include "gc/shared/vmGCOperations.hpp" 50 #include "logging/log.hpp" 51 #include "memory/allocation.hpp" 52 #include "memory/resourceArea.hpp" 53 #include "oops/oop.inline.hpp" 54 #include "runtime/atomic.inline.hpp" 55 #include "runtime/handles.inline.hpp" 56 #include "runtime/java.hpp" 57 #include "runtime/prefetch.inline.hpp" 58 #include "services/memTracker.hpp" 59 60 // Concurrent marking bit map wrapper 61 62 G1CMBitMapRO::G1CMBitMapRO(int shifter) : 63 _bm(), 64 _shifter(shifter) { 65 _bmStartWord = 0; 66 _bmWordSize = 0; 67 } 68 69 HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr, 70 const HeapWord* limit) const { 71 // First we must round addr *up* to a possible object boundary. 72 addr = (HeapWord*)align_size_up((intptr_t)addr, 73 HeapWordSize << _shifter); 74 size_t addrOffset = heapWordToOffset(addr); 75 assert(limit != NULL, "limit must not be NULL"); 76 size_t limitOffset = heapWordToOffset(limit); 77 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset); 78 HeapWord* nextAddr = offsetToHeapWord(nextOffset); 79 assert(nextAddr >= addr, "get_next_one postcondition"); 80 assert(nextAddr == limit || isMarked(nextAddr), 81 "get_next_one postcondition"); 82 return nextAddr; 83 } 84 85 #ifndef PRODUCT 86 bool G1CMBitMapRO::covers(MemRegion heap_rs) const { 87 // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); 88 assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize, 89 "size inconsistency"); 90 return _bmStartWord == (HeapWord*)(heap_rs.start()) && 91 _bmWordSize == heap_rs.word_size(); 92 } 93 #endif 94 95 void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const { 96 _bm.print_on_error(st, prefix); 97 } 98 99 size_t G1CMBitMap::compute_size(size_t heap_size) { 100 return ReservedSpace::allocation_align_size_up(heap_size / mark_distance()); 101 } 102 103 size_t G1CMBitMap::mark_distance() { 104 return MinObjAlignmentInBytes * BitsPerByte; 105 } 106 107 void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) { 108 _bmStartWord = heap.start(); 109 _bmWordSize = heap.word_size(); 110 111 _bm.set_map((BitMap::bm_word_t*) storage->reserved().start()); 112 _bm.set_size(_bmWordSize >> _shifter); 113 114 storage->set_mapping_changed_listener(&_listener); 115 } 116 117 void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) { 118 if (zero_filled) { 119 return; 120 } 121 // We need to clear the bitmap on commit, removing any existing information. 122 MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords); 123 _bm->clearRange(mr); 124 } 125 126 // Closure used for clearing the given mark bitmap. 127 class ClearBitmapHRClosure : public HeapRegionClosure { 128 private: 129 G1ConcurrentMark* _cm; 130 G1CMBitMap* _bitmap; 131 bool _may_yield; // The closure may yield during iteration. If yielded, abort the iteration. 132 public: 133 ClearBitmapHRClosure(G1ConcurrentMark* cm, G1CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) { 134 assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield."); 135 } 136 137 virtual bool doHeapRegion(HeapRegion* r) { 138 size_t const chunk_size_in_words = M / HeapWordSize; 139 140 HeapWord* cur = r->bottom(); 141 HeapWord* const end = r->end(); 142 143 while (cur < end) { 144 MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end)); 145 _bitmap->clearRange(mr); 146 147 cur += chunk_size_in_words; 148 149 // Abort iteration if after yielding the marking has been aborted. 150 if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) { 151 return true; 152 } 153 // Repeat the asserts from before the start of the closure. We will do them 154 // as asserts here to minimize their overhead on the product. However, we 155 // will have them as guarantees at the beginning / end of the bitmap 156 // clearing to get some checking in the product. 157 assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant"); 158 assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant"); 159 } 160 161 return false; 162 } 163 }; 164 165 class ParClearNextMarkBitmapTask : public AbstractGangTask { 166 ClearBitmapHRClosure* _cl; 167 HeapRegionClaimer _hrclaimer; 168 bool _suspendible; // If the task is suspendible, workers must join the STS. 169 170 public: 171 ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) : 172 _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {} 173 174 void work(uint worker_id) { 175 SuspendibleThreadSetJoiner sts_join(_suspendible); 176 G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true); 177 } 178 }; 179 180 void G1CMBitMap::clearAll() { 181 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 182 ClearBitmapHRClosure cl(NULL, this, false /* may_yield */); 183 uint n_workers = g1h->workers()->active_workers(); 184 ParClearNextMarkBitmapTask task(&cl, n_workers, false); 185 g1h->workers()->run_task(&task); 186 guarantee(cl.complete(), "Must have completed iteration."); 187 return; 188 } 189 190 void G1CMBitMap::clearRange(MemRegion mr) { 191 mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); 192 assert(!mr.is_empty(), "unexpected empty region"); 193 // convert address range into offset range 194 _bm.at_put_range(heapWordToOffset(mr.start()), 195 heapWordToOffset(mr.end()), false); 196 } 197 198 G1CMMarkStack::G1CMMarkStack(G1ConcurrentMark* cm) : 199 _base(NULL), _cm(cm) 200 {} 201 202 bool G1CMMarkStack::allocate(size_t capacity) { 203 // allocate a stack of the requisite depth 204 ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop))); 205 if (!rs.is_reserved()) { 206 warning("ConcurrentMark MarkStack allocation failure"); 207 return false; 208 } 209 MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); 210 if (!_virtual_space.initialize(rs, rs.size())) { 211 warning("ConcurrentMark MarkStack backing store failure"); 212 // Release the virtual memory reserved for the marking stack 213 rs.release(); 214 return false; 215 } 216 assert(_virtual_space.committed_size() == rs.size(), 217 "Didn't reserve backing store for all of G1ConcurrentMark stack?"); 218 _base = (oop*) _virtual_space.low(); 219 setEmpty(); 220 _capacity = (jint) capacity; 221 _saved_index = -1; 222 _should_expand = false; 223 return true; 224 } 225 226 void G1CMMarkStack::expand() { 227 // Called, during remark, if we've overflown the marking stack during marking. 228 assert(isEmpty(), "stack should been emptied while handling overflow"); 229 assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted"); 230 // Clear expansion flag 231 _should_expand = false; 232 if (_capacity == (jint) MarkStackSizeMax) { 233 log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit"); 234 return; 235 } 236 // Double capacity if possible 237 jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax); 238 // Do not give up existing stack until we have managed to 239 // get the double capacity that we desired. 240 ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity * 241 sizeof(oop))); 242 if (rs.is_reserved()) { 243 // Release the backing store associated with old stack 244 _virtual_space.release(); 245 // Reinitialize virtual space for new stack 246 if (!_virtual_space.initialize(rs, rs.size())) { 247 fatal("Not enough swap for expanded marking stack capacity"); 248 } 249 _base = (oop*)(_virtual_space.low()); 250 _index = 0; 251 _capacity = new_capacity; 252 } else { 253 // Failed to double capacity, continue; 254 log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K", 255 _capacity / K, new_capacity / K); 256 } 257 } 258 259 void G1CMMarkStack::set_should_expand() { 260 // If we're resetting the marking state because of an 261 // marking stack overflow, record that we should, if 262 // possible, expand the stack. 263 _should_expand = _cm->has_overflown(); 264 } 265 266 G1CMMarkStack::~G1CMMarkStack() { 267 if (_base != NULL) { 268 _base = NULL; 269 _virtual_space.release(); 270 } 271 } 272 273 void G1CMMarkStack::par_push_arr(oop* ptr_arr, int n) { 274 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 275 jint start = _index; 276 jint next_index = start + n; 277 if (next_index > _capacity) { 278 _overflow = true; 279 return; 280 } 281 // Otherwise. 282 _index = next_index; 283 for (int i = 0; i < n; i++) { 284 int ind = start + i; 285 assert(ind < _capacity, "By overflow test above."); 286 _base[ind] = ptr_arr[i]; 287 } 288 } 289 290 bool G1CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) { 291 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 292 jint index = _index; 293 if (index == 0) { 294 *n = 0; 295 return false; 296 } else { 297 int k = MIN2(max, index); 298 jint new_ind = index - k; 299 for (int j = 0; j < k; j++) { 300 ptr_arr[j] = _base[new_ind + j]; 301 } 302 _index = new_ind; 303 *n = k; 304 return true; 305 } 306 } 307 308 void G1CMMarkStack::note_start_of_gc() { 309 assert(_saved_index == -1, 310 "note_start_of_gc()/end_of_gc() bracketed incorrectly"); 311 _saved_index = _index; 312 } 313 314 void G1CMMarkStack::note_end_of_gc() { 315 // This is intentionally a guarantee, instead of an assert. If we 316 // accidentally add something to the mark stack during GC, it 317 // will be a correctness issue so it's better if we crash. we'll 318 // only check this once per GC anyway, so it won't be a performance 319 // issue in any way. 320 guarantee(_saved_index == _index, 321 "saved index: %d index: %d", _saved_index, _index); 322 _saved_index = -1; 323 } 324 325 G1CMRootRegions::G1CMRootRegions() : 326 _young_list(NULL), _cm(NULL), _scan_in_progress(false), 327 _should_abort(false), _next_survivor(NULL) { } 328 329 void G1CMRootRegions::init(G1CollectedHeap* g1h, G1ConcurrentMark* cm) { 330 _young_list = g1h->young_list(); 331 _cm = cm; 332 } 333 334 void G1CMRootRegions::prepare_for_scan() { 335 assert(!scan_in_progress(), "pre-condition"); 336 337 // Currently, only survivors can be root regions. 338 assert(_next_survivor == NULL, "pre-condition"); 339 _next_survivor = _young_list->first_survivor_region(); 340 _scan_in_progress = (_next_survivor != NULL); 341 _should_abort = false; 342 } 343 344 HeapRegion* G1CMRootRegions::claim_next() { 345 if (_should_abort) { 346 // If someone has set the should_abort flag, we return NULL to 347 // force the caller to bail out of their loop. 348 return NULL; 349 } 350 351 // Currently, only survivors can be root regions. 352 HeapRegion* res = _next_survivor; 353 if (res != NULL) { 354 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); 355 // Read it again in case it changed while we were waiting for the lock. 356 res = _next_survivor; 357 if (res != NULL) { 358 if (res == _young_list->last_survivor_region()) { 359 // We just claimed the last survivor so store NULL to indicate 360 // that we're done. 361 _next_survivor = NULL; 362 } else { 363 _next_survivor = res->get_next_young_region(); 364 } 365 } else { 366 // Someone else claimed the last survivor while we were trying 367 // to take the lock so nothing else to do. 368 } 369 } 370 assert(res == NULL || res->is_survivor(), "post-condition"); 371 372 return res; 373 } 374 375 void G1CMRootRegions::notify_scan_done() { 376 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); 377 _scan_in_progress = false; 378 RootRegionScan_lock->notify_all(); 379 } 380 381 void G1CMRootRegions::cancel_scan() { 382 notify_scan_done(); 383 } 384 385 void G1CMRootRegions::scan_finished() { 386 assert(scan_in_progress(), "pre-condition"); 387 388 // Currently, only survivors can be root regions. 389 if (!_should_abort) { 390 assert(_next_survivor == NULL, "we should have claimed all survivors"); 391 } 392 _next_survivor = NULL; 393 394 notify_scan_done(); 395 } 396 397 bool G1CMRootRegions::wait_until_scan_finished() { 398 if (!scan_in_progress()) return false; 399 400 { 401 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); 402 while (scan_in_progress()) { 403 RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag); 404 } 405 } 406 return true; 407 } 408 409 uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) { 410 return MAX2((n_par_threads + 2) / 4, 1U); 411 } 412 413 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) : 414 _g1h(g1h), 415 _markBitMap1(), 416 _markBitMap2(), 417 _parallel_marking_threads(0), 418 _max_parallel_marking_threads(0), 419 _sleep_factor(0.0), 420 _marking_task_overhead(1.0), 421 _cleanup_list("Cleanup List"), 422 _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/), 423 _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >> 424 CardTableModRefBS::card_shift, 425 false /* in_resource_area*/), 426 427 _prevMarkBitMap(&_markBitMap1), 428 _nextMarkBitMap(&_markBitMap2), 429 430 _markStack(this), 431 // _finger set in set_non_marking_state 432 433 _max_worker_id(ParallelGCThreads), 434 // _active_tasks set in set_non_marking_state 435 // _tasks set inside the constructor 436 _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)), 437 _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)), 438 439 _has_overflown(false), 440 _concurrent(false), 441 _has_aborted(false), 442 _restart_for_overflow(false), 443 _concurrent_marking_in_progress(false), 444 _concurrent_phase_status(ConcPhaseNotStarted), 445 446 // _verbose_level set below 447 448 _init_times(), 449 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(), 450 _cleanup_times(), 451 _total_counting_time(0.0), 452 _total_rs_scrub_time(0.0), 453 454 _parallel_workers(NULL), 455 456 _count_card_bitmaps(NULL), 457 _count_marked_bytes(NULL), 458 _completed_initialization(false) { 459 460 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage); 461 _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage); 462 463 // Create & start a ConcurrentMark thread. 464 _cmThread = new ConcurrentMarkThread(this); 465 assert(cmThread() != NULL, "CM Thread should have been created"); 466 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm"); 467 if (_cmThread->osthread() == NULL) { 468 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread"); 469 } 470 471 assert(CGC_lock != NULL, "Where's the CGC_lock?"); 472 assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency"); 473 assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency"); 474 475 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 476 satb_qs.set_buffer_size(G1SATBBufferSize); 477 478 _root_regions.init(_g1h, this); 479 480 if (ConcGCThreads > ParallelGCThreads) { 481 warning("Can't have more ConcGCThreads (%u) " 482 "than ParallelGCThreads (%u).", 483 ConcGCThreads, ParallelGCThreads); 484 return; 485 } 486 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) { 487 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent 488 // if both are set 489 _sleep_factor = 0.0; 490 _marking_task_overhead = 1.0; 491 } else if (G1MarkingOverheadPercent > 0) { 492 // We will calculate the number of parallel marking threads based 493 // on a target overhead with respect to the soft real-time goal 494 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0; 495 double overall_cm_overhead = 496 (double) MaxGCPauseMillis * marking_overhead / 497 (double) GCPauseIntervalMillis; 498 double cpu_ratio = 1.0 / (double) os::processor_count(); 499 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio); 500 double marking_task_overhead = 501 overall_cm_overhead / marking_thread_num * 502 (double) os::processor_count(); 503 double sleep_factor = 504 (1.0 - marking_task_overhead) / marking_task_overhead; 505 506 FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num); 507 _sleep_factor = sleep_factor; 508 _marking_task_overhead = marking_task_overhead; 509 } else { 510 // Calculate the number of parallel marking threads by scaling 511 // the number of parallel GC threads. 512 uint marking_thread_num = scale_parallel_threads(ParallelGCThreads); 513 FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num); 514 _sleep_factor = 0.0; 515 _marking_task_overhead = 1.0; 516 } 517 518 assert(ConcGCThreads > 0, "Should have been set"); 519 _parallel_marking_threads = ConcGCThreads; 520 _max_parallel_marking_threads = _parallel_marking_threads; 521 522 _parallel_workers = new WorkGang("G1 Marker", 523 _max_parallel_marking_threads, false, true); 524 if (_parallel_workers == NULL) { 525 vm_exit_during_initialization("Failed necessary allocation."); 526 } else { 527 _parallel_workers->initialize_workers(); 528 } 529 530 if (FLAG_IS_DEFAULT(MarkStackSize)) { 531 size_t mark_stack_size = 532 MIN2(MarkStackSizeMax, 533 MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE))); 534 // Verify that the calculated value for MarkStackSize is in range. 535 // It would be nice to use the private utility routine from Arguments. 536 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) { 537 warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): " 538 "must be between 1 and " SIZE_FORMAT, 539 mark_stack_size, MarkStackSizeMax); 540 return; 541 } 542 FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size); 543 } else { 544 // Verify MarkStackSize is in range. 545 if (FLAG_IS_CMDLINE(MarkStackSize)) { 546 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) { 547 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { 548 warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): " 549 "must be between 1 and " SIZE_FORMAT, 550 MarkStackSize, MarkStackSizeMax); 551 return; 552 } 553 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) { 554 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { 555 warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")" 556 " or for MarkStackSizeMax (" SIZE_FORMAT ")", 557 MarkStackSize, MarkStackSizeMax); 558 return; 559 } 560 } 561 } 562 } 563 564 if (!_markStack.allocate(MarkStackSize)) { 565 warning("Failed to allocate CM marking stack"); 566 return; 567 } 568 569 _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC); 570 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC); 571 572 _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC); 573 _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC); 574 575 BitMap::idx_t card_bm_size = _card_bm.size(); 576 577 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail 578 _active_tasks = _max_worker_id; 579 580 uint max_regions = _g1h->max_regions(); 581 for (uint i = 0; i < _max_worker_id; ++i) { 582 G1CMTaskQueue* task_queue = new G1CMTaskQueue(); 583 task_queue->initialize(); 584 _task_queues->register_queue(i, task_queue); 585 586 _count_card_bitmaps[i] = BitMap(card_bm_size, false); 587 _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC); 588 589 _tasks[i] = new G1CMTask(i, this, 590 _count_marked_bytes[i], 591 &_count_card_bitmaps[i], 592 task_queue, _task_queues); 593 594 _accum_task_vtime[i] = 0.0; 595 } 596 597 // Calculate the card number for the bottom of the heap. Used 598 // in biasing indexes into the accounting card bitmaps. 599 _heap_bottom_card_num = 600 intptr_t(uintptr_t(_g1h->reserved_region().start()) >> 601 CardTableModRefBS::card_shift); 602 603 // Clear all the liveness counting data 604 clear_all_count_data(); 605 606 // so that the call below can read a sensible value 607 _heap_start = g1h->reserved_region().start(); 608 set_non_marking_state(); 609 _completed_initialization = true; 610 } 611 612 void G1ConcurrentMark::reset() { 613 // Starting values for these two. This should be called in a STW 614 // phase. 615 MemRegion reserved = _g1h->g1_reserved(); 616 _heap_start = reserved.start(); 617 _heap_end = reserved.end(); 618 619 // Separated the asserts so that we know which one fires. 620 assert(_heap_start != NULL, "heap bounds should look ok"); 621 assert(_heap_end != NULL, "heap bounds should look ok"); 622 assert(_heap_start < _heap_end, "heap bounds should look ok"); 623 624 // Reset all the marking data structures and any necessary flags 625 reset_marking_state(); 626 627 // We do reset all of them, since different phases will use 628 // different number of active threads. So, it's easiest to have all 629 // of them ready. 630 for (uint i = 0; i < _max_worker_id; ++i) { 631 _tasks[i]->reset(_nextMarkBitMap); 632 } 633 634 // we need this to make sure that the flag is on during the evac 635 // pause with initial mark piggy-backed 636 set_concurrent_marking_in_progress(); 637 } 638 639 640 void G1ConcurrentMark::reset_marking_state(bool clear_overflow) { 641 _markStack.set_should_expand(); 642 _markStack.setEmpty(); // Also clears the _markStack overflow flag 643 if (clear_overflow) { 644 clear_has_overflown(); 645 } else { 646 assert(has_overflown(), "pre-condition"); 647 } 648 _finger = _heap_start; 649 650 for (uint i = 0; i < _max_worker_id; ++i) { 651 G1CMTaskQueue* queue = _task_queues->queue(i); 652 queue->set_empty(); 653 } 654 } 655 656 void G1ConcurrentMark::set_concurrency(uint active_tasks) { 657 assert(active_tasks <= _max_worker_id, "we should not have more"); 658 659 _active_tasks = active_tasks; 660 // Need to update the three data structures below according to the 661 // number of active threads for this phase. 662 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues); 663 _first_overflow_barrier_sync.set_n_workers((int) active_tasks); 664 _second_overflow_barrier_sync.set_n_workers((int) active_tasks); 665 } 666 667 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) { 668 set_concurrency(active_tasks); 669 670 _concurrent = concurrent; 671 // We propagate this to all tasks, not just the active ones. 672 for (uint i = 0; i < _max_worker_id; ++i) 673 _tasks[i]->set_concurrent(concurrent); 674 675 if (concurrent) { 676 set_concurrent_marking_in_progress(); 677 } else { 678 // We currently assume that the concurrent flag has been set to 679 // false before we start remark. At this point we should also be 680 // in a STW phase. 681 assert(!concurrent_marking_in_progress(), "invariant"); 682 assert(out_of_regions(), 683 "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT, 684 p2i(_finger), p2i(_heap_end)); 685 } 686 } 687 688 void G1ConcurrentMark::set_non_marking_state() { 689 // We set the global marking state to some default values when we're 690 // not doing marking. 691 reset_marking_state(); 692 _active_tasks = 0; 693 clear_concurrent_marking_in_progress(); 694 } 695 696 G1ConcurrentMark::~G1ConcurrentMark() { 697 // The G1ConcurrentMark instance is never freed. 698 ShouldNotReachHere(); 699 } 700 701 void G1ConcurrentMark::clearNextBitmap() { 702 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 703 704 // Make sure that the concurrent mark thread looks to still be in 705 // the current cycle. 706 guarantee(cmThread()->during_cycle(), "invariant"); 707 708 // We are finishing up the current cycle by clearing the next 709 // marking bitmap and getting it ready for the next cycle. During 710 // this time no other cycle can start. So, let's make sure that this 711 // is the case. 712 guarantee(!g1h->collector_state()->mark_in_progress(), "invariant"); 713 714 ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */); 715 ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true); 716 _parallel_workers->run_task(&task); 717 718 // Clear the liveness counting data. If the marking has been aborted, the abort() 719 // call already did that. 720 if (cl.complete()) { 721 clear_all_count_data(); 722 } 723 724 // Repeat the asserts from above. 725 guarantee(cmThread()->during_cycle(), "invariant"); 726 guarantee(!g1h->collector_state()->mark_in_progress(), "invariant"); 727 } 728 729 class CheckBitmapClearHRClosure : public HeapRegionClosure { 730 G1CMBitMap* _bitmap; 731 bool _error; 732 public: 733 CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) { 734 } 735 736 virtual bool doHeapRegion(HeapRegion* r) { 737 // This closure can be called concurrently to the mutator, so we must make sure 738 // that the result of the getNextMarkedWordAddress() call is compared to the 739 // value passed to it as limit to detect any found bits. 740 // end never changes in G1. 741 HeapWord* end = r->end(); 742 return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end; 743 } 744 }; 745 746 bool G1ConcurrentMark::nextMarkBitmapIsClear() { 747 CheckBitmapClearHRClosure cl(_nextMarkBitMap); 748 _g1h->heap_region_iterate(&cl); 749 return cl.complete(); 750 } 751 752 class NoteStartOfMarkHRClosure: public HeapRegionClosure { 753 public: 754 bool doHeapRegion(HeapRegion* r) { 755 r->note_start_of_marking(); 756 return false; 757 } 758 }; 759 760 void G1ConcurrentMark::checkpointRootsInitialPre() { 761 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 762 G1CollectorPolicy* g1p = g1h->g1_policy(); 763 764 _has_aborted = false; 765 766 // Initialize marking structures. This has to be done in a STW phase. 767 reset(); 768 769 // For each region note start of marking. 770 NoteStartOfMarkHRClosure startcl; 771 g1h->heap_region_iterate(&startcl); 772 } 773 774 775 void G1ConcurrentMark::checkpointRootsInitialPost() { 776 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 777 778 // Start Concurrent Marking weak-reference discovery. 779 ReferenceProcessor* rp = g1h->ref_processor_cm(); 780 // enable ("weak") refs discovery 781 rp->enable_discovery(); 782 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle 783 784 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 785 // This is the start of the marking cycle, we're expected all 786 // threads to have SATB queues with active set to false. 787 satb_mq_set.set_active_all_threads(true, /* new active value */ 788 false /* expected_active */); 789 790 _root_regions.prepare_for_scan(); 791 792 // update_g1_committed() will be called at the end of an evac pause 793 // when marking is on. So, it's also called at the end of the 794 // initial-mark pause to update the heap end, if the heap expands 795 // during it. No need to call it here. 796 } 797 798 /* 799 * Notice that in the next two methods, we actually leave the STS 800 * during the barrier sync and join it immediately afterwards. If we 801 * do not do this, the following deadlock can occur: one thread could 802 * be in the barrier sync code, waiting for the other thread to also 803 * sync up, whereas another one could be trying to yield, while also 804 * waiting for the other threads to sync up too. 805 * 806 * Note, however, that this code is also used during remark and in 807 * this case we should not attempt to leave / enter the STS, otherwise 808 * we'll either hit an assert (debug / fastdebug) or deadlock 809 * (product). So we should only leave / enter the STS if we are 810 * operating concurrently. 811 * 812 * Because the thread that does the sync barrier has left the STS, it 813 * is possible to be suspended for a Full GC or an evacuation pause 814 * could occur. This is actually safe, since the entering the sync 815 * barrier is one of the last things do_marking_step() does, and it 816 * doesn't manipulate any data structures afterwards. 817 */ 818 819 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) { 820 bool barrier_aborted; 821 { 822 SuspendibleThreadSetLeaver sts_leave(concurrent()); 823 barrier_aborted = !_first_overflow_barrier_sync.enter(); 824 } 825 826 // at this point everyone should have synced up and not be doing any 827 // more work 828 829 if (barrier_aborted) { 830 // If the barrier aborted we ignore the overflow condition and 831 // just abort the whole marking phase as quickly as possible. 832 return; 833 } 834 835 // If we're executing the concurrent phase of marking, reset the marking 836 // state; otherwise the marking state is reset after reference processing, 837 // during the remark pause. 838 // If we reset here as a result of an overflow during the remark we will 839 // see assertion failures from any subsequent set_concurrency_and_phase() 840 // calls. 841 if (concurrent()) { 842 // let the task associated with with worker 0 do this 843 if (worker_id == 0) { 844 // task 0 is responsible for clearing the global data structures 845 // We should be here because of an overflow. During STW we should 846 // not clear the overflow flag since we rely on it being true when 847 // we exit this method to abort the pause and restart concurrent 848 // marking. 849 reset_marking_state(true /* clear_overflow */); 850 851 log_info(gc)("Concurrent Mark reset for overflow"); 852 } 853 } 854 855 // after this, each task should reset its own data structures then 856 // then go into the second barrier 857 } 858 859 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) { 860 SuspendibleThreadSetLeaver sts_leave(concurrent()); 861 _second_overflow_barrier_sync.enter(); 862 863 // at this point everything should be re-initialized and ready to go 864 } 865 866 class G1CMConcurrentMarkingTask: public AbstractGangTask { 867 private: 868 G1ConcurrentMark* _cm; 869 ConcurrentMarkThread* _cmt; 870 871 public: 872 void work(uint worker_id) { 873 assert(Thread::current()->is_ConcurrentGC_thread(), 874 "this should only be done by a conc GC thread"); 875 ResourceMark rm; 876 877 double start_vtime = os::elapsedVTime(); 878 879 { 880 SuspendibleThreadSetJoiner sts_join; 881 882 assert(worker_id < _cm->active_tasks(), "invariant"); 883 G1CMTask* the_task = _cm->task(worker_id); 884 the_task->record_start_time(); 885 if (!_cm->has_aborted()) { 886 do { 887 double start_vtime_sec = os::elapsedVTime(); 888 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 889 890 the_task->do_marking_step(mark_step_duration_ms, 891 true /* do_termination */, 892 false /* is_serial*/); 893 894 double end_vtime_sec = os::elapsedVTime(); 895 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; 896 _cm->clear_has_overflown(); 897 898 _cm->do_yield_check(worker_id); 899 900 jlong sleep_time_ms; 901 if (!_cm->has_aborted() && the_task->has_aborted()) { 902 sleep_time_ms = 903 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0); 904 { 905 SuspendibleThreadSetLeaver sts_leave; 906 os::sleep(Thread::current(), sleep_time_ms, false); 907 } 908 } 909 } while (!_cm->has_aborted() && the_task->has_aborted()); 910 } 911 the_task->record_end_time(); 912 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant"); 913 } 914 915 double end_vtime = os::elapsedVTime(); 916 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime); 917 } 918 919 G1CMConcurrentMarkingTask(G1ConcurrentMark* cm, 920 ConcurrentMarkThread* cmt) : 921 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { } 922 923 ~G1CMConcurrentMarkingTask() { } 924 }; 925 926 // Calculates the number of active workers for a concurrent 927 // phase. 928 uint G1ConcurrentMark::calc_parallel_marking_threads() { 929 uint n_conc_workers = 0; 930 if (!UseDynamicNumberOfGCThreads || 931 (!FLAG_IS_DEFAULT(ConcGCThreads) && 932 !ForceDynamicNumberOfGCThreads)) { 933 n_conc_workers = max_parallel_marking_threads(); 934 } else { 935 n_conc_workers = 936 AdaptiveSizePolicy::calc_default_active_workers( 937 max_parallel_marking_threads(), 938 1, /* Minimum workers */ 939 parallel_marking_threads(), 940 Threads::number_of_non_daemon_threads()); 941 // Don't scale down "n_conc_workers" by scale_parallel_threads() because 942 // that scaling has already gone into "_max_parallel_marking_threads". 943 } 944 assert(n_conc_workers > 0, "Always need at least 1"); 945 return n_conc_workers; 946 } 947 948 void G1ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) { 949 // Currently, only survivors can be root regions. 950 assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant"); 951 G1RootRegionScanClosure cl(_g1h, this, worker_id); 952 953 const uintx interval = PrefetchScanIntervalInBytes; 954 HeapWord* curr = hr->bottom(); 955 const HeapWord* end = hr->top(); 956 while (curr < end) { 957 Prefetch::read(curr, interval); 958 oop obj = oop(curr); 959 int size = obj->oop_iterate_size(&cl); 960 assert(size == obj->size(), "sanity"); 961 curr += size; 962 } 963 } 964 965 class G1CMRootRegionScanTask : public AbstractGangTask { 966 private: 967 G1ConcurrentMark* _cm; 968 969 public: 970 G1CMRootRegionScanTask(G1ConcurrentMark* cm) : 971 AbstractGangTask("Root Region Scan"), _cm(cm) { } 972 973 void work(uint worker_id) { 974 assert(Thread::current()->is_ConcurrentGC_thread(), 975 "this should only be done by a conc GC thread"); 976 977 G1CMRootRegions* root_regions = _cm->root_regions(); 978 HeapRegion* hr = root_regions->claim_next(); 979 while (hr != NULL) { 980 _cm->scanRootRegion(hr, worker_id); 981 hr = root_regions->claim_next(); 982 } 983 } 984 }; 985 986 void G1ConcurrentMark::scanRootRegions() { 987 // scan_in_progress() will have been set to true only if there was 988 // at least one root region to scan. So, if it's false, we 989 // should not attempt to do any further work. 990 if (root_regions()->scan_in_progress()) { 991 assert(!has_aborted(), "Aborting before root region scanning is finished not supported."); 992 GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan"); 993 994 _parallel_marking_threads = calc_parallel_marking_threads(); 995 assert(parallel_marking_threads() <= max_parallel_marking_threads(), 996 "Maximum number of marking threads exceeded"); 997 uint active_workers = MAX2(1U, parallel_marking_threads()); 998 999 G1CMRootRegionScanTask task(this); 1000 _parallel_workers->set_active_workers(active_workers); 1001 _parallel_workers->run_task(&task); 1002 1003 // It's possible that has_aborted() is true here without actually 1004 // aborting the survivor scan earlier. This is OK as it's 1005 // mainly used for sanity checking. 1006 root_regions()->scan_finished(); 1007 } 1008 } 1009 1010 void G1ConcurrentMark::register_concurrent_phase_start(const char* title) { 1011 uint old_val = 0; 1012 do { 1013 old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted); 1014 } while (old_val != ConcPhaseNotStarted); 1015 _g1h->gc_timer_cm()->register_gc_concurrent_start(title); 1016 } 1017 1018 void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) { 1019 if (_concurrent_phase_status == ConcPhaseNotStarted) { 1020 return; 1021 } 1022 1023 uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted); 1024 if (old_val == ConcPhaseStarted) { 1025 _g1h->gc_timer_cm()->register_gc_concurrent_end(); 1026 // If 'end_timer' is true, we came here to end timer which needs concurrent phase ended. 1027 // We need to end it before changing the status to 'ConcPhaseNotStarted' to prevent 1028 // starting a new concurrent phase by 'ConcurrentMarkThread'. 1029 if (end_timer) { 1030 _g1h->gc_timer_cm()->register_gc_end(); 1031 } 1032 old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping); 1033 assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope."); 1034 } else { 1035 do { 1036 // Let other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'. 1037 os::naked_short_sleep(1); 1038 } while (_concurrent_phase_status != ConcPhaseNotStarted); 1039 } 1040 } 1041 1042 void G1ConcurrentMark::register_concurrent_phase_end() { 1043 register_concurrent_phase_end_common(false); 1044 } 1045 1046 void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() { 1047 register_concurrent_phase_end_common(true); 1048 } 1049 1050 void G1ConcurrentMark::markFromRoots() { 1051 // we might be tempted to assert that: 1052 // assert(asynch == !SafepointSynchronize::is_at_safepoint(), 1053 // "inconsistent argument?"); 1054 // However that wouldn't be right, because it's possible that 1055 // a safepoint is indeed in progress as a younger generation 1056 // stop-the-world GC happens even as we mark in this generation. 1057 1058 _restart_for_overflow = false; 1059 1060 // _g1h has _n_par_threads 1061 _parallel_marking_threads = calc_parallel_marking_threads(); 1062 assert(parallel_marking_threads() <= max_parallel_marking_threads(), 1063 "Maximum number of marking threads exceeded"); 1064 1065 uint active_workers = MAX2(1U, parallel_marking_threads()); 1066 assert(active_workers > 0, "Should have been set"); 1067 1068 // Parallel task terminator is set in "set_concurrency_and_phase()" 1069 set_concurrency_and_phase(active_workers, true /* concurrent */); 1070 1071 G1CMConcurrentMarkingTask markingTask(this, cmThread()); 1072 _parallel_workers->set_active_workers(active_workers); 1073 _parallel_workers->run_task(&markingTask); 1074 print_stats(); 1075 } 1076 1077 void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { 1078 // world is stopped at this checkpoint 1079 assert(SafepointSynchronize::is_at_safepoint(), 1080 "world should be stopped"); 1081 1082 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1083 1084 // If a full collection has happened, we shouldn't do this. 1085 if (has_aborted()) { 1086 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused 1087 return; 1088 } 1089 1090 if (VerifyDuringGC) { 1091 HandleMark hm; // handle scope 1092 g1h->prepare_for_verify(); 1093 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)"); 1094 } 1095 g1h->verifier()->check_bitmaps("Remark Start"); 1096 1097 G1CollectorPolicy* g1p = g1h->g1_policy(); 1098 g1p->record_concurrent_mark_remark_start(); 1099 1100 double start = os::elapsedTime(); 1101 1102 checkpointRootsFinalWork(); 1103 1104 double mark_work_end = os::elapsedTime(); 1105 1106 weakRefsWork(clear_all_soft_refs); 1107 1108 if (has_overflown()) { 1109 // Oops. We overflowed. Restart concurrent marking. 1110 _restart_for_overflow = true; 1111 log_develop_trace(gc)("Remark led to restart for overflow."); 1112 1113 // Verify the heap w.r.t. the previous marking bitmap. 1114 if (VerifyDuringGC) { 1115 HandleMark hm; // handle scope 1116 g1h->prepare_for_verify(); 1117 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)"); 1118 } 1119 1120 // Clear the marking state because we will be restarting 1121 // marking due to overflowing the global mark stack. 1122 reset_marking_state(); 1123 } else { 1124 { 1125 GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm()); 1126 1127 // Aggregate the per-task counting data that we have accumulated 1128 // while marking. 1129 aggregate_count_data(); 1130 } 1131 1132 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 1133 // We're done with marking. 1134 // This is the end of the marking cycle, we're expected all 1135 // threads to have SATB queues with active set to true. 1136 satb_mq_set.set_active_all_threads(false, /* new active value */ 1137 true /* expected_active */); 1138 1139 if (VerifyDuringGC) { 1140 HandleMark hm; // handle scope 1141 g1h->prepare_for_verify(); 1142 Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)"); 1143 } 1144 g1h->verifier()->check_bitmaps("Remark End"); 1145 assert(!restart_for_overflow(), "sanity"); 1146 // Completely reset the marking state since marking completed 1147 set_non_marking_state(); 1148 } 1149 1150 // Expand the marking stack, if we have to and if we can. 1151 if (_markStack.should_expand()) { 1152 _markStack.expand(); 1153 } 1154 1155 // Statistics 1156 double now = os::elapsedTime(); 1157 _remark_mark_times.add((mark_work_end - start) * 1000.0); 1158 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0); 1159 _remark_times.add((now - start) * 1000.0); 1160 1161 g1p->record_concurrent_mark_remark_end(); 1162 1163 G1CMIsAliveClosure is_alive(g1h); 1164 g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive); 1165 } 1166 1167 // Base class of the closures that finalize and verify the 1168 // liveness counting data. 1169 class G1CMCountDataClosureBase: public HeapRegionClosure { 1170 protected: 1171 G1CollectedHeap* _g1h; 1172 G1ConcurrentMark* _cm; 1173 CardTableModRefBS* _ct_bs; 1174 1175 BitMap* _region_bm; 1176 BitMap* _card_bm; 1177 1178 // Takes a region that's not empty (i.e., it has at least one 1179 // live object in it and sets its corresponding bit on the region 1180 // bitmap to 1. 1181 void set_bit_for_region(HeapRegion* hr) { 1182 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); 1183 _region_bm->par_at_put(index, true); 1184 } 1185 1186 public: 1187 G1CMCountDataClosureBase(G1CollectedHeap* g1h, 1188 BitMap* region_bm, BitMap* card_bm): 1189 _g1h(g1h), _cm(g1h->concurrent_mark()), 1190 _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())), 1191 _region_bm(region_bm), _card_bm(card_bm) { } 1192 }; 1193 1194 // Closure that calculates the # live objects per region. Used 1195 // for verification purposes during the cleanup pause. 1196 class CalcLiveObjectsClosure: public G1CMCountDataClosureBase { 1197 G1CMBitMapRO* _bm; 1198 size_t _region_marked_bytes; 1199 1200 public: 1201 CalcLiveObjectsClosure(G1CMBitMapRO *bm, G1CollectedHeap* g1h, 1202 BitMap* region_bm, BitMap* card_bm) : 1203 G1CMCountDataClosureBase(g1h, region_bm, card_bm), 1204 _bm(bm), _region_marked_bytes(0) { } 1205 1206 bool doHeapRegion(HeapRegion* hr) { 1207 HeapWord* ntams = hr->next_top_at_mark_start(); 1208 HeapWord* start = hr->bottom(); 1209 1210 assert(start <= hr->end() && start <= ntams && ntams <= hr->end(), 1211 "Preconditions not met - " 1212 "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT, 1213 p2i(start), p2i(ntams), p2i(hr->end())); 1214 1215 // Find the first marked object at or after "start". 1216 start = _bm->getNextMarkedWordAddress(start, ntams); 1217 1218 size_t marked_bytes = 0; 1219 1220 while (start < ntams) { 1221 oop obj = oop(start); 1222 int obj_sz = obj->size(); 1223 HeapWord* obj_end = start + obj_sz; 1224 1225 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); 1226 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end); 1227 1228 // Note: if we're looking at the last region in heap - obj_end 1229 // could be actually just beyond the end of the heap; end_idx 1230 // will then correspond to a (non-existent) card that is also 1231 // just beyond the heap. 1232 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) { 1233 // end of object is not card aligned - increment to cover 1234 // all the cards spanned by the object 1235 end_idx += 1; 1236 } 1237 1238 // Set the bits in the card BM for the cards spanned by this object. 1239 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1240 1241 // Add the size of this object to the number of marked bytes. 1242 marked_bytes += (size_t)obj_sz * HeapWordSize; 1243 1244 // This will happen if we are handling a humongous object that spans 1245 // several heap regions. 1246 if (obj_end > hr->end()) { 1247 break; 1248 } 1249 // Find the next marked object after this one. 1250 start = _bm->getNextMarkedWordAddress(obj_end, ntams); 1251 } 1252 1253 // Mark the allocated-since-marking portion... 1254 HeapWord* top = hr->top(); 1255 if (ntams < top) { 1256 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1257 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1258 1259 // Note: if we're looking at the last region in heap - top 1260 // could be actually just beyond the end of the heap; end_idx 1261 // will then correspond to a (non-existent) card that is also 1262 // just beyond the heap. 1263 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1264 // end of object is not card aligned - increment to cover 1265 // all the cards spanned by the object 1266 end_idx += 1; 1267 } 1268 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1269 1270 // This definitely means the region has live objects. 1271 set_bit_for_region(hr); 1272 } 1273 1274 // Update the live region bitmap. 1275 if (marked_bytes > 0) { 1276 set_bit_for_region(hr); 1277 } 1278 1279 // Set the marked bytes for the current region so that 1280 // it can be queried by a calling verification routine 1281 _region_marked_bytes = marked_bytes; 1282 1283 return false; 1284 } 1285 1286 size_t region_marked_bytes() const { return _region_marked_bytes; } 1287 }; 1288 1289 // Heap region closure used for verifying the counting data 1290 // that was accumulated concurrently and aggregated during 1291 // the remark pause. This closure is applied to the heap 1292 // regions during the STW cleanup pause. 1293 1294 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure { 1295 G1CollectedHeap* _g1h; 1296 G1ConcurrentMark* _cm; 1297 CalcLiveObjectsClosure _calc_cl; 1298 BitMap* _region_bm; // Region BM to be verified 1299 BitMap* _card_bm; // Card BM to be verified 1300 1301 BitMap* _exp_region_bm; // Expected Region BM values 1302 BitMap* _exp_card_bm; // Expected card BM values 1303 1304 int _failures; 1305 1306 public: 1307 VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h, 1308 BitMap* region_bm, 1309 BitMap* card_bm, 1310 BitMap* exp_region_bm, 1311 BitMap* exp_card_bm) : 1312 _g1h(g1h), _cm(g1h->concurrent_mark()), 1313 _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm), 1314 _region_bm(region_bm), _card_bm(card_bm), 1315 _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm), 1316 _failures(0) { } 1317 1318 int failures() const { return _failures; } 1319 1320 bool doHeapRegion(HeapRegion* hr) { 1321 int failures = 0; 1322 1323 // Call the CalcLiveObjectsClosure to walk the marking bitmap for 1324 // this region and set the corresponding bits in the expected region 1325 // and card bitmaps. 1326 bool res = _calc_cl.doHeapRegion(hr); 1327 assert(res == false, "should be continuing"); 1328 1329 // Verify the marked bytes for this region. 1330 size_t exp_marked_bytes = _calc_cl.region_marked_bytes(); 1331 size_t act_marked_bytes = hr->next_marked_bytes(); 1332 1333 if (exp_marked_bytes > act_marked_bytes) { 1334 if (hr->is_starts_humongous()) { 1335 // For start_humongous regions, the size of the whole object will be 1336 // in exp_marked_bytes. 1337 HeapRegion* region = hr; 1338 int num_regions; 1339 for (num_regions = 0; region != NULL; num_regions++) { 1340 region = _g1h->next_region_in_humongous(region); 1341 } 1342 if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) { 1343 failures += 1; 1344 } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) { 1345 failures += 1; 1346 } 1347 } else { 1348 // We're not OK if expected marked bytes > actual marked bytes. It means 1349 // we have missed accounting some objects during the actual marking. 1350 failures += 1; 1351 } 1352 } 1353 1354 // Verify the bit, for this region, in the actual and expected 1355 // (which was just calculated) region bit maps. 1356 // We're not OK if the bit in the calculated expected region 1357 // bitmap is set and the bit in the actual region bitmap is not. 1358 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); 1359 1360 bool expected = _exp_region_bm->at(index); 1361 bool actual = _region_bm->at(index); 1362 if (expected && !actual) { 1363 failures += 1; 1364 } 1365 1366 // Verify that the card bit maps for the cards spanned by the current 1367 // region match. We have an error if we have a set bit in the expected 1368 // bit map and the corresponding bit in the actual bitmap is not set. 1369 1370 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1371 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1372 1373 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1374 expected = _exp_card_bm->at(i); 1375 actual = _card_bm->at(i); 1376 1377 if (expected && !actual) { 1378 failures += 1; 1379 } 1380 } 1381 1382 _failures += failures; 1383 1384 // We could stop iteration over the heap when we 1385 // find the first violating region by returning true. 1386 return false; 1387 } 1388 }; 1389 1390 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1391 protected: 1392 G1CollectedHeap* _g1h; 1393 G1ConcurrentMark* _cm; 1394 BitMap* _actual_region_bm; 1395 BitMap* _actual_card_bm; 1396 1397 uint _n_workers; 1398 1399 BitMap* _expected_region_bm; 1400 BitMap* _expected_card_bm; 1401 1402 int _failures; 1403 1404 HeapRegionClaimer _hrclaimer; 1405 1406 public: 1407 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1408 BitMap* region_bm, BitMap* card_bm, 1409 BitMap* expected_region_bm, BitMap* expected_card_bm) 1410 : AbstractGangTask("G1 verify final counting"), 1411 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1412 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1413 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1414 _failures(0), 1415 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1416 assert(VerifyDuringGC, "don't call this otherwise"); 1417 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1418 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1419 } 1420 1421 void work(uint worker_id) { 1422 assert(worker_id < _n_workers, "invariant"); 1423 1424 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1425 _actual_region_bm, _actual_card_bm, 1426 _expected_region_bm, 1427 _expected_card_bm); 1428 1429 _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer); 1430 1431 Atomic::add(verify_cl.failures(), &_failures); 1432 } 1433 1434 int failures() const { return _failures; } 1435 }; 1436 1437 // Closure that finalizes the liveness counting data. 1438 // Used during the cleanup pause. 1439 // Sets the bits corresponding to the interval [NTAMS, top] 1440 // (which contains the implicitly live objects) in the 1441 // card liveness bitmap. Also sets the bit for each region, 1442 // containing live data, in the region liveness bitmap. 1443 1444 class FinalCountDataUpdateClosure: public G1CMCountDataClosureBase { 1445 public: 1446 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1447 BitMap* region_bm, 1448 BitMap* card_bm) : 1449 G1CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1450 1451 bool doHeapRegion(HeapRegion* hr) { 1452 HeapWord* ntams = hr->next_top_at_mark_start(); 1453 HeapWord* top = hr->top(); 1454 1455 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1456 1457 // Mark the allocated-since-marking portion... 1458 if (ntams < top) { 1459 // This definitely means the region has live objects. 1460 set_bit_for_region(hr); 1461 1462 // Now set the bits in the card bitmap for [ntams, top) 1463 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1464 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1465 1466 // Note: if we're looking at the last region in heap - top 1467 // could be actually just beyond the end of the heap; end_idx 1468 // will then correspond to a (non-existent) card that is also 1469 // just beyond the heap. 1470 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1471 // end of object is not card aligned - increment to cover 1472 // all the cards spanned by the object 1473 end_idx += 1; 1474 } 1475 1476 assert(end_idx <= _card_bm->size(), 1477 "oob: end_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT, 1478 end_idx, _card_bm->size()); 1479 assert(start_idx < _card_bm->size(), 1480 "oob: start_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT, 1481 start_idx, _card_bm->size()); 1482 1483 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1484 } 1485 1486 // Set the bit for the region if it contains live data 1487 if (hr->next_marked_bytes() > 0) { 1488 set_bit_for_region(hr); 1489 } 1490 1491 return false; 1492 } 1493 }; 1494 1495 class G1ParFinalCountTask: public AbstractGangTask { 1496 protected: 1497 G1CollectedHeap* _g1h; 1498 G1ConcurrentMark* _cm; 1499 BitMap* _actual_region_bm; 1500 BitMap* _actual_card_bm; 1501 1502 uint _n_workers; 1503 HeapRegionClaimer _hrclaimer; 1504 1505 public: 1506 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1507 : AbstractGangTask("G1 final counting"), 1508 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1509 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1510 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1511 } 1512 1513 void work(uint worker_id) { 1514 assert(worker_id < _n_workers, "invariant"); 1515 1516 FinalCountDataUpdateClosure final_update_cl(_g1h, 1517 _actual_region_bm, 1518 _actual_card_bm); 1519 1520 _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer); 1521 } 1522 }; 1523 1524 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1525 G1CollectedHeap* _g1; 1526 size_t _freed_bytes; 1527 FreeRegionList* _local_cleanup_list; 1528 uint _old_regions_removed; 1529 uint _humongous_regions_removed; 1530 HRRSCleanupTask* _hrrs_cleanup_task; 1531 1532 public: 1533 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1534 FreeRegionList* local_cleanup_list, 1535 HRRSCleanupTask* hrrs_cleanup_task) : 1536 _g1(g1), 1537 _freed_bytes(0), 1538 _local_cleanup_list(local_cleanup_list), 1539 _old_regions_removed(0), 1540 _humongous_regions_removed(0), 1541 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1542 1543 size_t freed_bytes() { return _freed_bytes; } 1544 const uint old_regions_removed() { return _old_regions_removed; } 1545 const uint humongous_regions_removed() { return _humongous_regions_removed; } 1546 1547 bool doHeapRegion(HeapRegion *hr) { 1548 if (hr->is_archive()) { 1549 return false; 1550 } 1551 // We use a claim value of zero here because all regions 1552 // were claimed with value 1 in the FinalCount task. 1553 _g1->reset_gc_time_stamps(hr); 1554 hr->note_end_of_marking(); 1555 1556 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1557 _freed_bytes += hr->used(); 1558 hr->set_containing_set(NULL); 1559 if (hr->is_humongous()) { 1560 _humongous_regions_removed++; 1561 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1562 } else { 1563 _old_regions_removed++; 1564 _g1->free_region(hr, _local_cleanup_list, true); 1565 } 1566 } else { 1567 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1568 } 1569 1570 return false; 1571 } 1572 }; 1573 1574 class G1ParNoteEndTask: public AbstractGangTask { 1575 friend class G1NoteEndOfConcMarkClosure; 1576 1577 protected: 1578 G1CollectedHeap* _g1h; 1579 FreeRegionList* _cleanup_list; 1580 HeapRegionClaimer _hrclaimer; 1581 1582 public: 1583 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1584 AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { 1585 } 1586 1587 void work(uint worker_id) { 1588 FreeRegionList local_cleanup_list("Local Cleanup List"); 1589 HRRSCleanupTask hrrs_cleanup_task; 1590 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1591 &hrrs_cleanup_task); 1592 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); 1593 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1594 1595 // Now update the lists 1596 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1597 { 1598 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1599 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1600 1601 // If we iterate over the global cleanup list at the end of 1602 // cleanup to do this printing we will not guarantee to only 1603 // generate output for the newly-reclaimed regions (the list 1604 // might not be empty at the beginning of cleanup; we might 1605 // still be working on its previous contents). So we do the 1606 // printing here, before we append the new regions to the global 1607 // cleanup list. 1608 1609 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1610 if (hr_printer->is_active()) { 1611 FreeRegionListIterator iter(&local_cleanup_list); 1612 while (iter.more_available()) { 1613 HeapRegion* hr = iter.get_next(); 1614 hr_printer->cleanup(hr); 1615 } 1616 } 1617 1618 _cleanup_list->add_ordered(&local_cleanup_list); 1619 assert(local_cleanup_list.is_empty(), "post-condition"); 1620 1621 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1622 } 1623 } 1624 }; 1625 1626 void G1ConcurrentMark::cleanup() { 1627 // world is stopped at this checkpoint 1628 assert(SafepointSynchronize::is_at_safepoint(), 1629 "world should be stopped"); 1630 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1631 1632 // If a full collection has happened, we shouldn't do this. 1633 if (has_aborted()) { 1634 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused 1635 return; 1636 } 1637 1638 g1h->verifier()->verify_region_sets_optional(); 1639 1640 if (VerifyDuringGC) { 1641 HandleMark hm; // handle scope 1642 g1h->prepare_for_verify(); 1643 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)"); 1644 } 1645 g1h->verifier()->check_bitmaps("Cleanup Start"); 1646 1647 G1CollectorPolicy* g1p = g1h->g1_policy(); 1648 g1p->record_concurrent_mark_cleanup_start(); 1649 1650 double start = os::elapsedTime(); 1651 1652 HeapRegionRemSet::reset_for_cleanup_tasks(); 1653 1654 // Do counting once more with the world stopped for good measure. 1655 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); 1656 1657 g1h->workers()->run_task(&g1_par_count_task); 1658 1659 if (VerifyDuringGC) { 1660 // Verify that the counting data accumulated during marking matches 1661 // that calculated by walking the marking bitmap. 1662 1663 // Bitmaps to hold expected values 1664 BitMap expected_region_bm(_region_bm.size(), true); 1665 BitMap expected_card_bm(_card_bm.size(), true); 1666 1667 G1ParVerifyFinalCountTask g1_par_verify_task(g1h, 1668 &_region_bm, 1669 &_card_bm, 1670 &expected_region_bm, 1671 &expected_card_bm); 1672 1673 g1h->workers()->run_task(&g1_par_verify_task); 1674 1675 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); 1676 } 1677 1678 size_t start_used_bytes = g1h->used(); 1679 g1h->collector_state()->set_mark_in_progress(false); 1680 1681 double count_end = os::elapsedTime(); 1682 double this_final_counting_time = (count_end - start); 1683 _total_counting_time += this_final_counting_time; 1684 1685 if (log_is_enabled(Trace, gc, liveness)) { 1686 G1PrintRegionLivenessInfoClosure cl("Post-Marking"); 1687 _g1h->heap_region_iterate(&cl); 1688 } 1689 1690 // Install newly created mark bitMap as "prev". 1691 swapMarkBitMaps(); 1692 1693 g1h->reset_gc_time_stamp(); 1694 1695 uint n_workers = _g1h->workers()->active_workers(); 1696 1697 // Note end of marking in all heap regions. 1698 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers); 1699 g1h->workers()->run_task(&g1_par_note_end_task); 1700 g1h->check_gc_time_stamps(); 1701 1702 if (!cleanup_list_is_empty()) { 1703 // The cleanup list is not empty, so we'll have to process it 1704 // concurrently. Notify anyone else that might be wanting free 1705 // regions that there will be more free regions coming soon. 1706 g1h->set_free_regions_coming(); 1707 } 1708 1709 // call below, since it affects the metric by which we sort the heap 1710 // regions. 1711 if (G1ScrubRemSets) { 1712 double rs_scrub_start = os::elapsedTime(); 1713 g1h->scrub_rem_set(&_region_bm, &_card_bm); 1714 _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start); 1715 } 1716 1717 // this will also free any regions totally full of garbage objects, 1718 // and sort the regions. 1719 g1h->g1_policy()->record_concurrent_mark_cleanup_end(); 1720 1721 // Statistics. 1722 double end = os::elapsedTime(); 1723 _cleanup_times.add((end - start) * 1000.0); 1724 1725 // Clean up will have freed any regions completely full of garbage. 1726 // Update the soft reference policy with the new heap occupancy. 1727 Universe::update_heap_info_at_gc(); 1728 1729 if (VerifyDuringGC) { 1730 HandleMark hm; // handle scope 1731 g1h->prepare_for_verify(); 1732 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)"); 1733 } 1734 1735 g1h->verifier()->check_bitmaps("Cleanup End"); 1736 1737 g1h->verifier()->verify_region_sets_optional(); 1738 1739 // We need to make this be a "collection" so any collection pause that 1740 // races with it goes around and waits for completeCleanup to finish. 1741 g1h->increment_total_collections(); 1742 1743 // Clean out dead classes and update Metaspace sizes. 1744 if (ClassUnloadingWithConcurrentMark) { 1745 ClassLoaderDataGraph::purge(); 1746 } 1747 MetaspaceGC::compute_new_size(); 1748 1749 // We reclaimed old regions so we should calculate the sizes to make 1750 // sure we update the old gen/space data. 1751 g1h->g1mm()->update_sizes(); 1752 g1h->allocation_context_stats().update_after_mark(); 1753 1754 g1h->trace_heap_after_concurrent_cycle(); 1755 } 1756 1757 void G1ConcurrentMark::completeCleanup() { 1758 if (has_aborted()) return; 1759 1760 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1761 1762 _cleanup_list.verify_optional(); 1763 FreeRegionList tmp_free_list("Tmp Free List"); 1764 1765 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : " 1766 "cleanup list has %u entries", 1767 _cleanup_list.length()); 1768 1769 // No one else should be accessing the _cleanup_list at this point, 1770 // so it is not necessary to take any locks 1771 while (!_cleanup_list.is_empty()) { 1772 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */); 1773 assert(hr != NULL, "Got NULL from a non-empty list"); 1774 hr->par_clear(); 1775 tmp_free_list.add_ordered(hr); 1776 1777 // Instead of adding one region at a time to the secondary_free_list, 1778 // we accumulate them in the local list and move them a few at a 1779 // time. This also cuts down on the number of notify_all() calls 1780 // we do during this process. We'll also append the local list when 1781 // _cleanup_list is empty (which means we just removed the last 1782 // region from the _cleanup_list). 1783 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || 1784 _cleanup_list.is_empty()) { 1785 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : " 1786 "appending %u entries to the secondary_free_list, " 1787 "cleanup list still has %u entries", 1788 tmp_free_list.length(), 1789 _cleanup_list.length()); 1790 1791 { 1792 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 1793 g1h->secondary_free_list_add(&tmp_free_list); 1794 SecondaryFreeList_lock->notify_all(); 1795 } 1796 #ifndef PRODUCT 1797 if (G1StressConcRegionFreeing) { 1798 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { 1799 os::sleep(Thread::current(), (jlong) 1, false); 1800 } 1801 } 1802 #endif 1803 } 1804 } 1805 assert(tmp_free_list.is_empty(), "post-condition"); 1806 } 1807 1808 // Supporting Object and Oop closures for reference discovery 1809 // and processing in during marking 1810 1811 bool G1CMIsAliveClosure::do_object_b(oop obj) { 1812 HeapWord* addr = (HeapWord*)obj; 1813 return addr != NULL && 1814 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); 1815 } 1816 1817 // 'Keep Alive' oop closure used by both serial parallel reference processing. 1818 // Uses the G1CMTask associated with a worker thread (for serial reference 1819 // processing the G1CMTask for worker 0 is used) to preserve (mark) and 1820 // trace referent objects. 1821 // 1822 // Using the G1CMTask and embedded local queues avoids having the worker 1823 // threads operating on the global mark stack. This reduces the risk 1824 // of overflowing the stack - which we would rather avoid at this late 1825 // state. Also using the tasks' local queues removes the potential 1826 // of the workers interfering with each other that could occur if 1827 // operating on the global stack. 1828 1829 class G1CMKeepAliveAndDrainClosure: public OopClosure { 1830 G1ConcurrentMark* _cm; 1831 G1CMTask* _task; 1832 int _ref_counter_limit; 1833 int _ref_counter; 1834 bool _is_serial; 1835 public: 1836 G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) : 1837 _cm(cm), _task(task), _is_serial(is_serial), 1838 _ref_counter_limit(G1RefProcDrainInterval) { 1839 assert(_ref_counter_limit > 0, "sanity"); 1840 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 1841 _ref_counter = _ref_counter_limit; 1842 } 1843 1844 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 1845 virtual void do_oop( oop* p) { do_oop_work(p); } 1846 1847 template <class T> void do_oop_work(T* p) { 1848 if (!_cm->has_overflown()) { 1849 oop obj = oopDesc::load_decode_heap_oop(p); 1850 _task->deal_with_reference(obj); 1851 _ref_counter--; 1852 1853 if (_ref_counter == 0) { 1854 // We have dealt with _ref_counter_limit references, pushing them 1855 // and objects reachable from them on to the local stack (and 1856 // possibly the global stack). Call G1CMTask::do_marking_step() to 1857 // process these entries. 1858 // 1859 // We call G1CMTask::do_marking_step() in a loop, which we'll exit if 1860 // there's nothing more to do (i.e. we're done with the entries that 1861 // were pushed as a result of the G1CMTask::deal_with_reference() calls 1862 // above) or we overflow. 1863 // 1864 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted() 1865 // flag while there may still be some work to do. (See the comment at 1866 // the beginning of G1CMTask::do_marking_step() for those conditions - 1867 // one of which is reaching the specified time target.) It is only 1868 // when G1CMTask::do_marking_step() returns without setting the 1869 // has_aborted() flag that the marking step has completed. 1870 do { 1871 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 1872 _task->do_marking_step(mark_step_duration_ms, 1873 false /* do_termination */, 1874 _is_serial); 1875 } while (_task->has_aborted() && !_cm->has_overflown()); 1876 _ref_counter = _ref_counter_limit; 1877 } 1878 } 1879 } 1880 }; 1881 1882 // 'Drain' oop closure used by both serial and parallel reference processing. 1883 // Uses the G1CMTask associated with a given worker thread (for serial 1884 // reference processing the G1CMtask for worker 0 is used). Calls the 1885 // do_marking_step routine, with an unbelievably large timeout value, 1886 // to drain the marking data structures of the remaining entries 1887 // added by the 'keep alive' oop closure above. 1888 1889 class G1CMDrainMarkingStackClosure: public VoidClosure { 1890 G1ConcurrentMark* _cm; 1891 G1CMTask* _task; 1892 bool _is_serial; 1893 public: 1894 G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) : 1895 _cm(cm), _task(task), _is_serial(is_serial) { 1896 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 1897 } 1898 1899 void do_void() { 1900 do { 1901 // We call G1CMTask::do_marking_step() to completely drain the local 1902 // and global marking stacks of entries pushed by the 'keep alive' 1903 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). 1904 // 1905 // G1CMTask::do_marking_step() is called in a loop, which we'll exit 1906 // if there's nothing more to do (i.e. we've completely drained the 1907 // entries that were pushed as a a result of applying the 'keep alive' 1908 // closure to the entries on the discovered ref lists) or we overflow 1909 // the global marking stack. 1910 // 1911 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted() 1912 // flag while there may still be some work to do. (See the comment at 1913 // the beginning of G1CMTask::do_marking_step() for those conditions - 1914 // one of which is reaching the specified time target.) It is only 1915 // when G1CMTask::do_marking_step() returns without setting the 1916 // has_aborted() flag that the marking step has completed. 1917 1918 _task->do_marking_step(1000000000.0 /* something very large */, 1919 true /* do_termination */, 1920 _is_serial); 1921 } while (_task->has_aborted() && !_cm->has_overflown()); 1922 } 1923 }; 1924 1925 // Implementation of AbstractRefProcTaskExecutor for parallel 1926 // reference processing at the end of G1 concurrent marking 1927 1928 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 1929 private: 1930 G1CollectedHeap* _g1h; 1931 G1ConcurrentMark* _cm; 1932 WorkGang* _workers; 1933 uint _active_workers; 1934 1935 public: 1936 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 1937 G1ConcurrentMark* cm, 1938 WorkGang* workers, 1939 uint n_workers) : 1940 _g1h(g1h), _cm(cm), 1941 _workers(workers), _active_workers(n_workers) { } 1942 1943 // Executes the given task using concurrent marking worker threads. 1944 virtual void execute(ProcessTask& task); 1945 virtual void execute(EnqueueTask& task); 1946 }; 1947 1948 class G1CMRefProcTaskProxy: public AbstractGangTask { 1949 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 1950 ProcessTask& _proc_task; 1951 G1CollectedHeap* _g1h; 1952 G1ConcurrentMark* _cm; 1953 1954 public: 1955 G1CMRefProcTaskProxy(ProcessTask& proc_task, 1956 G1CollectedHeap* g1h, 1957 G1ConcurrentMark* cm) : 1958 AbstractGangTask("Process reference objects in parallel"), 1959 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 1960 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1961 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 1962 } 1963 1964 virtual void work(uint worker_id) { 1965 ResourceMark rm; 1966 HandleMark hm; 1967 G1CMTask* task = _cm->task(worker_id); 1968 G1CMIsAliveClosure g1_is_alive(_g1h); 1969 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 1970 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 1971 1972 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 1973 } 1974 }; 1975 1976 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 1977 assert(_workers != NULL, "Need parallel worker threads."); 1978 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 1979 1980 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 1981 1982 // We need to reset the concurrency level before each 1983 // proxy task execution, so that the termination protocol 1984 // and overflow handling in G1CMTask::do_marking_step() knows 1985 // how many workers to wait for. 1986 _cm->set_concurrency(_active_workers); 1987 _workers->run_task(&proc_task_proxy); 1988 } 1989 1990 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 1991 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 1992 EnqueueTask& _enq_task; 1993 1994 public: 1995 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 1996 AbstractGangTask("Enqueue reference objects in parallel"), 1997 _enq_task(enq_task) { } 1998 1999 virtual void work(uint worker_id) { 2000 _enq_task.work(worker_id); 2001 } 2002 }; 2003 2004 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2005 assert(_workers != NULL, "Need parallel worker threads."); 2006 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2007 2008 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2009 2010 // Not strictly necessary but... 2011 // 2012 // We need to reset the concurrency level before each 2013 // proxy task execution, so that the termination protocol 2014 // and overflow handling in G1CMTask::do_marking_step() knows 2015 // how many workers to wait for. 2016 _cm->set_concurrency(_active_workers); 2017 _workers->run_task(&enq_task_proxy); 2018 } 2019 2020 void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2021 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2022 } 2023 2024 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2025 if (has_overflown()) { 2026 // Skip processing the discovered references if we have 2027 // overflown the global marking stack. Reference objects 2028 // only get discovered once so it is OK to not 2029 // de-populate the discovered reference lists. We could have, 2030 // but the only benefit would be that, when marking restarts, 2031 // less reference objects are discovered. 2032 return; 2033 } 2034 2035 ResourceMark rm; 2036 HandleMark hm; 2037 2038 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2039 2040 // Is alive closure. 2041 G1CMIsAliveClosure g1_is_alive(g1h); 2042 2043 // Inner scope to exclude the cleaning of the string and symbol 2044 // tables from the displayed time. 2045 { 2046 GCTraceTime(Debug, gc) trace("Reference Processing", g1h->gc_timer_cm()); 2047 2048 ReferenceProcessor* rp = g1h->ref_processor_cm(); 2049 2050 // See the comment in G1CollectedHeap::ref_processing_init() 2051 // about how reference processing currently works in G1. 2052 2053 // Set the soft reference policy 2054 rp->setup_policy(clear_all_soft_refs); 2055 assert(_markStack.isEmpty(), "mark stack should be empty"); 2056 2057 // Instances of the 'Keep Alive' and 'Complete GC' closures used 2058 // in serial reference processing. Note these closures are also 2059 // used for serially processing (by the the current thread) the 2060 // JNI references during parallel reference processing. 2061 // 2062 // These closures do not need to synchronize with the worker 2063 // threads involved in parallel reference processing as these 2064 // instances are executed serially by the current thread (e.g. 2065 // reference processing is not multi-threaded and is thus 2066 // performed by the current thread instead of a gang worker). 2067 // 2068 // The gang tasks involved in parallel reference processing create 2069 // their own instances of these closures, which do their own 2070 // synchronization among themselves. 2071 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 2072 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 2073 2074 // We need at least one active thread. If reference processing 2075 // is not multi-threaded we use the current (VMThread) thread, 2076 // otherwise we use the work gang from the G1CollectedHeap and 2077 // we utilize all the worker threads we can. 2078 bool processing_is_mt = rp->processing_is_mt(); 2079 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U); 2080 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); 2081 2082 // Parallel processing task executor. 2083 G1CMRefProcTaskExecutor par_task_executor(g1h, this, 2084 g1h->workers(), active_workers); 2085 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 2086 2087 // Set the concurrency level. The phase was already set prior to 2088 // executing the remark task. 2089 set_concurrency(active_workers); 2090 2091 // Set the degree of MT processing here. If the discovery was done MT, 2092 // the number of threads involved during discovery could differ from 2093 // the number of active workers. This is OK as long as the discovered 2094 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 2095 rp->set_active_mt_degree(active_workers); 2096 2097 // Process the weak references. 2098 const ReferenceProcessorStats& stats = 2099 rp->process_discovered_references(&g1_is_alive, 2100 &g1_keep_alive, 2101 &g1_drain_mark_stack, 2102 executor, 2103 g1h->gc_timer_cm()); 2104 g1h->gc_tracer_cm()->report_gc_reference_stats(stats); 2105 2106 // The do_oop work routines of the keep_alive and drain_marking_stack 2107 // oop closures will set the has_overflown flag if we overflow the 2108 // global marking stack. 2109 2110 assert(_markStack.overflow() || _markStack.isEmpty(), 2111 "mark stack should be empty (unless it overflowed)"); 2112 2113 if (_markStack.overflow()) { 2114 // This should have been done already when we tried to push an 2115 // entry on to the global mark stack. But let's do it again. 2116 set_has_overflown(); 2117 } 2118 2119 assert(rp->num_q() == active_workers, "why not"); 2120 2121 rp->enqueue_discovered_references(executor); 2122 2123 rp->verify_no_references_recorded(); 2124 assert(!rp->discovery_enabled(), "Post condition"); 2125 } 2126 2127 if (has_overflown()) { 2128 // We can not trust g1_is_alive if the marking stack overflowed 2129 return; 2130 } 2131 2132 assert(_markStack.isEmpty(), "Marking should have completed"); 2133 2134 // Unload Klasses, String, Symbols, Code Cache, etc. 2135 { 2136 GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm()); 2137 2138 if (ClassUnloadingWithConcurrentMark) { 2139 bool purged_classes; 2140 2141 { 2142 GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm()); 2143 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); 2144 } 2145 2146 { 2147 GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm()); 2148 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2149 } 2150 } 2151 2152 if (G1StringDedup::is_enabled()) { 2153 GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm()); 2154 G1StringDedup::unlink(&g1_is_alive); 2155 } 2156 } 2157 } 2158 2159 void G1ConcurrentMark::swapMarkBitMaps() { 2160 G1CMBitMapRO* temp = _prevMarkBitMap; 2161 _prevMarkBitMap = (G1CMBitMapRO*)_nextMarkBitMap; 2162 _nextMarkBitMap = (G1CMBitMap*) temp; 2163 } 2164 2165 // Closure for marking entries in SATB buffers. 2166 class G1CMSATBBufferClosure : public SATBBufferClosure { 2167 private: 2168 G1CMTask* _task; 2169 G1CollectedHeap* _g1h; 2170 2171 // This is very similar to G1CMTask::deal_with_reference, but with 2172 // more relaxed requirements for the argument, so this must be more 2173 // circumspect about treating the argument as an object. 2174 void do_entry(void* entry) const { 2175 _task->increment_refs_reached(); 2176 HeapRegion* hr = _g1h->heap_region_containing(entry); 2177 if (entry < hr->next_top_at_mark_start()) { 2178 // Until we get here, we don't know whether entry refers to a valid 2179 // object; it could instead have been a stale reference. 2180 oop obj = static_cast<oop>(entry); 2181 assert(obj->is_oop(true /* ignore mark word */), 2182 "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)); 2183 _task->make_reference_grey(obj, hr); 2184 } 2185 } 2186 2187 public: 2188 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h) 2189 : _task(task), _g1h(g1h) { } 2190 2191 virtual void do_buffer(void** buffer, size_t size) { 2192 for (size_t i = 0; i < size; ++i) { 2193 do_entry(buffer[i]); 2194 } 2195 } 2196 }; 2197 2198 class G1RemarkThreadsClosure : public ThreadClosure { 2199 G1CMSATBBufferClosure _cm_satb_cl; 2200 G1CMOopClosure _cm_cl; 2201 MarkingCodeBlobClosure _code_cl; 2202 int _thread_parity; 2203 2204 public: 2205 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) : 2206 _cm_satb_cl(task, g1h), 2207 _cm_cl(g1h, g1h->concurrent_mark(), task), 2208 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), 2209 _thread_parity(Threads::thread_claim_parity()) {} 2210 2211 void do_thread(Thread* thread) { 2212 if (thread->is_Java_thread()) { 2213 if (thread->claim_oops_do(true, _thread_parity)) { 2214 JavaThread* jt = (JavaThread*)thread; 2215 2216 // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking 2217 // however the liveness of oops reachable from nmethods have very complex lifecycles: 2218 // * Alive if on the stack of an executing method 2219 // * Weakly reachable otherwise 2220 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be 2221 // live by the SATB invariant but other oops recorded in nmethods may behave differently. 2222 jt->nmethods_do(&_code_cl); 2223 2224 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl); 2225 } 2226 } else if (thread->is_VM_thread()) { 2227 if (thread->claim_oops_do(true, _thread_parity)) { 2228 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl); 2229 } 2230 } 2231 } 2232 }; 2233 2234 class G1CMRemarkTask: public AbstractGangTask { 2235 private: 2236 G1ConcurrentMark* _cm; 2237 public: 2238 void work(uint worker_id) { 2239 // Since all available tasks are actually started, we should 2240 // only proceed if we're supposed to be active. 2241 if (worker_id < _cm->active_tasks()) { 2242 G1CMTask* task = _cm->task(worker_id); 2243 task->record_start_time(); 2244 { 2245 ResourceMark rm; 2246 HandleMark hm; 2247 2248 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task); 2249 Threads::threads_do(&threads_f); 2250 } 2251 2252 do { 2253 task->do_marking_step(1000000000.0 /* something very large */, 2254 true /* do_termination */, 2255 false /* is_serial */); 2256 } while (task->has_aborted() && !_cm->has_overflown()); 2257 // If we overflow, then we do not want to restart. We instead 2258 // want to abort remark and do concurrent marking again. 2259 task->record_end_time(); 2260 } 2261 } 2262 2263 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) : 2264 AbstractGangTask("Par Remark"), _cm(cm) { 2265 _cm->terminator()->reset_for_reuse(active_workers); 2266 } 2267 }; 2268 2269 void G1ConcurrentMark::checkpointRootsFinalWork() { 2270 ResourceMark rm; 2271 HandleMark hm; 2272 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2273 2274 GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm()); 2275 2276 g1h->ensure_parsability(false); 2277 2278 // this is remark, so we'll use up all active threads 2279 uint active_workers = g1h->workers()->active_workers(); 2280 set_concurrency_and_phase(active_workers, false /* concurrent */); 2281 // Leave _parallel_marking_threads at it's 2282 // value originally calculated in the G1ConcurrentMark 2283 // constructor and pass values of the active workers 2284 // through the gang in the task. 2285 2286 { 2287 StrongRootsScope srs(active_workers); 2288 2289 G1CMRemarkTask remarkTask(this, active_workers); 2290 // We will start all available threads, even if we decide that the 2291 // active_workers will be fewer. The extra ones will just bail out 2292 // immediately. 2293 g1h->workers()->run_task(&remarkTask); 2294 } 2295 2296 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2297 guarantee(has_overflown() || 2298 satb_mq_set.completed_buffers_num() == 0, 2299 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT, 2300 BOOL_TO_STR(has_overflown()), 2301 satb_mq_set.completed_buffers_num()); 2302 2303 print_stats(); 2304 } 2305 2306 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 2307 // Note we are overriding the read-only view of the prev map here, via 2308 // the cast. 2309 ((G1CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2310 } 2311 2312 HeapRegion* 2313 G1ConcurrentMark::claim_region(uint worker_id) { 2314 // "checkpoint" the finger 2315 HeapWord* finger = _finger; 2316 2317 // _heap_end will not change underneath our feet; it only changes at 2318 // yield points. 2319 while (finger < _heap_end) { 2320 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2321 2322 HeapRegion* curr_region = _g1h->heap_region_containing(finger); 2323 2324 // Above heap_region_containing may return NULL as we always scan claim 2325 // until the end of the heap. In this case, just jump to the next region. 2326 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; 2327 2328 // Is the gap between reading the finger and doing the CAS too long? 2329 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); 2330 if (res == finger && curr_region != NULL) { 2331 // we succeeded 2332 HeapWord* bottom = curr_region->bottom(); 2333 HeapWord* limit = curr_region->next_top_at_mark_start(); 2334 2335 // notice that _finger == end cannot be guaranteed here since, 2336 // someone else might have moved the finger even further 2337 assert(_finger >= end, "the finger should have moved forward"); 2338 2339 if (limit > bottom) { 2340 return curr_region; 2341 } else { 2342 assert(limit == bottom, 2343 "the region limit should be at bottom"); 2344 // we return NULL and the caller should try calling 2345 // claim_region() again. 2346 return NULL; 2347 } 2348 } else { 2349 assert(_finger > finger, "the finger should have moved forward"); 2350 // read it again 2351 finger = _finger; 2352 } 2353 } 2354 2355 return NULL; 2356 } 2357 2358 #ifndef PRODUCT 2359 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 2360 private: 2361 G1CollectedHeap* _g1h; 2362 const char* _phase; 2363 int _info; 2364 2365 public: 2366 VerifyNoCSetOops(const char* phase, int info = -1) : 2367 _g1h(G1CollectedHeap::heap()), 2368 _phase(phase), 2369 _info(info) 2370 { } 2371 2372 void operator()(oop obj) const { 2373 guarantee(obj->is_oop(), 2374 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 2375 p2i(obj), _phase, _info); 2376 guarantee(!_g1h->obj_in_cs(obj), 2377 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 2378 p2i(obj), _phase, _info); 2379 } 2380 }; 2381 2382 void G1ConcurrentMark::verify_no_cset_oops() { 2383 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2384 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 2385 return; 2386 } 2387 2388 // Verify entries on the global mark stack 2389 _markStack.iterate(VerifyNoCSetOops("Stack")); 2390 2391 // Verify entries on the task queues 2392 for (uint i = 0; i < _max_worker_id; ++i) { 2393 G1CMTaskQueue* queue = _task_queues->queue(i); 2394 queue->iterate(VerifyNoCSetOops("Queue", i)); 2395 } 2396 2397 // Verify the global finger 2398 HeapWord* global_finger = finger(); 2399 if (global_finger != NULL && global_finger < _heap_end) { 2400 // Since we always iterate over all regions, we might get a NULL HeapRegion 2401 // here. 2402 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 2403 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 2404 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 2405 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 2406 } 2407 2408 // Verify the task fingers 2409 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2410 for (uint i = 0; i < parallel_marking_threads(); ++i) { 2411 G1CMTask* task = _tasks[i]; 2412 HeapWord* task_finger = task->finger(); 2413 if (task_finger != NULL && task_finger < _heap_end) { 2414 // See above note on the global finger verification. 2415 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 2416 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 2417 !task_hr->in_collection_set(), 2418 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 2419 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 2420 } 2421 } 2422 } 2423 #endif // PRODUCT 2424 2425 // Aggregate the counting data that was constructed concurrently 2426 // with marking. 2427 class AggregateCountDataHRClosure: public HeapRegionClosure { 2428 G1CollectedHeap* _g1h; 2429 G1ConcurrentMark* _cm; 2430 CardTableModRefBS* _ct_bs; 2431 BitMap* _cm_card_bm; 2432 uint _max_worker_id; 2433 2434 public: 2435 AggregateCountDataHRClosure(G1CollectedHeap* g1h, 2436 BitMap* cm_card_bm, 2437 uint max_worker_id) : 2438 _g1h(g1h), _cm(g1h->concurrent_mark()), 2439 _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())), 2440 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { } 2441 2442 bool doHeapRegion(HeapRegion* hr) { 2443 HeapWord* start = hr->bottom(); 2444 HeapWord* limit = hr->next_top_at_mark_start(); 2445 HeapWord* end = hr->end(); 2446 2447 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(), 2448 "Preconditions not met - " 2449 "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", " 2450 "top: " PTR_FORMAT ", end: " PTR_FORMAT, 2451 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())); 2452 2453 assert(hr->next_marked_bytes() == 0, "Precondition"); 2454 2455 if (start == limit) { 2456 // NTAMS of this region has not been set so nothing to do. 2457 return false; 2458 } 2459 2460 // 'start' should be in the heap. 2461 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity"); 2462 // 'end' *may* be just beyond the end of the heap (if hr is the last region) 2463 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity"); 2464 2465 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); 2466 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit); 2467 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end); 2468 2469 // If ntams is not card aligned then we bump card bitmap index 2470 // for limit so that we get the all the cards spanned by 2471 // the object ending at ntams. 2472 // Note: if this is the last region in the heap then ntams 2473 // could be actually just beyond the end of the the heap; 2474 // limit_idx will then correspond to a (non-existent) card 2475 // that is also outside the heap. 2476 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) { 2477 limit_idx += 1; 2478 } 2479 2480 assert(limit_idx <= end_idx, "or else use atomics"); 2481 2482 // Aggregate the "stripe" in the count data associated with hr. 2483 uint hrm_index = hr->hrm_index(); 2484 size_t marked_bytes = 0; 2485 2486 for (uint i = 0; i < _max_worker_id; i += 1) { 2487 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i); 2488 BitMap* task_card_bm = _cm->count_card_bitmap_for(i); 2489 2490 // Fetch the marked_bytes in this region for task i and 2491 // add it to the running total for this region. 2492 marked_bytes += marked_bytes_array[hrm_index]; 2493 2494 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx) 2495 // into the global card bitmap. 2496 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx); 2497 2498 while (scan_idx < limit_idx) { 2499 assert(task_card_bm->at(scan_idx) == true, "should be"); 2500 _cm_card_bm->set_bit(scan_idx); 2501 assert(_cm_card_bm->at(scan_idx) == true, "should be"); 2502 2503 // BitMap::get_next_one_offset() can handle the case when 2504 // its left_offset parameter is greater than its right_offset 2505 // parameter. It does, however, have an early exit if 2506 // left_offset == right_offset. So let's limit the value 2507 // passed in for left offset here. 2508 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 2509 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 2510 } 2511 } 2512 2513 // Update the marked bytes for this region. 2514 hr->add_to_marked_bytes(marked_bytes); 2515 2516 // Next heap region 2517 return false; 2518 } 2519 }; 2520 2521 class G1AggregateCountDataTask: public AbstractGangTask { 2522 protected: 2523 G1CollectedHeap* _g1h; 2524 G1ConcurrentMark* _cm; 2525 BitMap* _cm_card_bm; 2526 uint _max_worker_id; 2527 uint _active_workers; 2528 HeapRegionClaimer _hrclaimer; 2529 2530 public: 2531 G1AggregateCountDataTask(G1CollectedHeap* g1h, 2532 G1ConcurrentMark* cm, 2533 BitMap* cm_card_bm, 2534 uint max_worker_id, 2535 uint n_workers) : 2536 AbstractGangTask("Count Aggregation"), 2537 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 2538 _max_worker_id(max_worker_id), 2539 _active_workers(n_workers), 2540 _hrclaimer(_active_workers) { 2541 } 2542 2543 void work(uint worker_id) { 2544 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 2545 2546 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 2547 } 2548 }; 2549 2550 2551 void G1ConcurrentMark::aggregate_count_data() { 2552 uint n_workers = _g1h->workers()->active_workers(); 2553 2554 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 2555 _max_worker_id, n_workers); 2556 2557 _g1h->workers()->run_task(&g1_par_agg_task); 2558 } 2559 2560 // Clear the per-worker arrays used to store the per-region counting data 2561 void G1ConcurrentMark::clear_all_count_data() { 2562 // Clear the global card bitmap - it will be filled during 2563 // liveness count aggregation (during remark) and the 2564 // final counting task. 2565 _card_bm.clear(); 2566 2567 // Clear the global region bitmap - it will be filled as part 2568 // of the final counting task. 2569 _region_bm.clear(); 2570 2571 uint max_regions = _g1h->max_regions(); 2572 assert(_max_worker_id > 0, "uninitialized"); 2573 2574 for (uint i = 0; i < _max_worker_id; i += 1) { 2575 BitMap* task_card_bm = count_card_bitmap_for(i); 2576 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 2577 2578 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 2579 assert(marked_bytes_array != NULL, "uninitialized"); 2580 2581 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 2582 task_card_bm->clear(); 2583 } 2584 } 2585 2586 void G1ConcurrentMark::print_stats() { 2587 if (!log_is_enabled(Debug, gc, stats)) { 2588 return; 2589 } 2590 log_debug(gc, stats)("---------------------------------------------------------------------"); 2591 for (size_t i = 0; i < _active_tasks; ++i) { 2592 _tasks[i]->print_stats(); 2593 log_debug(gc, stats)("---------------------------------------------------------------------"); 2594 } 2595 } 2596 2597 // abandon current marking iteration due to a Full GC 2598 void G1ConcurrentMark::abort() { 2599 if (!cmThread()->during_cycle() || _has_aborted) { 2600 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2601 return; 2602 } 2603 2604 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2605 // concurrent bitmap clearing. 2606 _nextMarkBitMap->clearAll(); 2607 2608 // Note we cannot clear the previous marking bitmap here 2609 // since VerifyDuringGC verifies the objects marked during 2610 // a full GC against the previous bitmap. 2611 2612 // Clear the liveness counting data 2613 clear_all_count_data(); 2614 // Empty mark stack 2615 reset_marking_state(); 2616 for (uint i = 0; i < _max_worker_id; ++i) { 2617 _tasks[i]->clear_region_fields(); 2618 } 2619 _first_overflow_barrier_sync.abort(); 2620 _second_overflow_barrier_sync.abort(); 2621 _has_aborted = true; 2622 2623 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2624 satb_mq_set.abandon_partial_marking(); 2625 // This can be called either during or outside marking, we'll read 2626 // the expected_active value from the SATB queue set. 2627 satb_mq_set.set_active_all_threads( 2628 false, /* new active value */ 2629 satb_mq_set.is_active() /* expected_active */); 2630 2631 _g1h->trace_heap_after_concurrent_cycle(); 2632 2633 _g1h->register_concurrent_cycle_end(); 2634 } 2635 2636 static void print_ms_time_info(const char* prefix, const char* name, 2637 NumberSeq& ns) { 2638 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2639 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2640 if (ns.num() > 0) { 2641 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", 2642 prefix, ns.sd(), ns.maximum()); 2643 } 2644 } 2645 2646 void G1ConcurrentMark::print_summary_info() { 2647 LogHandle(gc, marking) log; 2648 if (!log.is_trace()) { 2649 return; 2650 } 2651 2652 log.trace(" Concurrent marking:"); 2653 print_ms_time_info(" ", "init marks", _init_times); 2654 print_ms_time_info(" ", "remarks", _remark_times); 2655 { 2656 print_ms_time_info(" ", "final marks", _remark_mark_times); 2657 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2658 2659 } 2660 print_ms_time_info(" ", "cleanups", _cleanup_times); 2661 log.trace(" Final counting total time = %8.2f s (avg = %8.2f ms).", 2662 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2663 if (G1ScrubRemSets) { 2664 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 2665 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2666 } 2667 log.trace(" Total stop_world time = %8.2f s.", 2668 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2669 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2670 cmThread()->vtime_accum(), cmThread()->vtime_mark_accum()); 2671 } 2672 2673 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2674 _parallel_workers->print_worker_threads_on(st); 2675 } 2676 2677 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2678 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2679 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 2680 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 2681 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 2682 } 2683 2684 // We take a break if someone is trying to stop the world. 2685 bool G1ConcurrentMark::do_yield_check(uint worker_id) { 2686 if (SuspendibleThreadSet::should_yield()) { 2687 SuspendibleThreadSet::yield(); 2688 return true; 2689 } else { 2690 return false; 2691 } 2692 } 2693 2694 // Closure for iteration over bitmaps 2695 class G1CMBitMapClosure : public BitMapClosure { 2696 private: 2697 // the bitmap that is being iterated over 2698 G1CMBitMap* _nextMarkBitMap; 2699 G1ConcurrentMark* _cm; 2700 G1CMTask* _task; 2701 2702 public: 2703 G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) : 2704 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 2705 2706 bool do_bit(size_t offset) { 2707 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 2708 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 2709 assert( addr < _cm->finger(), "invariant"); 2710 assert(addr >= _task->finger(), "invariant"); 2711 2712 // We move that task's local finger along. 2713 _task->move_finger_to(addr); 2714 2715 _task->scan_object(oop(addr)); 2716 // we only partially drain the local queue and global stack 2717 _task->drain_local_queue(true); 2718 _task->drain_global_stack(true); 2719 2720 // if the has_aborted flag has been raised, we need to bail out of 2721 // the iteration 2722 return !_task->has_aborted(); 2723 } 2724 }; 2725 2726 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2727 ReferenceProcessor* result = g1h->ref_processor_cm(); 2728 assert(result != NULL, "CM reference processor should not be NULL"); 2729 return result; 2730 } 2731 2732 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2733 G1ConcurrentMark* cm, 2734 G1CMTask* task) 2735 : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)), 2736 _g1h(g1h), _cm(cm), _task(task) 2737 { } 2738 2739 void G1CMTask::setup_for_region(HeapRegion* hr) { 2740 assert(hr != NULL, 2741 "claim_region() should have filtered out NULL regions"); 2742 _curr_region = hr; 2743 _finger = hr->bottom(); 2744 update_region_limit(); 2745 } 2746 2747 void G1CMTask::update_region_limit() { 2748 HeapRegion* hr = _curr_region; 2749 HeapWord* bottom = hr->bottom(); 2750 HeapWord* limit = hr->next_top_at_mark_start(); 2751 2752 if (limit == bottom) { 2753 // The region was collected underneath our feet. 2754 // We set the finger to bottom to ensure that the bitmap 2755 // iteration that will follow this will not do anything. 2756 // (this is not a condition that holds when we set the region up, 2757 // as the region is not supposed to be empty in the first place) 2758 _finger = bottom; 2759 } else if (limit >= _region_limit) { 2760 assert(limit >= _finger, "peace of mind"); 2761 } else { 2762 assert(limit < _region_limit, "only way to get here"); 2763 // This can happen under some pretty unusual circumstances. An 2764 // evacuation pause empties the region underneath our feet (NTAMS 2765 // at bottom). We then do some allocation in the region (NTAMS 2766 // stays at bottom), followed by the region being used as a GC 2767 // alloc region (NTAMS will move to top() and the objects 2768 // originally below it will be grayed). All objects now marked in 2769 // the region are explicitly grayed, if below the global finger, 2770 // and we do not need in fact to scan anything else. So, we simply 2771 // set _finger to be limit to ensure that the bitmap iteration 2772 // doesn't do anything. 2773 _finger = limit; 2774 } 2775 2776 _region_limit = limit; 2777 } 2778 2779 void G1CMTask::giveup_current_region() { 2780 assert(_curr_region != NULL, "invariant"); 2781 clear_region_fields(); 2782 } 2783 2784 void G1CMTask::clear_region_fields() { 2785 // Values for these three fields that indicate that we're not 2786 // holding on to a region. 2787 _curr_region = NULL; 2788 _finger = NULL; 2789 _region_limit = NULL; 2790 } 2791 2792 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 2793 if (cm_oop_closure == NULL) { 2794 assert(_cm_oop_closure != NULL, "invariant"); 2795 } else { 2796 assert(_cm_oop_closure == NULL, "invariant"); 2797 } 2798 _cm_oop_closure = cm_oop_closure; 2799 } 2800 2801 void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) { 2802 guarantee(nextMarkBitMap != NULL, "invariant"); 2803 _nextMarkBitMap = nextMarkBitMap; 2804 clear_region_fields(); 2805 2806 _calls = 0; 2807 _elapsed_time_ms = 0.0; 2808 _termination_time_ms = 0.0; 2809 _termination_start_time_ms = 0.0; 2810 } 2811 2812 bool G1CMTask::should_exit_termination() { 2813 regular_clock_call(); 2814 // This is called when we are in the termination protocol. We should 2815 // quit if, for some reason, this task wants to abort or the global 2816 // stack is not empty (this means that we can get work from it). 2817 return !_cm->mark_stack_empty() || has_aborted(); 2818 } 2819 2820 void G1CMTask::reached_limit() { 2821 assert(_words_scanned >= _words_scanned_limit || 2822 _refs_reached >= _refs_reached_limit , 2823 "shouldn't have been called otherwise"); 2824 regular_clock_call(); 2825 } 2826 2827 void G1CMTask::regular_clock_call() { 2828 if (has_aborted()) return; 2829 2830 // First, we need to recalculate the words scanned and refs reached 2831 // limits for the next clock call. 2832 recalculate_limits(); 2833 2834 // During the regular clock call we do the following 2835 2836 // (1) If an overflow has been flagged, then we abort. 2837 if (_cm->has_overflown()) { 2838 set_has_aborted(); 2839 return; 2840 } 2841 2842 // If we are not concurrent (i.e. we're doing remark) we don't need 2843 // to check anything else. The other steps are only needed during 2844 // the concurrent marking phase. 2845 if (!concurrent()) return; 2846 2847 // (2) If marking has been aborted for Full GC, then we also abort. 2848 if (_cm->has_aborted()) { 2849 set_has_aborted(); 2850 return; 2851 } 2852 2853 double curr_time_ms = os::elapsedVTime() * 1000.0; 2854 2855 // (4) We check whether we should yield. If we have to, then we abort. 2856 if (SuspendibleThreadSet::should_yield()) { 2857 // We should yield. To do this we abort the task. The caller is 2858 // responsible for yielding. 2859 set_has_aborted(); 2860 return; 2861 } 2862 2863 // (5) We check whether we've reached our time quota. If we have, 2864 // then we abort. 2865 double elapsed_time_ms = curr_time_ms - _start_time_ms; 2866 if (elapsed_time_ms > _time_target_ms) { 2867 set_has_aborted(); 2868 _has_timed_out = true; 2869 return; 2870 } 2871 2872 // (6) Finally, we check whether there are enough completed STAB 2873 // buffers available for processing. If there are, we abort. 2874 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2875 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 2876 // we do need to process SATB buffers, we'll abort and restart 2877 // the marking task to do so 2878 set_has_aborted(); 2879 return; 2880 } 2881 } 2882 2883 void G1CMTask::recalculate_limits() { 2884 _real_words_scanned_limit = _words_scanned + words_scanned_period; 2885 _words_scanned_limit = _real_words_scanned_limit; 2886 2887 _real_refs_reached_limit = _refs_reached + refs_reached_period; 2888 _refs_reached_limit = _real_refs_reached_limit; 2889 } 2890 2891 void G1CMTask::decrease_limits() { 2892 // This is called when we believe that we're going to do an infrequent 2893 // operation which will increase the per byte scanned cost (i.e. move 2894 // entries to/from the global stack). It basically tries to decrease the 2895 // scanning limit so that the clock is called earlier. 2896 2897 _words_scanned_limit = _real_words_scanned_limit - 2898 3 * words_scanned_period / 4; 2899 _refs_reached_limit = _real_refs_reached_limit - 2900 3 * refs_reached_period / 4; 2901 } 2902 2903 void G1CMTask::move_entries_to_global_stack() { 2904 // local array where we'll store the entries that will be popped 2905 // from the local queue 2906 oop buffer[global_stack_transfer_size]; 2907 2908 int n = 0; 2909 oop obj; 2910 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 2911 buffer[n] = obj; 2912 ++n; 2913 } 2914 2915 if (n > 0) { 2916 // we popped at least one entry from the local queue 2917 2918 if (!_cm->mark_stack_push(buffer, n)) { 2919 set_has_aborted(); 2920 } 2921 } 2922 2923 // this operation was quite expensive, so decrease the limits 2924 decrease_limits(); 2925 } 2926 2927 void G1CMTask::get_entries_from_global_stack() { 2928 // local array where we'll store the entries that will be popped 2929 // from the global stack. 2930 oop buffer[global_stack_transfer_size]; 2931 int n; 2932 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 2933 assert(n <= global_stack_transfer_size, 2934 "we should not pop more than the given limit"); 2935 if (n > 0) { 2936 // yes, we did actually pop at least one entry 2937 for (int i = 0; i < n; ++i) { 2938 bool success = _task_queue->push(buffer[i]); 2939 // We only call this when the local queue is empty or under a 2940 // given target limit. So, we do not expect this push to fail. 2941 assert(success, "invariant"); 2942 } 2943 } 2944 2945 // this operation was quite expensive, so decrease the limits 2946 decrease_limits(); 2947 } 2948 2949 void G1CMTask::drain_local_queue(bool partially) { 2950 if (has_aborted()) return; 2951 2952 // Decide what the target size is, depending whether we're going to 2953 // drain it partially (so that other tasks can steal if they run out 2954 // of things to do) or totally (at the very end). 2955 size_t target_size; 2956 if (partially) { 2957 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 2958 } else { 2959 target_size = 0; 2960 } 2961 2962 if (_task_queue->size() > target_size) { 2963 oop obj; 2964 bool ret = _task_queue->pop_local(obj); 2965 while (ret) { 2966 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 2967 assert(!_g1h->is_on_master_free_list( 2968 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 2969 2970 scan_object(obj); 2971 2972 if (_task_queue->size() <= target_size || has_aborted()) { 2973 ret = false; 2974 } else { 2975 ret = _task_queue->pop_local(obj); 2976 } 2977 } 2978 } 2979 } 2980 2981 void G1CMTask::drain_global_stack(bool partially) { 2982 if (has_aborted()) return; 2983 2984 // We have a policy to drain the local queue before we attempt to 2985 // drain the global stack. 2986 assert(partially || _task_queue->size() == 0, "invariant"); 2987 2988 // Decide what the target size is, depending whether we're going to 2989 // drain it partially (so that other tasks can steal if they run out 2990 // of things to do) or totally (at the very end). Notice that, 2991 // because we move entries from the global stack in chunks or 2992 // because another task might be doing the same, we might in fact 2993 // drop below the target. But, this is not a problem. 2994 size_t target_size; 2995 if (partially) { 2996 target_size = _cm->partial_mark_stack_size_target(); 2997 } else { 2998 target_size = 0; 2999 } 3000 3001 if (_cm->mark_stack_size() > target_size) { 3002 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 3003 get_entries_from_global_stack(); 3004 drain_local_queue(partially); 3005 } 3006 } 3007 } 3008 3009 // SATB Queue has several assumptions on whether to call the par or 3010 // non-par versions of the methods. this is why some of the code is 3011 // replicated. We should really get rid of the single-threaded version 3012 // of the code to simplify things. 3013 void G1CMTask::drain_satb_buffers() { 3014 if (has_aborted()) return; 3015 3016 // We set this so that the regular clock knows that we're in the 3017 // middle of draining buffers and doesn't set the abort flag when it 3018 // notices that SATB buffers are available for draining. It'd be 3019 // very counter productive if it did that. :-) 3020 _draining_satb_buffers = true; 3021 3022 G1CMSATBBufferClosure satb_cl(this, _g1h); 3023 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3024 3025 // This keeps claiming and applying the closure to completed buffers 3026 // until we run out of buffers or we need to abort. 3027 while (!has_aborted() && 3028 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 3029 regular_clock_call(); 3030 } 3031 3032 _draining_satb_buffers = false; 3033 3034 assert(has_aborted() || 3035 concurrent() || 3036 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3037 3038 // again, this was a potentially expensive operation, decrease the 3039 // limits to get the regular clock call early 3040 decrease_limits(); 3041 } 3042 3043 void G1CMTask::print_stats() { 3044 log_debug(gc, stats)("Marking Stats, task = %u, calls = %d", 3045 _worker_id, _calls); 3046 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3047 _elapsed_time_ms, _termination_time_ms); 3048 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3049 _step_times_ms.num(), _step_times_ms.avg(), 3050 _step_times_ms.sd()); 3051 log_debug(gc, stats)(" max = %1.2lfms, total = %1.2lfms", 3052 _step_times_ms.maximum(), _step_times_ms.sum()); 3053 } 3054 3055 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) { 3056 return _task_queues->steal(worker_id, hash_seed, obj); 3057 } 3058 3059 /***************************************************************************** 3060 3061 The do_marking_step(time_target_ms, ...) method is the building 3062 block of the parallel marking framework. It can be called in parallel 3063 with other invocations of do_marking_step() on different tasks 3064 (but only one per task, obviously) and concurrently with the 3065 mutator threads, or during remark, hence it eliminates the need 3066 for two versions of the code. When called during remark, it will 3067 pick up from where the task left off during the concurrent marking 3068 phase. Interestingly, tasks are also claimable during evacuation 3069 pauses too, since do_marking_step() ensures that it aborts before 3070 it needs to yield. 3071 3072 The data structures that it uses to do marking work are the 3073 following: 3074 3075 (1) Marking Bitmap. If there are gray objects that appear only 3076 on the bitmap (this happens either when dealing with an overflow 3077 or when the initial marking phase has simply marked the roots 3078 and didn't push them on the stack), then tasks claim heap 3079 regions whose bitmap they then scan to find gray objects. A 3080 global finger indicates where the end of the last claimed region 3081 is. A local finger indicates how far into the region a task has 3082 scanned. The two fingers are used to determine how to gray an 3083 object (i.e. whether simply marking it is OK, as it will be 3084 visited by a task in the future, or whether it needs to be also 3085 pushed on a stack). 3086 3087 (2) Local Queue. The local queue of the task which is accessed 3088 reasonably efficiently by the task. Other tasks can steal from 3089 it when they run out of work. Throughout the marking phase, a 3090 task attempts to keep its local queue short but not totally 3091 empty, so that entries are available for stealing by other 3092 tasks. Only when there is no more work, a task will totally 3093 drain its local queue. 3094 3095 (3) Global Mark Stack. This handles local queue overflow. During 3096 marking only sets of entries are moved between it and the local 3097 queues, as access to it requires a mutex and more fine-grain 3098 interaction with it which might cause contention. If it 3099 overflows, then the marking phase should restart and iterate 3100 over the bitmap to identify gray objects. Throughout the marking 3101 phase, tasks attempt to keep the global mark stack at a small 3102 length but not totally empty, so that entries are available for 3103 popping by other tasks. Only when there is no more work, tasks 3104 will totally drain the global mark stack. 3105 3106 (4) SATB Buffer Queue. This is where completed SATB buffers are 3107 made available. Buffers are regularly removed from this queue 3108 and scanned for roots, so that the queue doesn't get too 3109 long. During remark, all completed buffers are processed, as 3110 well as the filled in parts of any uncompleted buffers. 3111 3112 The do_marking_step() method tries to abort when the time target 3113 has been reached. There are a few other cases when the 3114 do_marking_step() method also aborts: 3115 3116 (1) When the marking phase has been aborted (after a Full GC). 3117 3118 (2) When a global overflow (on the global stack) has been 3119 triggered. Before the task aborts, it will actually sync up with 3120 the other tasks to ensure that all the marking data structures 3121 (local queues, stacks, fingers etc.) are re-initialized so that 3122 when do_marking_step() completes, the marking phase can 3123 immediately restart. 3124 3125 (3) When enough completed SATB buffers are available. The 3126 do_marking_step() method only tries to drain SATB buffers right 3127 at the beginning. So, if enough buffers are available, the 3128 marking step aborts and the SATB buffers are processed at 3129 the beginning of the next invocation. 3130 3131 (4) To yield. when we have to yield then we abort and yield 3132 right at the end of do_marking_step(). This saves us from a lot 3133 of hassle as, by yielding we might allow a Full GC. If this 3134 happens then objects will be compacted underneath our feet, the 3135 heap might shrink, etc. We save checking for this by just 3136 aborting and doing the yield right at the end. 3137 3138 From the above it follows that the do_marking_step() method should 3139 be called in a loop (or, otherwise, regularly) until it completes. 3140 3141 If a marking step completes without its has_aborted() flag being 3142 true, it means it has completed the current marking phase (and 3143 also all other marking tasks have done so and have all synced up). 3144 3145 A method called regular_clock_call() is invoked "regularly" (in 3146 sub ms intervals) throughout marking. It is this clock method that 3147 checks all the abort conditions which were mentioned above and 3148 decides when the task should abort. A work-based scheme is used to 3149 trigger this clock method: when the number of object words the 3150 marking phase has scanned or the number of references the marking 3151 phase has visited reach a given limit. Additional invocations to 3152 the method clock have been planted in a few other strategic places 3153 too. The initial reason for the clock method was to avoid calling 3154 vtime too regularly, as it is quite expensive. So, once it was in 3155 place, it was natural to piggy-back all the other conditions on it 3156 too and not constantly check them throughout the code. 3157 3158 If do_termination is true then do_marking_step will enter its 3159 termination protocol. 3160 3161 The value of is_serial must be true when do_marking_step is being 3162 called serially (i.e. by the VMThread) and do_marking_step should 3163 skip any synchronization in the termination and overflow code. 3164 Examples include the serial remark code and the serial reference 3165 processing closures. 3166 3167 The value of is_serial must be false when do_marking_step is 3168 being called by any of the worker threads in a work gang. 3169 Examples include the concurrent marking code (CMMarkingTask), 3170 the MT remark code, and the MT reference processing closures. 3171 3172 *****************************************************************************/ 3173 3174 void G1CMTask::do_marking_step(double time_target_ms, 3175 bool do_termination, 3176 bool is_serial) { 3177 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 3178 assert(concurrent() == _cm->concurrent(), "they should be the same"); 3179 3180 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); 3181 assert(_task_queues != NULL, "invariant"); 3182 assert(_task_queue != NULL, "invariant"); 3183 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); 3184 3185 assert(!_claimed, 3186 "only one thread should claim this task at any one time"); 3187 3188 // OK, this doesn't safeguard again all possible scenarios, as it is 3189 // possible for two threads to set the _claimed flag at the same 3190 // time. But it is only for debugging purposes anyway and it will 3191 // catch most problems. 3192 _claimed = true; 3193 3194 _start_time_ms = os::elapsedVTime() * 1000.0; 3195 3196 // If do_stealing is true then do_marking_step will attempt to 3197 // steal work from the other G1CMTasks. It only makes sense to 3198 // enable stealing when the termination protocol is enabled 3199 // and do_marking_step() is not being called serially. 3200 bool do_stealing = do_termination && !is_serial; 3201 3202 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 3203 _time_target_ms = time_target_ms - diff_prediction_ms; 3204 3205 // set up the variables that are used in the work-based scheme to 3206 // call the regular clock method 3207 _words_scanned = 0; 3208 _refs_reached = 0; 3209 recalculate_limits(); 3210 3211 // clear all flags 3212 clear_has_aborted(); 3213 _has_timed_out = false; 3214 _draining_satb_buffers = false; 3215 3216 ++_calls; 3217 3218 // Set up the bitmap and oop closures. Anything that uses them is 3219 // eventually called from this method, so it is OK to allocate these 3220 // statically. 3221 G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 3222 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 3223 set_cm_oop_closure(&cm_oop_closure); 3224 3225 if (_cm->has_overflown()) { 3226 // This can happen if the mark stack overflows during a GC pause 3227 // and this task, after a yield point, restarts. We have to abort 3228 // as we need to get into the overflow protocol which happens 3229 // right at the end of this task. 3230 set_has_aborted(); 3231 } 3232 3233 // First drain any available SATB buffers. After this, we will not 3234 // look at SATB buffers before the next invocation of this method. 3235 // If enough completed SATB buffers are queued up, the regular clock 3236 // will abort this task so that it restarts. 3237 drain_satb_buffers(); 3238 // ...then partially drain the local queue and the global stack 3239 drain_local_queue(true); 3240 drain_global_stack(true); 3241 3242 do { 3243 if (!has_aborted() && _curr_region != NULL) { 3244 // This means that we're already holding on to a region. 3245 assert(_finger != NULL, "if region is not NULL, then the finger " 3246 "should not be NULL either"); 3247 3248 // We might have restarted this task after an evacuation pause 3249 // which might have evacuated the region we're holding on to 3250 // underneath our feet. Let's read its limit again to make sure 3251 // that we do not iterate over a region of the heap that 3252 // contains garbage (update_region_limit() will also move 3253 // _finger to the start of the region if it is found empty). 3254 update_region_limit(); 3255 // We will start from _finger not from the start of the region, 3256 // as we might be restarting this task after aborting half-way 3257 // through scanning this region. In this case, _finger points to 3258 // the address where we last found a marked object. If this is a 3259 // fresh region, _finger points to start(). 3260 MemRegion mr = MemRegion(_finger, _region_limit); 3261 3262 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 3263 "humongous regions should go around loop once only"); 3264 3265 // Some special cases: 3266 // If the memory region is empty, we can just give up the region. 3267 // If the current region is humongous then we only need to check 3268 // the bitmap for the bit associated with the start of the object, 3269 // scan the object if it's live, and give up the region. 3270 // Otherwise, let's iterate over the bitmap of the part of the region 3271 // that is left. 3272 // If the iteration is successful, give up the region. 3273 if (mr.is_empty()) { 3274 giveup_current_region(); 3275 regular_clock_call(); 3276 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 3277 if (_nextMarkBitMap->isMarked(mr.start())) { 3278 // The object is marked - apply the closure 3279 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 3280 bitmap_closure.do_bit(offset); 3281 } 3282 // Even if this task aborted while scanning the humongous object 3283 // we can (and should) give up the current region. 3284 giveup_current_region(); 3285 regular_clock_call(); 3286 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 3287 giveup_current_region(); 3288 regular_clock_call(); 3289 } else { 3290 assert(has_aborted(), "currently the only way to do so"); 3291 // The only way to abort the bitmap iteration is to return 3292 // false from the do_bit() method. However, inside the 3293 // do_bit() method we move the _finger to point to the 3294 // object currently being looked at. So, if we bail out, we 3295 // have definitely set _finger to something non-null. 3296 assert(_finger != NULL, "invariant"); 3297 3298 // Region iteration was actually aborted. So now _finger 3299 // points to the address of the object we last scanned. If we 3300 // leave it there, when we restart this task, we will rescan 3301 // the object. It is easy to avoid this. We move the finger by 3302 // enough to point to the next possible object header (the 3303 // bitmap knows by how much we need to move it as it knows its 3304 // granularity). 3305 assert(_finger < _region_limit, "invariant"); 3306 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 3307 // Check if bitmap iteration was aborted while scanning the last object 3308 if (new_finger >= _region_limit) { 3309 giveup_current_region(); 3310 } else { 3311 move_finger_to(new_finger); 3312 } 3313 } 3314 } 3315 // At this point we have either completed iterating over the 3316 // region we were holding on to, or we have aborted. 3317 3318 // We then partially drain the local queue and the global stack. 3319 // (Do we really need this?) 3320 drain_local_queue(true); 3321 drain_global_stack(true); 3322 3323 // Read the note on the claim_region() method on why it might 3324 // return NULL with potentially more regions available for 3325 // claiming and why we have to check out_of_regions() to determine 3326 // whether we're done or not. 3327 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 3328 // We are going to try to claim a new region. We should have 3329 // given up on the previous one. 3330 // Separated the asserts so that we know which one fires. 3331 assert(_curr_region == NULL, "invariant"); 3332 assert(_finger == NULL, "invariant"); 3333 assert(_region_limit == NULL, "invariant"); 3334 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 3335 if (claimed_region != NULL) { 3336 // Yes, we managed to claim one 3337 setup_for_region(claimed_region); 3338 assert(_curr_region == claimed_region, "invariant"); 3339 } 3340 // It is important to call the regular clock here. It might take 3341 // a while to claim a region if, for example, we hit a large 3342 // block of empty regions. So we need to call the regular clock 3343 // method once round the loop to make sure it's called 3344 // frequently enough. 3345 regular_clock_call(); 3346 } 3347 3348 if (!has_aborted() && _curr_region == NULL) { 3349 assert(_cm->out_of_regions(), 3350 "at this point we should be out of regions"); 3351 } 3352 } while ( _curr_region != NULL && !has_aborted()); 3353 3354 if (!has_aborted()) { 3355 // We cannot check whether the global stack is empty, since other 3356 // tasks might be pushing objects to it concurrently. 3357 assert(_cm->out_of_regions(), 3358 "at this point we should be out of regions"); 3359 // Try to reduce the number of available SATB buffers so that 3360 // remark has less work to do. 3361 drain_satb_buffers(); 3362 } 3363 3364 // Since we've done everything else, we can now totally drain the 3365 // local queue and global stack. 3366 drain_local_queue(false); 3367 drain_global_stack(false); 3368 3369 // Attempt at work stealing from other task's queues. 3370 if (do_stealing && !has_aborted()) { 3371 // We have not aborted. This means that we have finished all that 3372 // we could. Let's try to do some stealing... 3373 3374 // We cannot check whether the global stack is empty, since other 3375 // tasks might be pushing objects to it concurrently. 3376 assert(_cm->out_of_regions() && _task_queue->size() == 0, 3377 "only way to reach here"); 3378 while (!has_aborted()) { 3379 oop obj; 3380 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 3381 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 3382 "any stolen object should be marked"); 3383 scan_object(obj); 3384 3385 // And since we're towards the end, let's totally drain the 3386 // local queue and global stack. 3387 drain_local_queue(false); 3388 drain_global_stack(false); 3389 } else { 3390 break; 3391 } 3392 } 3393 } 3394 3395 // We still haven't aborted. Now, let's try to get into the 3396 // termination protocol. 3397 if (do_termination && !has_aborted()) { 3398 // We cannot check whether the global stack is empty, since other 3399 // tasks might be concurrently pushing objects on it. 3400 // Separated the asserts so that we know which one fires. 3401 assert(_cm->out_of_regions(), "only way to reach here"); 3402 assert(_task_queue->size() == 0, "only way to reach here"); 3403 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 3404 3405 // The G1CMTask class also extends the TerminatorTerminator class, 3406 // hence its should_exit_termination() method will also decide 3407 // whether to exit the termination protocol or not. 3408 bool finished = (is_serial || 3409 _cm->terminator()->offer_termination(this)); 3410 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 3411 _termination_time_ms += 3412 termination_end_time_ms - _termination_start_time_ms; 3413 3414 if (finished) { 3415 // We're all done. 3416 3417 if (_worker_id == 0) { 3418 // let's allow task 0 to do this 3419 if (concurrent()) { 3420 assert(_cm->concurrent_marking_in_progress(), "invariant"); 3421 // we need to set this to false before the next 3422 // safepoint. This way we ensure that the marking phase 3423 // doesn't observe any more heap expansions. 3424 _cm->clear_concurrent_marking_in_progress(); 3425 } 3426 } 3427 3428 // We can now guarantee that the global stack is empty, since 3429 // all other tasks have finished. We separated the guarantees so 3430 // that, if a condition is false, we can immediately find out 3431 // which one. 3432 guarantee(_cm->out_of_regions(), "only way to reach here"); 3433 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 3434 guarantee(_task_queue->size() == 0, "only way to reach here"); 3435 guarantee(!_cm->has_overflown(), "only way to reach here"); 3436 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 3437 } else { 3438 // Apparently there's more work to do. Let's abort this task. It 3439 // will restart it and we can hopefully find more things to do. 3440 set_has_aborted(); 3441 } 3442 } 3443 3444 // Mainly for debugging purposes to make sure that a pointer to the 3445 // closure which was statically allocated in this frame doesn't 3446 // escape it by accident. 3447 set_cm_oop_closure(NULL); 3448 double end_time_ms = os::elapsedVTime() * 1000.0; 3449 double elapsed_time_ms = end_time_ms - _start_time_ms; 3450 // Update the step history. 3451 _step_times_ms.add(elapsed_time_ms); 3452 3453 if (has_aborted()) { 3454 // The task was aborted for some reason. 3455 if (_has_timed_out) { 3456 double diff_ms = elapsed_time_ms - _time_target_ms; 3457 // Keep statistics of how well we did with respect to hitting 3458 // our target only if we actually timed out (if we aborted for 3459 // other reasons, then the results might get skewed). 3460 _marking_step_diffs_ms.add(diff_ms); 3461 } 3462 3463 if (_cm->has_overflown()) { 3464 // This is the interesting one. We aborted because a global 3465 // overflow was raised. This means we have to restart the 3466 // marking phase and start iterating over regions. However, in 3467 // order to do this we have to make sure that all tasks stop 3468 // what they are doing and re-initialize in a safe manner. We 3469 // will achieve this with the use of two barrier sync points. 3470 3471 if (!is_serial) { 3472 // We only need to enter the sync barrier if being called 3473 // from a parallel context 3474 _cm->enter_first_sync_barrier(_worker_id); 3475 3476 // When we exit this sync barrier we know that all tasks have 3477 // stopped doing marking work. So, it's now safe to 3478 // re-initialize our data structures. At the end of this method, 3479 // task 0 will clear the global data structures. 3480 } 3481 3482 // We clear the local state of this task... 3483 clear_region_fields(); 3484 3485 if (!is_serial) { 3486 // ...and enter the second barrier. 3487 _cm->enter_second_sync_barrier(_worker_id); 3488 } 3489 // At this point, if we're during the concurrent phase of 3490 // marking, everything has been re-initialized and we're 3491 // ready to restart. 3492 } 3493 } 3494 3495 _claimed = false; 3496 } 3497 3498 G1CMTask::G1CMTask(uint worker_id, 3499 G1ConcurrentMark* cm, 3500 size_t* marked_bytes, 3501 BitMap* card_bm, 3502 G1CMTaskQueue* task_queue, 3503 G1CMTaskQueueSet* task_queues) 3504 : _g1h(G1CollectedHeap::heap()), 3505 _worker_id(worker_id), _cm(cm), 3506 _claimed(false), 3507 _nextMarkBitMap(NULL), _hash_seed(17), 3508 _task_queue(task_queue), 3509 _task_queues(task_queues), 3510 _cm_oop_closure(NULL), 3511 _marked_bytes_array(marked_bytes), 3512 _card_bm(card_bm) { 3513 guarantee(task_queue != NULL, "invariant"); 3514 guarantee(task_queues != NULL, "invariant"); 3515 3516 _marking_step_diffs_ms.add(0.5); 3517 } 3518 3519 // These are formatting macros that are used below to ensure 3520 // consistent formatting. The *_H_* versions are used to format the 3521 // header for a particular value and they should be kept consistent 3522 // with the corresponding macro. Also note that most of the macros add 3523 // the necessary white space (as a prefix) which makes them a bit 3524 // easier to compose. 3525 3526 // All the output lines are prefixed with this string to be able to 3527 // identify them easily in a large log file. 3528 #define G1PPRL_LINE_PREFIX "###" 3529 3530 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 3531 #ifdef _LP64 3532 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 3533 #else // _LP64 3534 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 3535 #endif // _LP64 3536 3537 // For per-region info 3538 #define G1PPRL_TYPE_FORMAT " %-4s" 3539 #define G1PPRL_TYPE_H_FORMAT " %4s" 3540 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 3541 #define G1PPRL_BYTE_H_FORMAT " %9s" 3542 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 3543 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 3544 3545 // For summary info 3546 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 3547 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 3548 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 3549 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 3550 3551 G1PrintRegionLivenessInfoClosure:: 3552 G1PrintRegionLivenessInfoClosure(const char* phase_name) 3553 : _total_used_bytes(0), _total_capacity_bytes(0), 3554 _total_prev_live_bytes(0), _total_next_live_bytes(0), 3555 _hum_used_bytes(0), _hum_capacity_bytes(0), 3556 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 3557 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 3558 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 3559 MemRegion g1_reserved = g1h->g1_reserved(); 3560 double now = os::elapsedTime(); 3561 3562 // Print the header of the output. 3563 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 3564 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 3565 G1PPRL_SUM_ADDR_FORMAT("reserved") 3566 G1PPRL_SUM_BYTE_FORMAT("region-size"), 3567 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 3568 HeapRegion::GrainBytes); 3569 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3570 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3571 G1PPRL_TYPE_H_FORMAT 3572 G1PPRL_ADDR_BASE_H_FORMAT 3573 G1PPRL_BYTE_H_FORMAT 3574 G1PPRL_BYTE_H_FORMAT 3575 G1PPRL_BYTE_H_FORMAT 3576 G1PPRL_DOUBLE_H_FORMAT 3577 G1PPRL_BYTE_H_FORMAT 3578 G1PPRL_BYTE_H_FORMAT, 3579 "type", "address-range", 3580 "used", "prev-live", "next-live", "gc-eff", 3581 "remset", "code-roots"); 3582 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3583 G1PPRL_TYPE_H_FORMAT 3584 G1PPRL_ADDR_BASE_H_FORMAT 3585 G1PPRL_BYTE_H_FORMAT 3586 G1PPRL_BYTE_H_FORMAT 3587 G1PPRL_BYTE_H_FORMAT 3588 G1PPRL_DOUBLE_H_FORMAT 3589 G1PPRL_BYTE_H_FORMAT 3590 G1PPRL_BYTE_H_FORMAT, 3591 "", "", 3592 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 3593 "(bytes)", "(bytes)"); 3594 } 3595 3596 // It takes as a parameter a reference to one of the _hum_* fields, it 3597 // deduces the corresponding value for a region in a humongous region 3598 // series (either the region size, or what's left if the _hum_* field 3599 // is < the region size), and updates the _hum_* field accordingly. 3600 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { 3601 size_t bytes = 0; 3602 // The > 0 check is to deal with the prev and next live bytes which 3603 // could be 0. 3604 if (*hum_bytes > 0) { 3605 bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes); 3606 *hum_bytes -= bytes; 3607 } 3608 return bytes; 3609 } 3610 3611 // It deduces the values for a region in a humongous region series 3612 // from the _hum_* fields and updates those accordingly. It assumes 3613 // that that _hum_* fields have already been set up from the "starts 3614 // humongous" region and we visit the regions in address order. 3615 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, 3616 size_t* capacity_bytes, 3617 size_t* prev_live_bytes, 3618 size_t* next_live_bytes) { 3619 assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); 3620 *used_bytes = get_hum_bytes(&_hum_used_bytes); 3621 *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); 3622 *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); 3623 *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); 3624 } 3625 3626 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 3627 const char* type = r->get_type_str(); 3628 HeapWord* bottom = r->bottom(); 3629 HeapWord* end = r->end(); 3630 size_t capacity_bytes = r->capacity(); 3631 size_t used_bytes = r->used(); 3632 size_t prev_live_bytes = r->live_bytes(); 3633 size_t next_live_bytes = r->next_live_bytes(); 3634 double gc_eff = r->gc_efficiency(); 3635 size_t remset_bytes = r->rem_set()->mem_size(); 3636 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3637 3638 if (r->is_starts_humongous()) { 3639 assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && 3640 _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, 3641 "they should have been zeroed after the last time we used them"); 3642 // Set up the _hum_* fields. 3643 _hum_capacity_bytes = capacity_bytes; 3644 _hum_used_bytes = used_bytes; 3645 _hum_prev_live_bytes = prev_live_bytes; 3646 _hum_next_live_bytes = next_live_bytes; 3647 get_hum_bytes(&used_bytes, &capacity_bytes, 3648 &prev_live_bytes, &next_live_bytes); 3649 end = bottom + HeapRegion::GrainWords; 3650 } else if (r->is_continues_humongous()) { 3651 get_hum_bytes(&used_bytes, &capacity_bytes, 3652 &prev_live_bytes, &next_live_bytes); 3653 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 3654 } 3655 3656 _total_used_bytes += used_bytes; 3657 _total_capacity_bytes += capacity_bytes; 3658 _total_prev_live_bytes += prev_live_bytes; 3659 _total_next_live_bytes += next_live_bytes; 3660 _total_remset_bytes += remset_bytes; 3661 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3662 3663 // Print a line for this particular region. 3664 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3665 G1PPRL_TYPE_FORMAT 3666 G1PPRL_ADDR_BASE_FORMAT 3667 G1PPRL_BYTE_FORMAT 3668 G1PPRL_BYTE_FORMAT 3669 G1PPRL_BYTE_FORMAT 3670 G1PPRL_DOUBLE_FORMAT 3671 G1PPRL_BYTE_FORMAT 3672 G1PPRL_BYTE_FORMAT, 3673 type, p2i(bottom), p2i(end), 3674 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3675 remset_bytes, strong_code_roots_bytes); 3676 3677 return false; 3678 } 3679 3680 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3681 // add static memory usages to remembered set sizes 3682 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3683 // Print the footer of the output. 3684 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3685 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3686 " SUMMARY" 3687 G1PPRL_SUM_MB_FORMAT("capacity") 3688 G1PPRL_SUM_MB_PERC_FORMAT("used") 3689 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3690 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3691 G1PPRL_SUM_MB_FORMAT("remset") 3692 G1PPRL_SUM_MB_FORMAT("code-roots"), 3693 bytes_to_mb(_total_capacity_bytes), 3694 bytes_to_mb(_total_used_bytes), 3695 perc(_total_used_bytes, _total_capacity_bytes), 3696 bytes_to_mb(_total_prev_live_bytes), 3697 perc(_total_prev_live_bytes, _total_capacity_bytes), 3698 bytes_to_mb(_total_next_live_bytes), 3699 perc(_total_next_live_bytes, _total_capacity_bytes), 3700 bytes_to_mb(_total_remset_bytes), 3701 bytes_to_mb(_total_strong_code_roots_bytes)); 3702 }