/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
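  // Each bitmap bit covers (HeapWordSize << _shifter) bytes, so marks can
  // only appear at addresses aligned to that granularity. Illustrative
  // numbers only: with _shifter == 0 and 8-byte heap words, an addr of
  // 0x1004 is rounded up to 0x1008 before being converted to a bit offset.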
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  G1ConcurrentMark* _cm;
  G1CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(G1ConcurrentMark* cm, G1CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer _hrclaimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
    AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
  }
};

void G1CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
}

void G1CMBitMap::clearRange(MemRegion mr) {
  // Clamp the range to the part of the heap this bitmap covers. (The
  // previous code discarded the result of intersection(), making the call
  // a no-op; assigning it is the apparent intent.)
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack(G1ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
{}

bool G1CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of G1ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void G1CMMarkStack::expand() {
  // Called, during remark, if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
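  // Reserve the new, larger backing store first; only if that reservation
  // succeeds do we release the old one below. On failure we simply keep
  // marking with the current capacity.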
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    // Failed to double capacity, continue;
    log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  _capacity / K, new_capacity / K);
  }
}

void G1CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void G1CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool G1CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

void G1CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void G1CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            "saved index: %d index: %d", _saved_index, _index);
  _saved_index = -1;
}

G1CMRootRegions::G1CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void G1CMRootRegions::init(G1CollectedHeap* g1h, G1ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
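  // Claiming is a two-step check: an unsynchronized read of _next_survivor
  // as a fast path, then a re-read under RootRegionScan_lock before the
  // claim actually happens.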
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
           CardTableModRefBS::card_shift,
           false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _concurrent_phase_started(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (%u) "
            "than ParallelGCThreads (%u).",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num *
      (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor          = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
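    // Only user-supplied values are validated here: a MarkStackSize given
    // on the command line is checked against MarkStackSizeMax, whether the
    // latter is the default or was itself set on the command line.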
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new G1CMTask(i, this,
                             _count_marked_bytes[i],
                             &_count_card_bitmaps[i],
                             task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // We need this to make sure that the flag is on during the evacuation
  // pause with initial mark piggy-backed.
  set_concurrent_marking_in_progress();
}


void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void G1ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for a Full GC or an evacuation pause to occur while it
 * is suspended. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);

      log_info(gc)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  G1ConcurrentMark*     _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      G1CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check(worker_id);

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
 private:
  G1ConcurrentMark* _cm;

 public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
    GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    G1CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
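    // scan_finished() below also wakes up, via notify_scan_done(), any
    // thread blocked in wait_until_scan_finished().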
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
  assert(!_concurrent_phase_started, "Sanity");
  _concurrent_phase_started = true;
  _g1h->gc_timer_cm()->register_gc_concurrent_start(title);
}

void G1ConcurrentMark::register_concurrent_phase_end() {
  if (_concurrent_phase_started) {
    _concurrent_phase_started = false;
    _g1h->gc_timer_cm()->register_gc_concurrent_end();
  }
}

void G1ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    log_develop_trace(gc)("Remark led to restart for overflow.");

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
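      // (Each worker has its own card bitmap and marked-bytes array; the
      // aggregation folds them into the global counting structures.)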
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class G1CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1.
  void set_bit_for_region(HeapRegion* hr) {
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    _region_bm->par_at_put(index, true);
  }

 public:
  G1CMCountDataClosureBase(G1CollectedHeap* g1h,
                           BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public G1CMCountDataClosureBase {
  G1CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(G1CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    G1CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {
    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           "Preconditions not met - "
           "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
           p2i(start), p2i(ntams), p2i(hr->end()));

    // Find the first marked object at or after "start".
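    // The marking bitmap is only set for object start addresses, so the
    // loop below hops from marked object to marked object, using each
    // object's size to find its end and the cards it spans.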
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in heap - obj_end
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // This will happen if we are handling a humongous object that spans
      // several heap regions.
      if (obj_end > hr->end()) {
        break;
      }
      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.
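// The verification recomputes the expected values with
// CalcLiveObjectsClosure into the _exp_* bitmaps and then compares them,
// bit by bit, against the actual region and card bitmaps; only a bit set
// in the expected bitmap but clear in the actual one counts as a failure.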
class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;     // Region BM to be verified
  BitMap* _card_bm;       // Card BM to be verified

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    if (exp_marked_bytes > act_marked_bytes) {
      if (hr->is_starts_humongous()) {
        // For start_humongous regions, the size of the whole object will be
        // in exp_marked_bytes.
        HeapRegion* region = hr;
        int num_regions;
        for (num_regions = 0; region != NULL; num_regions++) {
          region = _g1h->next_region_in_humongous(region);
        }
        if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) {
          failures += 1;
        } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) {
          failures += 1;
        }
      } else {
        // We're not OK if expected marked bytes > actual marked bytes. It means
        // we have missed accounting some objects during the actual marking.
        failures += 1;
      }
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bit maps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      failures += 1;
    }

    // Verify that the card bit maps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bit map and the corresponding bit in the actual bitmap is not set.

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        failures += 1;
      }
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
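    // Instead we keep going so that every violating region contributes
    // to the final failure count.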
    return false;
  }
};

class G1ParVerifyFinalCountTask: public AbstractGangTask {
 protected:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint    _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int  _failures;

  HeapRegionClaimer _hrclaimer;

 public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
    assert(VerifyDuringGC, "don't call this otherwise");
    assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
    assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    VerifyLiveObjectDataHRClosure verify_cl(_g1h,
                                            _actual_region_bm, _actual_card_bm,
                                            _expected_region_bm,
                                            _expected_card_bm);

    _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);

    Atomic::add(verify_cl.failures(), &_failures);
  }

  int failures() const { return _failures; }
};

// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top]
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region,
// containing live data, in the region liveness bitmap.

class FinalCountDataUpdateClosure: public G1CMCountDataClosureBase {
 public:
  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                              BitMap* region_bm,
                              BitMap* card_bm) :
    G1CMCountDataClosureBase(g1h, region_bm, card_bm) { }

  bool doHeapRegion(HeapRegion* hr) {
    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* top   = hr->top();

    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

    // Mark the allocated-since-marking portion...
    if (ntams < top) {
      // This definitely means the region has live objects.
      set_bit_for_region(hr);

      // Now set the bits in the card bitmap for [ntams, top)
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in heap - top
      // could be actually just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      assert(end_idx <= _card_bm->size(),
             "oob: end_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
             end_idx, _card_bm->size());
      assert(start_idx < _card_bm->size(),
             "oob: start_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
             start_idx, _card_bm->size());

      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
    }

    // Set the bit for the region if it contains live data
    if (hr->next_marked_bytes() > 0) {
      set_bit_for_region(hr);
    }

    return false;
  }
};

class G1ParFinalCountTask: public AbstractGangTask {
 protected:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint    _n_workers;
  HeapRegionClaimer _hrclaimer;

 public:
  G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
    : AbstractGangTask("G1 final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    FinalCountDataUpdateClosure final_update_cl(_g1h,
                                                _actual_region_bm,
                                                _actual_card_bm);

    _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
  }
};

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

 public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    // Regions are claimed via the HeapRegionClaimer in G1ParNoteEndTask,
    // so each region is processed exactly once here.
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

 protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

 public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  // Do counting once more with the world stopped for good measure.
  G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);

  g1h->workers()->run_task(&g1_par_count_task);

  if (VerifyDuringGC) {
    // Verify that the counting data accumulated during marking matches
    // that calculated by walking the marking bitmap.

    // Bitmaps to hold expected values
    BitMap expected_region_bm(_region_bm.size(), true);
    BitMap expected_card_bm(_card_bm.size(), true);

    G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
                                                 &_region_bm,
                                                 &_card_bm,
                                                 &expected_region_bm,
                                                 &expected_card_bm);

    g1h->workers()->run_task(&g1_par_verify_task);

    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  }

  size_t start_used_bytes = g1h->used();
  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitmap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the remembered sets. This must happen before the
  // record_concurrent_mark_cleanup_end() call below, since it affects
  // the metric by which we sort the heap regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set(&_region_bm, &_card_bm);
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();

  g1h->trace_heap_after_concurrent_cycle();
}

void G1ConcurrentMark::completeCleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Using the tasks' local queues also removes the potential
// for workers to interfere with one another, which could occur if they
// operated on the global stack directly.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  int _ref_counter_limit;
  int _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task),
    _ref_counter_limit(G1RefProcDrainInterval), _is_serial(is_serial) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.
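//
// Note on the time target: the value passed to do_marking_step() below is
// effectively infinite, so a marking step is never cut short by its time
// quota; the loop in do_void() keeps re-invoking do_marking_step() until
// it completes without aborting or the global mark stack overflows.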

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}

void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK not to
    // de-populate the discovered reference lists. We could have
    // done so, but the only benefit would be that, when marking
    // restarts, fewer reference objects would be discovered.
    return;
  }

  ResourceMark rm;
  HandleMark hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    GCTraceTime(Debug, gc) trace("Reference Processing", g1h->gc_timer_cm());

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the JNI
    // references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (e.g.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          g1h->gc_timer_cm());
    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
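    //
    // That flag is checked again below; once it is set, the reference
    // processing results cannot be trusted and marking will have to be
    // restarted.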

    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");

    if (_markStack.overflow()) {
      // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed
    return;
  }

  assert(_markStack.isEmpty(), "Marking should have completed");

  // Unload Klasses, String, Symbols, Code Cache, etc.
  {
    GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm());

    if (ClassUnloadingWithConcurrentMark) {
      bool purged_classes;

      {
        GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm());
        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
      }

      {
        GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm());
        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
      }
    }

    if (G1StringDedup::is_enabled()) {
      GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm());
      G1StringDedup::unlink(&g1_is_alive);
    }
  }
}

void G1ConcurrentMark::swapMarkBitMaps() {
  G1CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap = (G1CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap = (G1CMBitMap*) temp;
}

// Closure for marking entries in SATB buffers.
class G1CMSATBBufferClosure : public SATBBufferClosure {
private:
  G1CMTask* _task;
  G1CollectedHeap* _g1h;

  // This is very similar to G1CMTask::deal_with_reference, but with
  // more relaxed requirements for the argument, so this must be more
  // circumspect about treating the argument as an object.
  void do_entry(void* entry) const {
    _task->increment_refs_reached();
    HeapRegion* hr = _g1h->heap_region_containing(entry);
    if (entry < hr->next_top_at_mark_start()) {
      // Until we get here, we don't know whether entry refers to a valid
      // object; it could instead have been a stale reference.
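      // (Objects allocated at or above NTAMS are implicitly live and need
      // no marking; ignoring such entries also filters out stale ones that
      // point into regions reclaimed and reallocated during the cycle.)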
      oop obj = static_cast<oop>(entry);
      assert(obj->is_oop(true /* ignore mark word */),
             "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
      _task->make_reference_grey(obj, hr);
    }
  }

public:
  G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  G1CMSATBBufferClosure _cm_satb_cl;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
    _cm_satb_cl(task, g1h),
    _cm_cl(g1h, g1h->concurrent_mark(), task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
        // however, the oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
      }
    }
  }
};

class G1CMRemarkTask: public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;
public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true         /* do_termination */,
                              false        /* is_serial */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void G1ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());

  g1h->ensure_parsability(false);

  // this is remark, so we'll use up all active threads
  uint active_workers = g1h->workers()->active_workers();
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the G1ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  {
    StrongRootsScope srs(active_workers);

    G1CMRemarkTask remarkTask(this, active_workers);
    // We will start all available threads, even if we decide that the
    // active_workers will be fewer. The extra ones will just bail out
    // immediately.
    g1h->workers()->run_task(&remarkTask);
  }

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
            BOOL_TO_STR(has_overflown()),
            satb_mq_set.completed_buffers_num());

  print_stats();
}

void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((G1CMBitMap*)_prevMarkBitMap)->clearRange(mr);
}

HeapRegion*
G1ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    HeapRegion* curr_region = _g1h->heap_region_containing(finger);

    // Above heap_region_containing may return NULL as we always scan and claim
    // until the end of the heap. In this case, just jump to the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit  = curr_region->next_top_at_mark_start();

      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
      assert(_finger >= end, "the finger should have moved forward");

      if (limit > bottom) {
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        // we return NULL and the caller should try calling
        // claim_region() again.
        return NULL;
      }
    } else {
      assert(_finger > finger, "the finger should have moved forward");
      // read it again
      finger = _finger;
    }
  }

  return NULL;
}

#ifndef PRODUCT
class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
private:
  G1CollectedHeap* _g1h;
  const char* _phase;
  int _info;

public:
  VerifyNoCSetOops(const char* phase, int info = -1) :
    _g1h(G1CollectedHeap::heap()),
    _phase(phase),
    _info(info)
  { }

  void operator()(oop obj) const {
    guarantee(obj->is_oop(),
              "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
              p2i(obj), _phase, _info);
    guarantee(!_g1h->obj_in_cs(obj),
              "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
              p2i(obj), _phase, _info);
  }
};

void G1ConcurrentMark::verify_no_cset_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
    return;
  }

  // Verify entries on the global mark stack
  _markStack.iterate(VerifyNoCSetOops("Stack"));

  // Verify entries on the task queues
  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->iterate(VerifyNoCSetOops("Queue", i));
  }

  // Verify the global finger
  HeapWord* global_finger = finger();
  if (global_finger != NULL && global_finger < _heap_end) {
    // Since we always iterate over all regions, we might get a NULL HeapRegion
    // here.
    HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              "global finger: " PTR_FORMAT " region: " HR_FORMAT,
              p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
  }

  // Verify the task fingers
  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  for (uint i = 0; i < parallel_marking_threads(); ++i) {
    G1CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap_end) {
      // See above note on the global finger verification.
      HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                !task_hr->in_collection_set(),
                "task finger: " PTR_FORMAT " region: " HR_FORMAT,
                p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
    }
  }
}
#endif // PRODUCT

// Aggregate the counting data that was constructed concurrently
// with marking.
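//
// Each worker thread maintains its own card bitmap and marked-bytes
// array while marking; the closure below unions those per-worker card
// bitmaps into the global card bitmap and sums the per-worker marked
// bytes into each region's marked-bytes total.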
class AggregateCountDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;
  BitMap* _cm_card_bm;
  uint _max_worker_id;

public:
  AggregateCountDataHRClosure(G1CollectedHeap* g1h,
                              BitMap* cm_card_bm,
                              uint max_worker_id) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }

  bool doHeapRegion(HeapRegion* hr) {
    HeapWord* start = hr->bottom();
    HeapWord* limit = hr->next_top_at_mark_start();
    HeapWord* end = hr->end();

    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
           "Preconditions not met - "
           "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
           "top: " PTR_FORMAT ", end: " PTR_FORMAT,
           p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));

    assert(hr->next_marked_bytes() == 0, "Precondition");

    if (start == limit) {
      // NTAMS of this region has not been set so nothing to do.
      return false;
    }

    // 'start' should be in the heap.
    assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
    assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);

    // If ntams is not card aligned then we bump the card bitmap index
    // for limit so that we get all the cards spanned by
    // the object ending at ntams.
    // Note: if this is the last region in the heap then ntams
    // could actually be just beyond the end of the heap;
    // limit_idx will then correspond to a (non-existent) card
    // that is also outside the heap.
    if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
      limit_idx += 1;
    }

    assert(limit_idx <= end_idx, "or else use atomics");

    // Aggregate the "stripe" in the count data associated with hr.
    uint hrm_index = hr->hrm_index();
    size_t marked_bytes = 0;

    for (uint i = 0; i < _max_worker_id; i += 1) {
      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);

      // Fetch the marked_bytes in this region for task i and
      // add it to the running total for this region.
      marked_bytes += marked_bytes_array[hrm_index];

      // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
      // into the global card bitmap.
      BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);

      while (scan_idx < limit_idx) {
        assert(task_card_bm->at(scan_idx) == true, "should be");
        _cm_card_bm->set_bit(scan_idx);
        assert(_cm_card_bm->at(scan_idx) == true, "should be");

        // BitMap::get_next_one_offset() can handle the case when
        // its left_offset parameter is greater than its right_offset
        // parameter. It does, however, have an early exit if
        // left_offset == right_offset. So let's limit the value
        // passed in for left offset here.
        BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
        scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
      }
    }

    // Update the marked bytes for this region.
    hr->add_to_marked_bytes(marked_bytes);

    // Next heap region
    return false;
  }
};

class G1AggregateCountDataTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  BitMap* _cm_card_bm;
  uint _max_worker_id;
  uint _active_workers;
  HeapRegionClaimer _hrclaimer;

public:
  G1AggregateCountDataTask(G1CollectedHeap* g1h,
                           G1ConcurrentMark* cm,
                           BitMap* cm_card_bm,
                           uint max_worker_id,
                           uint n_workers) :
    AbstractGangTask("Count Aggregation"),
    _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
    _max_worker_id(max_worker_id),
    _active_workers(n_workers),
    _hrclaimer(_active_workers) {
  }

  void work(uint worker_id) {
    AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);

    _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
  }
};

void G1ConcurrentMark::aggregate_count_data() {
  uint n_workers = _g1h->workers()->active_workers();

  G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
                                           _max_worker_id, n_workers);

  _g1h->workers()->run_task(&g1_par_agg_task);
}

// Clear the per-worker arrays used to store the per-region counting data
void G1ConcurrentMark::clear_all_count_data() {
  // Clear the global card bitmap - it will be filled during
  // liveness count aggregation (during remark) and the
  // final counting task.
  _card_bm.clear();

  // Clear the global region bitmap - it will be filled as part
  // of the final counting task.
  _region_bm.clear();

  uint max_regions = _g1h->max_regions();
  assert(_max_worker_id > 0, "uninitialized");

  for (uint i = 0; i < _max_worker_id; i += 1) {
    BitMap* task_card_bm = count_card_bitmap_for(i);
    size_t* marked_bytes_array = count_marked_bytes_array_for(i);

    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    assert(marked_bytes_array != NULL, "uninitialized");

    memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
    task_card_bm->clear();
  }
}

void G1ConcurrentMark::print_stats() {
  if (!log_is_enabled(Debug, gc, stats)) {
    return;
  }
  log_debug(gc, stats)("---------------------------------------------------------------------");
  for (size_t i = 0; i < _active_tasks; ++i) {
    _tasks[i]->print_stats();
    log_debug(gc, stats)("---------------------------------------------------------------------");
  }
}

// abandon current marking iteration due to a Full GC
void G1ConcurrentMark::abort() {
  if (!cmThread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  _nextMarkBitMap->clearAll();

  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Clear the liveness counting data
  clear_all_count_data();
  // Empty mark stack
  reset_marking_state();
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking, so we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
                                 false, /* new active value */
                                 satb_mq_set.is_active() /* expected_active */);

  _g1h->trace_heap_after_concurrent_cycle();

  // Close any open concurrent phase timing
  register_concurrent_phase_end();

  _g1h->register_concurrent_cycle_end();
}

static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

void G1ConcurrentMark::print_summary_info() {
  LogHandle(gc, marking) log;
  if (!log.is_trace()) {
    return;
  }

  log.trace(" Concurrent marking:");
  print_ms_time_info("  ", "init marks", _init_times);
  print_ms_time_info("  ", "remarks", _remark_times);
  {
    print_ms_time_info("     ", "final marks", _remark_mark_times);
    print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
  }
  print_ms_time_info("  ", "cleanups", _cleanup_times);
  log.trace("    Final counting total time = %8.2f s (avg = %8.2f ms).",
            _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  if (G1ScrubRemSets) {
    log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
              _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  }
  log.trace("  Total stop_world time = %8.2f s.",
            (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
  log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
            cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
}

void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  _parallel_workers->print_worker_threads_on(st);
}

void G1ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
}

// We take a break if someone is trying to stop the world.
bool G1ConcurrentMark::do_yield_check(uint worker_id) {
  if (SuspendibleThreadSet::should_yield()) {
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

// Closure for iteration over bitmaps
class G1CMBitMapClosure : public BitMapClosure {
private:
  // the bitmap that is being iterated over
  G1CMBitMap* _nextMarkBitMap;
  G1ConcurrentMark* _cm;
  G1CMTask* _task;

public:
  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) :
    _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }

  bool do_bit(size_t offset) {
    HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
    assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert( addr < _cm->finger(), "invariant");
    assert(addr >= _task->finger(), "invariant");

    // We move the task's local finger along.
    _task->move_finger_to(addr);

    _task->scan_object(oop(addr));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);

    // if the has_aborted flag has been raised, we need to bail out of
    // the iteration
    return !_task->has_aborted();
  }
};

static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
  ReferenceProcessor* result = g1h->ref_processor_cm();
  assert(result != NULL, "CM reference processor should not be NULL");
  return result;
}

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               G1ConcurrentMark* cm,
                               G1CMTask* task)
  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
    _g1h(g1h), _cm(cm), _task(task)
{ }

void G1CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  _curr_region = hr;
  _finger      = hr->bottom();
  update_region_limit();
}

void G1CMTask::update_region_limit() {
  HeapRegion* hr = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit = hr->next_top_at_mark_start();

  if (limit == bottom) {
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}

void G1CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  clear_region_fields();
}

void G1CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
  _curr_region  = NULL;
  _finger       = NULL;
  _region_limit = NULL;
}

void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  if (cm_oop_closure == NULL) {
    assert(_cm_oop_closure != NULL, "invariant");
  } else {
    assert(_cm_oop_closure == NULL, "invariant");
  }
  _cm_oop_closure = cm_oop_closure;
}

void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
  guarantee(nextMarkBitMap != NULL, "invariant");
  _nextMarkBitMap = nextMarkBitMap;
  clear_region_fields();

  _calls                     = 0;
  _elapsed_time_ms           = 0.0;
  _termination_time_ms       = 0.0;
  _termination_start_time_ms = 0.0;
}

bool G1CMTask::should_exit_termination() {
  regular_clock_call();
  // This is called when we are in the termination protocol. We should
  // quit if, for some reason, this task wants to abort or the global
  // stack is not empty (this means that we can get work from it).
  return !_cm->mark_stack_empty() || has_aborted();
}

void G1CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
         "shouldn't have been called otherwise");
  regular_clock_call();
}

void G1CMTask::regular_clock_call() {
  if (has_aborted()) return;

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    set_has_aborted();
    return;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!concurrent()) return;

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    set_has_aborted();
    return;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (3) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    return;
  }

  // (4) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    return;
  }

  // (5) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
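  // (_draining_satb_buffers is set while drain_satb_buffers() is running,
  // so that this check does not immediately re-abort the task that was
  // restarted precisely to drain those buffers.)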
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    // we do need to process SATB buffers, so we'll abort and restart
    // the marking task to do so
    set_has_aborted();
    return;
  }
}

void G1CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit      = _real_words_scanned_limit;

  _real_refs_reached_limit  = _refs_reached + refs_reached_period;
  _refs_reached_limit       = _real_refs_reached_limit;
}

void G1CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit  = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}

void G1CMTask::move_entries_to_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the local queue
  oop buffer[global_stack_transfer_size];

  int n = 0;
  oop obj;
  while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
    buffer[n] = obj;
    ++n;
  }

  if (n > 0) {
    // we popped at least one entry from the local queue

    if (!_cm->mark_stack_push(buffer, n)) {
      set_has_aborted();
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void G1CMTask::get_entries_from_global_stack() {
  // local array where we'll store the entries that will be popped
  // from the global stack.
  oop buffer[global_stack_transfer_size];
  int n;
  _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  assert(n <= global_stack_transfer_size,
         "we should not pop more than the given limit");
  if (n > 0) {
    // yes, we did actually pop at least one entry
    for (int i = 0; i < n; ++i) {
      bool success = _task_queue->push(buffer[i]);
      // We only call this when the local queue is empty or under a
      // given target limit. So, we do not expect this push to fail.
      assert(success, "invariant");
    }
  }

  // this operation was quite expensive, so decrease the limits
  decrease_limits();
}

void G1CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) return;

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
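  // For a partial drain the target is one third of the queue's capacity,
  // capped at GCDrainStackTargetSize; for a total drain it is zero.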
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    oop obj;
    bool ret = _task_queue->pop_local(obj);
    while (ret) {
      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
      assert(!_g1h->is_on_master_free_list(
                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");

      scan_object(obj);

      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(obj);
      }
    }
  }
}

void G1CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end). Notice that,
  // because we move entries from the global stack in chunks or
  // because another task might be doing the same, we might in fact
  // drop below the target. But, this is not a problem.
  size_t target_size;
  if (partially) {
    target_size = _cm->partial_mark_stack_size_target();
  } else {
    target_size = 0;
  }

  if (_cm->mark_stack_size() > target_size) {
    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      get_entries_from_global_stack();
      drain_local_queue(partially);
    }
  }
}

// The SATB queue code makes several assumptions about whether to call the
// par or non-par versions of the methods. This is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
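  // (apply_closure_to_completed_buffer() returns false once no completed
  // buffer remains to be claimed, which is what ends the loop.)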
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    regular_clock_call();
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}

void G1CMTask::print_stats() {
  log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
                       _worker_id, _calls);
  log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                       _elapsed_time_ms, _termination_time_ms);
  log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                       _step_times_ms.num(), _step_times_ms.avg(),
                       _step_times_ms.sd());
  log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
                       _step_times_ms.maximum(), _step_times_ms.sum());
}

bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
  return _task_queues->steal(worker_id, hash_seed, obj);
}

/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework. It can be called in parallel
    with other invocations of do_marking_step() on different tasks
    (but only one per task, obviously) and concurrently with the
    mutator threads, or during remark, hence it eliminates the need
    for two versions of the code. When called during remark, it will
    pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
    it needs to yield.

    The data structures that it uses to do marking work are the
    following:

      (1) Marking Bitmap. If there are gray objects that appear only
      on the bitmap (this happens either when dealing with an overflow
      or when the initial marking phase has simply marked the roots
      and didn't push them on the stack), then tasks claim heap
      regions whose bitmap they then scan to find gray objects. A
      global finger indicates where the end of the last claimed region
      is. A local finger indicates how far into the region a task has
      scanned. The two fingers are used to determine how to gray an
      object (i.e. whether simply marking it is OK, as it will be
      visited by a task in the future, or whether it needs to be also
      pushed on a stack).

      (2) Local Queue. Each task has a local queue, which it accesses
      reasonably efficiently. Other tasks can steal from
      it when they run out of work. Throughout the marking phase, a
      task attempts to keep its local queue short but not totally
      empty, so that entries are available for stealing by other
      tasks. Only when there is no more work will a task totally
      drain its local queue.

      (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and finer-grained
      interaction with it might cause contention. If it
      overflows, then the marking phase should restart and iterate
      over the bitmap to identify gray objects. Throughout the marking
      phase, tasks attempt to keep the global mark stack at a small
      length but not totally empty, so that entries are available for
      popping by other tasks. Only when there is no more work will
      tasks totally drain the global mark stack.

      (4) SATB Buffer Queue. This is where completed SATB buffers are
      made available. Buffers are regularly removed from this queue
      and scanned for roots, so that the queue doesn't get too
      long. During remark, all completed buffers are processed, as
      well as the filled in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

      (1) When the marking phase has been aborted (after a Full GC).

      (2) When a global overflow (on the global stack) has been
      triggered. Before the task aborts, it will actually sync up with
      the other tasks to ensure that all the marking data structures
      (local queues, stacks, fingers etc.) are re-initialized so that
      when do_marking_step() completes, the marking phase can
      immediately restart.

      (3) When enough completed SATB buffers are available. The
      do_marking_step() method only tries to drain SATB buffers right
      at the beginning. So, if enough buffers are available, the
      marking step aborts and the SATB buffers are processed at
      the beginning of the next invocation.

      (4) To yield. When we have to yield, we abort and yield
      right at the end of do_marking_step(). This saves us from a lot
      of hassle, as by yielding we might allow a Full GC. If this
      happens then objects will be compacted underneath our feet, the
      heap might shrink, etc. We save checking for this by just
      aborting and doing the yield right at the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (in
    sub ms intervals) throughout marking. It is this clock method that
    checks all the abort conditions which were mentioned above and
    decides when the task should abort. A work-based scheme is used to
    trigger this clock method: when the number of object words the
    marking phase has scanned or the number of references the marking
    phase has visited reach a given limit. Additional calls to the
    clock method have been planted in a few other strategic places
    too. The initial reason for the clock method was to avoid calling
    vtime too regularly, as it is quite expensive. So, once it was in
    place, it was natural to piggy-back all the other conditions on it
    too and not constantly check them throughout the code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.

 *****************************************************************************/

void G1CMTask::do_marking_step(double time_target_ms,
                               bool do_termination,
                               bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it
  // is possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other G1CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // Set up the variables that are used in the work-based scheme to
  // call the regular clock method.
  _words_scanned = 0;
  _refs_reached  = 0;
  recalculate_limits();

  // Clear all flags.
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack.
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure.
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object.
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region == NULL, "invariant");
      assert(_finger == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one.
        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");
    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt to steal work from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");
    while (!has_aborted()) {
      oop obj;
      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The G1CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // Let's allow task 0 to do this.
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // We need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
    } else {
      // Apparently there's more work to do. Let's abort this task;
      // our caller will restart it and we can hopefully find more
      // things to do.
      set_has_aborted();
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.
    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context.
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
  }

  _claimed = false;
}

G1CMTask::G1CMTask(uint worker_id,
                   G1ConcurrentMark* cm,
                   size_t* marked_bytes,
                   BitMap* card_bm,
                   G1CMTaskQueue* task_queue,
                   G1CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL),
    _marked_bytes_array(marked_bytes),
    _card_bm(card_bm) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix), which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string so that they can
// be identified easily in a large log file.
#define G1PPRL_LINE_PREFIX             "###"

#define G1PPRL_ADDR_BASE_FORMAT        " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT             " %-4s"
#define G1PPRL_TYPE_H_FORMAT           " %4s"
#define G1PPRL_BYTE_FORMAT             " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT           " %9s"
#define G1PPRL_DOUBLE_FORMAT           " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT         " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(const char* phase_name)
  : _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _hum_used_bytes(0), _hum_capacity_bytes(0),
    _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
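  // The header produced below looks roughly like this (an
  // illustrative sketch only; exact spacing and column widths come
  // from the G1PPRL_* format macros above):
  //   ### PHASE <phase_name> @ <time>
  //   ### HEAP reserved: <start>-<end> region-size: <bytes>
  //   ###
  //   ### type address-range used prev-live next-live gc-eff remset code-roots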
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX " PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX " HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "(bytes)");
}

// This method takes a pointer to one of the _hum_* fields, deduces
// the corresponding value for a region in a humongous region series
// (either the region size, or what's left of it if the _hum_* field
// is smaller than the region size), and updates the _hum_* field
// accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  size_t bytes = 0;
  // The > 0 check is to deal with the prev and next live bytes which
  // could be 0.
  if (*hum_bytes > 0) {
    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
    *hum_bytes -= bytes;
  }
  return bytes;
}

// This method deduces the values for a region in a humongous region
// series from the _hum_* fields and updates those accordingly. It
// assumes that the _hum_* fields have already been set up from the
// "starts humongous" region and that we visit the regions in address
// order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
                                                     size_t* capacity_bytes,
                                                     size_t* prev_live_bytes,
                                                     size_t* next_live_bytes) {
  assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  *used_bytes = get_hum_bytes(&_hum_used_bytes);
  *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
  *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type = r->get_type_str();
  HeapWord* bottom = r->bottom();
  HeapWord* end = r->end();
  size_t capacity_bytes = r->capacity();
  size_t used_bytes = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff = r->gc_efficiency();
  size_t remset_bytes = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  if (r->is_starts_humongous()) {
    assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
           _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
           "they should have been zeroed after the last time we used them");
    // Set up the _hum_* fields.
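    // (Each region in a humongous series is reported as one
    // region-sized chunk: the "starts humongous" region banks the
    // series' totals in the _hum_* fields here, and get_hum_bytes()
    // pays them out region by region, including to the "continues
    // humongous" regions that follow in address order.)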
    _hum_capacity_bytes = capacity_bytes;
    _hum_used_bytes = used_bytes;
    _hum_prev_live_bytes = prev_live_bytes;
    _hum_next_live_bytes = next_live_bytes;
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    end = bottom + HeapRegion::GrainWords;
  } else if (r->is_continues_humongous()) {
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    assert(end == bottom + HeapRegion::GrainWords, "invariant");
  }

  _total_used_bytes += used_bytes;
  _total_capacity_bytes += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, strong_code_roots_bytes);

  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // Add the static memory usage to the remembered set sizes.
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          " SUMMARY"
                          G1PPRL_SUM_MB_FORMAT("capacity")
                          G1PPRL_SUM_MB_PERC_FORMAT("used")
                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                          G1PPRL_SUM_MB_FORMAT("remset")
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
                          perc(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
                          perc(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
                          perc(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
}
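
// Typical usage of G1PrintRegionLivenessInfoClosure (a sketch only;
// the actual call sites live elsewhere and are typically guarded by
// log_is_enabled(Trace, gc, liveness)):
//
//   G1PrintRegionLivenessInfoClosure cl("Post-Marking");
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//
// Each doHeapRegion() call prints one per-region line; the summary
// footer is printed when the closure goes out of scope.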