1 /* 2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/metadataOnStackMark.hpp" 27 #include "classfile/symbolTable.hpp" 28 #include "code/codeCache.hpp" 29 #include "gc/g1/concurrentMark.inline.hpp" 30 #include "gc/g1/concurrentMarkThread.inline.hpp" 31 #include "gc/g1/g1CollectedHeap.inline.hpp" 32 #include "gc/g1/g1CollectorPolicy.hpp" 33 #include "gc/g1/g1CollectorState.hpp" 34 #include "gc/g1/g1OopClosures.inline.hpp" 35 #include "gc/g1/g1RemSet.hpp" 36 #include "gc/g1/g1StringDedup.hpp" 37 #include "gc/g1/heapRegion.inline.hpp" 38 #include "gc/g1/heapRegionManager.inline.hpp" 39 #include "gc/g1/heapRegionRemSet.hpp" 40 #include "gc/g1/heapRegionSet.inline.hpp" 41 #include "gc/g1/suspendibleThreadSet.hpp" 42 #include "gc/shared/gcId.hpp" 43 #include "gc/shared/gcTimer.hpp" 44 #include "gc/shared/gcTrace.hpp" 45 #include "gc/shared/gcTraceTime.inline.hpp" 46 #include "gc/shared/genOopClosures.inline.hpp" 47 #include "gc/shared/referencePolicy.hpp" 48 #include "gc/shared/strongRootsScope.hpp" 49 #include "gc/shared/taskqueue.inline.hpp" 50 #include "gc/shared/vmGCOperations.hpp" 51 #include "logging/log.hpp" 52 #include "memory/allocation.hpp" 53 #include "memory/resourceArea.hpp" 54 #include "oops/oop.inline.hpp" 55 #include "runtime/atomic.inline.hpp" 56 #include "runtime/handles.inline.hpp" 57 #include "runtime/java.hpp" 58 #include "runtime/prefetch.inline.hpp" 59 #include "services/memTracker.hpp" 60 61 // Concurrent marking bit map wrapper 62 63 CMBitMapRO::CMBitMapRO(int shifter) : 64 _bm(), 65 _shifter(shifter) { 66 _bmStartWord = 0; 67 _bmWordSize = 0; 68 } 69 70 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr, 71 const HeapWord* limit) const { 72 // First we must round addr *up* to a possible object boundary. 
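// (each bit in the map covers HeapWordSize << _shifter bytes, so only addresses aligned to that granularity correspond to exact bit positions)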
73 addr = (HeapWord*)align_size_up((intptr_t)addr, 74 HeapWordSize << _shifter); 75 size_t addrOffset = heapWordToOffset(addr); 76 assert(limit != NULL, "limit must not be NULL"); 77 size_t limitOffset = heapWordToOffset(limit); 78 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset); 79 HeapWord* nextAddr = offsetToHeapWord(nextOffset); 80 assert(nextAddr >= addr, "get_next_one postcondition"); 81 assert(nextAddr == limit || isMarked(nextAddr), 82 "get_next_one postcondition"); 83 return nextAddr; 84 } 85 86 #ifndef PRODUCT 87 bool CMBitMapRO::covers(MemRegion heap_rs) const { 88 // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); 89 assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize, 90 "size inconsistency"); 91 return _bmStartWord == (HeapWord*)(heap_rs.start()) && 92 _bmWordSize == heap_rs.word_size(); 93 } 94 #endif 95 96 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const { 97 _bm.print_on_error(st, prefix); 98 } 99 100 size_t CMBitMap::compute_size(size_t heap_size) { 101 return ReservedSpace::allocation_align_size_up(heap_size / mark_distance()); 102 } 103 104 size_t CMBitMap::mark_distance() { 105 return MinObjAlignmentInBytes * BitsPerByte; 106 } 107 108 void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) { 109 _bmStartWord = heap.start(); 110 _bmWordSize = heap.word_size(); 111 112 _bm.set_map((BitMap::bm_word_t*) storage->reserved().start()); 113 _bm.set_size(_bmWordSize >> _shifter); 114 115 storage->set_mapping_changed_listener(&_listener); 116 } 117 118 void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) { 119 if (zero_filled) { 120 return; 121 } 122 // We need to clear the bitmap on commit, removing any existing information. 123 MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords); 124 _bm->clearRange(mr); 125 } 126 127 // Closure used for clearing the given mark bitmap. 128 class ClearBitmapHRClosure : public HeapRegionClosure { 129 private: 130 ConcurrentMark* _cm; 131 CMBitMap* _bitmap; 132 bool _may_yield; // The closure may yield during iteration. If yielded, abort the iteration. 133 public: 134 ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) { 135 assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield."); 136 } 137 138 virtual bool doHeapRegion(HeapRegion* r) { 139 size_t const chunk_size_in_words = M / HeapWordSize; 140 141 HeapWord* cur = r->bottom(); 142 HeapWord* const end = r->end(); 143 144 while (cur < end) { 145 MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end)); 146 _bitmap->clearRange(mr); 147 148 cur += chunk_size_in_words; 149 150 // Abort iteration if after yielding the marking has been aborted. 151 if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) { 152 return true; 153 } 154 // Repeat the asserts from before the start of the closure. We will do them 155 // as asserts here to minimize their overhead on the product. However, we 156 // will have them as guarantees at the beginning / end of the bitmap 157 // clearing to get some checking in the product. 
158 assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant"); 159 assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant"); 160 } 161 162 return false; 163 } 164 }; 165 166 class ParClearNextMarkBitmapTask : public AbstractGangTask { 167 ClearBitmapHRClosure* _cl; 168 HeapRegionClaimer _hrclaimer; 169 bool _suspendible; // If the task is suspendible, workers must join the STS. 170 171 public: 172 ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) : 173 _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {} 174 175 void work(uint worker_id) { 176 SuspendibleThreadSetJoiner sts_join(_suspendible); 177 G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true); 178 } 179 }; 180 181 void CMBitMap::clearAll() { 182 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 183 ClearBitmapHRClosure cl(NULL, this, false /* may_yield */); 184 uint n_workers = g1h->workers()->active_workers(); 185 ParClearNextMarkBitmapTask task(&cl, n_workers, false); 186 g1h->workers()->run_task(&task); 187 guarantee(cl.complete(), "Must have completed iteration."); 188 return; 189 } 190 191 void CMBitMap::clearRange(MemRegion mr) { 192 mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); 193 assert(!mr.is_empty(), "unexpected empty region"); 194 // convert address range into offset range 195 _bm.at_put_range(heapWordToOffset(mr.start()), 196 heapWordToOffset(mr.end()), false); 197 } 198 199 CMMarkStack::CMMarkStack(ConcurrentMark* cm) : 200 _base(NULL), _cm(cm) 201 {} 202 203 bool CMMarkStack::allocate(size_t capacity) { 204 // allocate a stack of the requisite depth 205 ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop))); 206 if (!rs.is_reserved()) { 207 warning("ConcurrentMark MarkStack allocation failure"); 208 return false; 209 } 210 MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); 211 if (!_virtual_space.initialize(rs, rs.size())) { 212 warning("ConcurrentMark MarkStack backing store failure"); 213 // Release the virtual memory reserved for the marking stack 214 rs.release(); 215 return false; 216 } 217 assert(_virtual_space.committed_size() == rs.size(), 218 "Didn't reserve backing store for all of ConcurrentMark stack?"); 219 _base = (oop*) _virtual_space.low(); 220 setEmpty(); 221 _capacity = (jint) capacity; 222 _saved_index = -1; 223 _should_expand = false; 224 return true; 225 } 226 227 void CMMarkStack::expand() { 228 // Called, during remark, if we've overflown the marking stack during marking. 229 assert(isEmpty(), "stack should been emptied while handling overflow"); 230 assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted"); 231 // Clear expansion flag 232 _should_expand = false; 233 if (_capacity == (jint) MarkStackSizeMax) { 234 log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit"); 235 return; 236 } 237 // Double capacity if possible 238 jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax); 239 // Do not give up existing stack until we have managed to 240 // get the double capacity that we desired. 
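// Reserve the new, larger backing store first; the old one is released only once the new reservation has succeeded.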
241 ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity * 242 sizeof(oop))); 243 if (rs.is_reserved()) { 244 // Release the backing store associated with old stack 245 _virtual_space.release(); 246 // Reinitialize virtual space for new stack 247 if (!_virtual_space.initialize(rs, rs.size())) { 248 fatal("Not enough swap for expanded marking stack capacity"); 249 } 250 _base = (oop*)(_virtual_space.low()); 251 _index = 0; 252 _capacity = new_capacity; 253 } else { 254 // Failed to double capacity, continue; 255 log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K", 256 _capacity / K, new_capacity / K); 257 } 258 } 259 260 void CMMarkStack::set_should_expand() { 261 // If we're resetting the marking state because of an 262 // marking stack overflow, record that we should, if 263 // possible, expand the stack. 264 _should_expand = _cm->has_overflown(); 265 } 266 267 CMMarkStack::~CMMarkStack() { 268 if (_base != NULL) { 269 _base = NULL; 270 _virtual_space.release(); 271 } 272 } 273 274 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) { 275 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 276 jint start = _index; 277 jint next_index = start + n; 278 if (next_index > _capacity) { 279 _overflow = true; 280 return; 281 } 282 // Otherwise. 283 _index = next_index; 284 for (int i = 0; i < n; i++) { 285 int ind = start + i; 286 assert(ind < _capacity, "By overflow test above."); 287 _base[ind] = ptr_arr[i]; 288 } 289 } 290 291 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) { 292 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 293 jint index = _index; 294 if (index == 0) { 295 *n = 0; 296 return false; 297 } else { 298 int k = MIN2(max, index); 299 jint new_ind = index - k; 300 for (int j = 0; j < k; j++) { 301 ptr_arr[j] = _base[new_ind + j]; 302 } 303 _index = new_ind; 304 *n = k; 305 return true; 306 } 307 } 308 309 void CMMarkStack::note_start_of_gc() { 310 assert(_saved_index == -1, 311 "note_start_of_gc()/end_of_gc() bracketed incorrectly"); 312 _saved_index = _index; 313 } 314 315 void CMMarkStack::note_end_of_gc() { 316 // This is intentionally a guarantee, instead of an assert. If we 317 // accidentally add something to the mark stack during GC, it 318 // will be a correctness issue so it's better if we crash. we'll 319 // only check this once per GC anyway, so it won't be a performance 320 // issue in any way. 321 guarantee(_saved_index == _index, 322 "saved index: %d index: %d", _saved_index, _index); 323 _saved_index = -1; 324 } 325 326 CMRootRegions::CMRootRegions() : 327 _young_list(NULL), _cm(NULL), _scan_in_progress(false), 328 _should_abort(false), _next_survivor(NULL) { } 329 330 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) { 331 _young_list = g1h->young_list(); 332 _cm = cm; 333 } 334 335 void CMRootRegions::prepare_for_scan() { 336 assert(!scan_in_progress(), "pre-condition"); 337 338 // Currently, only survivors can be root regions. 339 assert(_next_survivor == NULL, "pre-condition"); 340 _next_survivor = _young_list->first_survivor_region(); 341 _scan_in_progress = (_next_survivor != NULL); 342 _should_abort = false; 343 } 344 345 HeapRegion* CMRootRegions::claim_next() { 346 if (_should_abort) { 347 // If someone has set the should_abort flag, we return NULL to 348 // force the caller to bail out of their loop. 349 return NULL; 350 } 351 352 // Currently, only survivors can be root regions. 
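// Claim the next survivor using double-checked locking: _next_survivor is read without the lock first and re-read under RootRegionScan_lock only if it still appears non-NULL.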
353 HeapRegion* res = _next_survivor; 354 if (res != NULL) { 355 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); 356 // Read it again in case it changed while we were waiting for the lock. 357 res = _next_survivor; 358 if (res != NULL) { 359 if (res == _young_list->last_survivor_region()) { 360 // We just claimed the last survivor so store NULL to indicate 361 // that we're done. 362 _next_survivor = NULL; 363 } else { 364 _next_survivor = res->get_next_young_region(); 365 } 366 } else { 367 // Someone else claimed the last survivor while we were trying 368 // to take the lock so nothing else to do. 369 } 370 } 371 assert(res == NULL || res->is_survivor(), "post-condition"); 372 373 return res; 374 } 375 376 void CMRootRegions::scan_finished() { 377 assert(scan_in_progress(), "pre-condition"); 378 379 // Currently, only survivors can be root regions. 380 if (!_should_abort) { 381 assert(_next_survivor == NULL, "we should have claimed all survivors"); 382 } 383 _next_survivor = NULL; 384 385 { 386 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); 387 _scan_in_progress = false; 388 RootRegionScan_lock->notify_all(); 389 } 390 } 391 392 bool CMRootRegions::wait_until_scan_finished() { 393 if (!scan_in_progress()) return false; 394 395 { 396 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); 397 while (scan_in_progress()) { 398 RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag); 399 } 400 } 401 return true; 402 } 403 404 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) { 405 return MAX2((n_par_threads + 2) / 4, 1U); 406 } 407 408 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) : 409 _g1h(g1h), 410 _markBitMap1(), 411 _markBitMap2(), 412 _parallel_marking_threads(0), 413 _max_parallel_marking_threads(0), 414 _sleep_factor(0.0), 415 _marking_task_overhead(1.0), 416 _cleanup_list("Cleanup List"), 417 _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/), 418 _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >> 419 CardTableModRefBS::card_shift, 420 false /* in_resource_area*/), 421 422 _prevMarkBitMap(&_markBitMap1), 423 _nextMarkBitMap(&_markBitMap2), 424 425 _markStack(this), 426 // _finger set in set_non_marking_state 427 428 _max_worker_id(ParallelGCThreads), 429 // _active_tasks set in set_non_marking_state 430 // _tasks set inside the constructor 431 _task_queues(new CMTaskQueueSet((int) _max_worker_id)), 432 _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)), 433 434 _has_overflown(false), 435 _concurrent(false), 436 _has_aborted(false), 437 _restart_for_overflow(false), 438 _concurrent_marking_in_progress(false), 439 _concurrent_marking_from_roots(false), 440 441 // _verbose_level set below 442 443 _init_times(), 444 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(), 445 _cleanup_times(), 446 _total_counting_time(0.0), 447 _total_rs_scrub_time(0.0), 448 449 _parallel_workers(NULL), 450 451 _count_card_bitmaps(NULL), 452 _count_marked_bytes(NULL), 453 _completed_initialization(false) { 454 455 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage); 456 _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage); 457 458 // Create & start a ConcurrentMark thread. 
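// The thread idles between cycles and is signalled through CGC_lock (asserted below) when a new marking cycle should start.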
459 _cmThread = new ConcurrentMarkThread(this); 460 assert(cmThread() != NULL, "CM Thread should have been created"); 461 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm"); 462 if (_cmThread->osthread() == NULL) { 463 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread"); 464 } 465 466 assert(CGC_lock != NULL, "Where's the CGC_lock?"); 467 assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency"); 468 assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency"); 469 470 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); 471 satb_qs.set_buffer_size(G1SATBBufferSize); 472 473 _root_regions.init(_g1h, this); 474 475 if (ConcGCThreads > ParallelGCThreads) { 476 warning("Can't have more ConcGCThreads (%u) " 477 "than ParallelGCThreads (%u).", 478 ConcGCThreads, ParallelGCThreads); 479 return; 480 } 481 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) { 482 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent 483 // if both are set 484 _sleep_factor = 0.0; 485 _marking_task_overhead = 1.0; 486 } else if (G1MarkingOverheadPercent > 0) { 487 // We will calculate the number of parallel marking threads based 488 // on a target overhead with respect to the soft real-time goal 489 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0; 490 double overall_cm_overhead = 491 (double) MaxGCPauseMillis * marking_overhead / 492 (double) GCPauseIntervalMillis; 493 double cpu_ratio = 1.0 / (double) os::processor_count(); 494 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio); 495 double marking_task_overhead = 496 overall_cm_overhead / marking_thread_num * 497 (double) os::processor_count(); 498 double sleep_factor = 499 (1.0 - marking_task_overhead) / marking_task_overhead; 500 501 FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num); 502 _sleep_factor = sleep_factor; 503 _marking_task_overhead = marking_task_overhead; 504 } else { 505 // Calculate the number of parallel marking threads by scaling 506 // the number of parallel GC threads. 507 uint marking_thread_num = scale_parallel_threads(ParallelGCThreads); 508 FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num); 509 _sleep_factor = 0.0; 510 _marking_task_overhead = 1.0; 511 } 512 513 assert(ConcGCThreads > 0, "Should have been set"); 514 _parallel_marking_threads = ConcGCThreads; 515 _max_parallel_marking_threads = _parallel_marking_threads; 516 517 _parallel_workers = new WorkGang("G1 Marker", 518 _max_parallel_marking_threads, false, true); 519 if (_parallel_workers == NULL) { 520 vm_exit_during_initialization("Failed necessary allocation."); 521 } else { 522 _parallel_workers->initialize_workers(); 523 } 524 525 if (FLAG_IS_DEFAULT(MarkStackSize)) { 526 size_t mark_stack_size = 527 MIN2(MarkStackSizeMax, 528 MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE))); 529 // Verify that the calculated value for MarkStackSize is in range. 530 // It would be nice to use the private utility routine from Arguments. 531 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) { 532 warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): " 533 "must be between 1 and " SIZE_FORMAT, 534 mark_stack_size, MarkStackSizeMax); 535 return; 536 } 537 FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size); 538 } else { 539 // Verify MarkStackSize is in range. 
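// Only values explicitly specified on the command line are range checked here.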
540 if (FLAG_IS_CMDLINE(MarkStackSize)) { 541 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) { 542 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { 543 warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): " 544 "must be between 1 and " SIZE_FORMAT, 545 MarkStackSize, MarkStackSizeMax); 546 return; 547 } 548 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) { 549 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { 550 warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")" 551 " or for MarkStackSizeMax (" SIZE_FORMAT ")", 552 MarkStackSize, MarkStackSizeMax); 553 return; 554 } 555 } 556 } 557 } 558 559 if (!_markStack.allocate(MarkStackSize)) { 560 warning("Failed to allocate CM marking stack"); 561 return; 562 } 563 564 _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC); 565 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC); 566 567 _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_worker_id, mtGC); 568 _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC); 569 570 BitMap::idx_t card_bm_size = _card_bm.size(); 571 572 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail 573 _active_tasks = _max_worker_id; 574 575 uint max_regions = _g1h->max_regions(); 576 for (uint i = 0; i < _max_worker_id; ++i) { 577 CMTaskQueue* task_queue = new CMTaskQueue(); 578 task_queue->initialize(); 579 _task_queues->register_queue(i, task_queue); 580 581 _count_card_bitmaps[i] = BitMap(card_bm_size, false); 582 _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC); 583 584 _tasks[i] = new CMTask(i, this, 585 _count_marked_bytes[i], 586 &_count_card_bitmaps[i], 587 task_queue, _task_queues); 588 589 _accum_task_vtime[i] = 0.0; 590 } 591 592 // Calculate the card number for the bottom of the heap. Used 593 // in biasing indexes into the accounting card bitmaps. 594 _heap_bottom_card_num = 595 intptr_t(uintptr_t(_g1h->reserved_region().start()) >> 596 CardTableModRefBS::card_shift); 597 598 // Clear all the liveness counting data 599 clear_all_count_data(); 600 601 // so that the call below can read a sensible value 602 _heap_start = g1h->reserved_region().start(); 603 set_non_marking_state(); 604 _completed_initialization = true; 605 } 606 607 void ConcurrentMark::reset() { 608 // Starting values for these two. This should be called in a STW 609 // phase. 610 MemRegion reserved = _g1h->g1_reserved(); 611 _heap_start = reserved.start(); 612 _heap_end = reserved.end(); 613 614 // Separated the asserts so that we know which one fires. 615 assert(_heap_start != NULL, "heap bounds should look ok"); 616 assert(_heap_end != NULL, "heap bounds should look ok"); 617 assert(_heap_start < _heap_end, "heap bounds should look ok"); 618 619 // Reset all the marking data structures and any necessary flags 620 reset_marking_state(); 621 622 // We do reset all of them, since different phases will use 623 // different number of active threads. So, it's easiest to have all 624 // of them ready. 
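// Each task is pointed at the current next marking bitmap and has its per-task state cleared.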
625 for (uint i = 0; i < _max_worker_id; ++i) { 626 _tasks[i]->reset(_nextMarkBitMap); 627 } 628 629 // we need this to make sure that the flag is on during the evac 630 // pause with initial mark piggy-backed 631 set_concurrent_marking_in_progress(); 632 } 633 634 635 void ConcurrentMark::reset_marking_state(bool clear_overflow) { 636 _markStack.set_should_expand(); 637 _markStack.setEmpty(); // Also clears the _markStack overflow flag 638 if (clear_overflow) { 639 clear_has_overflown(); 640 } else { 641 assert(has_overflown(), "pre-condition"); 642 } 643 _finger = _heap_start; 644 645 for (uint i = 0; i < _max_worker_id; ++i) { 646 CMTaskQueue* queue = _task_queues->queue(i); 647 queue->set_empty(); 648 } 649 } 650 651 void ConcurrentMark::set_concurrency(uint active_tasks) { 652 assert(active_tasks <= _max_worker_id, "we should not have more"); 653 654 _active_tasks = active_tasks; 655 // Need to update the three data structures below according to the 656 // number of active threads for this phase. 657 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues); 658 _first_overflow_barrier_sync.set_n_workers((int) active_tasks); 659 _second_overflow_barrier_sync.set_n_workers((int) active_tasks); 660 } 661 662 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) { 663 set_concurrency(active_tasks); 664 665 _concurrent = concurrent; 666 // We propagate this to all tasks, not just the active ones. 667 for (uint i = 0; i < _max_worker_id; ++i) 668 _tasks[i]->set_concurrent(concurrent); 669 670 if (concurrent) { 671 set_concurrent_marking_in_progress(); 672 } else { 673 // We currently assume that the concurrent flag has been set to 674 // false before we start remark. At this point we should also be 675 // in a STW phase. 676 assert(!concurrent_marking_in_progress(), "invariant"); 677 assert(out_of_regions(), 678 "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT, 679 p2i(_finger), p2i(_heap_end)); 680 } 681 } 682 683 void ConcurrentMark::set_non_marking_state() { 684 // We set the global marking state to some default values when we're 685 // not doing marking. 686 reset_marking_state(); 687 _active_tasks = 0; 688 clear_concurrent_marking_in_progress(); 689 } 690 691 ConcurrentMark::~ConcurrentMark() { 692 // The ConcurrentMark instance is never freed. 693 ShouldNotReachHere(); 694 } 695 696 void ConcurrentMark::clearNextBitmap() { 697 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 698 699 // Make sure that the concurrent mark thread looks to still be in 700 // the current cycle. 701 guarantee(cmThread()->during_cycle(), "invariant"); 702 703 // We are finishing up the current cycle by clearing the next 704 // marking bitmap and getting it ready for the next cycle. During 705 // this time no other cycle can start. So, let's make sure that this 706 // is the case. 707 guarantee(!g1h->collector_state()->mark_in_progress(), "invariant"); 708 709 ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */); 710 ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true); 711 _parallel_workers->run_task(&task); 712 713 // Clear the liveness counting data. If the marking has been aborted, the abort() 714 // call already did that. 715 if (cl.complete()) { 716 clear_all_count_data(); 717 } 718 719 // Repeat the asserts from above. 
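// (as guarantees, so they are also checked in product builds)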
720 guarantee(cmThread()->during_cycle(), "invariant"); 721 guarantee(!g1h->collector_state()->mark_in_progress(), "invariant"); 722 } 723 724 class CheckBitmapClearHRClosure : public HeapRegionClosure { 725 CMBitMap* _bitmap; 726 bool _error; 727 public: 728 CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) { 729 } 730 731 virtual bool doHeapRegion(HeapRegion* r) { 732 // This closure can be called concurrently to the mutator, so we must make sure 733 // that the result of the getNextMarkedWordAddress() call is compared to the 734 // value passed to it as limit to detect any found bits. 735 // end never changes in G1. 736 HeapWord* end = r->end(); 737 return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end; 738 } 739 }; 740 741 bool ConcurrentMark::nextMarkBitmapIsClear() { 742 CheckBitmapClearHRClosure cl(_nextMarkBitMap); 743 _g1h->heap_region_iterate(&cl); 744 return cl.complete(); 745 } 746 747 class NoteStartOfMarkHRClosure: public HeapRegionClosure { 748 public: 749 bool doHeapRegion(HeapRegion* r) { 750 r->note_start_of_marking(); 751 return false; 752 } 753 }; 754 755 void ConcurrentMark::checkpointRootsInitialPre() { 756 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 757 G1CollectorPolicy* g1p = g1h->g1_policy(); 758 759 _has_aborted = false; 760 761 // Initialize marking structures. This has to be done in a STW phase. 762 reset(); 763 764 // For each region note start of marking. 765 NoteStartOfMarkHRClosure startcl; 766 g1h->heap_region_iterate(&startcl); 767 } 768 769 770 void ConcurrentMark::checkpointRootsInitialPost() { 771 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 772 773 // Start Concurrent Marking weak-reference discovery. 774 ReferenceProcessor* rp = g1h->ref_processor_cm(); 775 // enable ("weak") refs discovery 776 rp->enable_discovery(); 777 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle 778 779 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 780 // This is the start of the marking cycle, we're expected all 781 // threads to have SATB queues with active set to false. 782 satb_mq_set.set_active_all_threads(true, /* new active value */ 783 false /* expected_active */); 784 785 _root_regions.prepare_for_scan(); 786 787 // update_g1_committed() will be called at the end of an evac pause 788 // when marking is on. So, it's also called at the end of the 789 // initial-mark pause to update the heap end, if the heap expands 790 // during it. No need to call it here. 791 } 792 793 /* 794 * Notice that in the next two methods, we actually leave the STS 795 * during the barrier sync and join it immediately afterwards. If we 796 * do not do this, the following deadlock can occur: one thread could 797 * be in the barrier sync code, waiting for the other thread to also 798 * sync up, whereas another one could be trying to yield, while also 799 * waiting for the other threads to sync up too. 800 * 801 * Note, however, that this code is also used during remark and in 802 * this case we should not attempt to leave / enter the STS, otherwise 803 * we'll either hit an assert (debug / fastdebug) or deadlock 804 * (product). So we should only leave / enter the STS if we are 805 * operating concurrently. 806 * 807 * Because the thread that does the sync barrier has left the STS, it 808 * is possible to be suspended for a Full GC or an evacuation pause 809 * could occur. 
This is actually safe, since entering the sync 810 * barrier is one of the last things do_marking_step() does, and it 811 * doesn't manipulate any data structures afterwards. 812 */ 813 814 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) { 815 bool barrier_aborted; 816 { 817 SuspendibleThreadSetLeaver sts_leave(concurrent()); 818 barrier_aborted = !_first_overflow_barrier_sync.enter(); 819 } 820 821 // at this point everyone should have synced up and not be doing any 822 // more work 823 824 if (barrier_aborted) { 825 // If the barrier aborted we ignore the overflow condition and 826 // just abort the whole marking phase as quickly as possible. 827 return; 828 } 829 830 // If we're executing the concurrent phase of marking, reset the marking 831 // state; otherwise the marking state is reset after reference processing, 832 // during the remark pause. 833 // If we reset here as a result of an overflow during the remark we will 834 // see assertion failures from any subsequent set_concurrency_and_phase() 835 // calls. 836 if (concurrent()) { 837 // let the task associated with worker 0 do this 838 if (worker_id == 0) { 839 // task 0 is responsible for clearing the global data structures 840 // We should be here because of an overflow. During STW we should 841 // not clear the overflow flag since we rely on it being true when 842 // we exit this method to abort the pause and restart concurrent 843 // marking. 844 reset_marking_state(true /* clear_overflow */); 845 846 log_info(gc)("Concurrent Mark reset for overflow"); 847 } 848 } 849 850 // after this, each task should reset its own data structures and 851 // then go into the second barrier 852 } 853 854 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) { 855 SuspendibleThreadSetLeaver sts_leave(concurrent()); 856 _second_overflow_barrier_sync.enter(); 857 858 // at this point everything should be re-initialized and ready to go 859 } 860 861 class CMConcurrentMarkingTask: public AbstractGangTask { 862 private: 863 ConcurrentMark* _cm; 864 ConcurrentMarkThread* _cmt; 865 866 public: 867 void work(uint worker_id) { 868 assert(Thread::current()->is_ConcurrentGC_thread(), 869 "this should only be done by a conc GC thread"); 870 ResourceMark rm; 871 872 double start_vtime = os::elapsedVTime(); 873 874 { 875 SuspendibleThreadSetJoiner sts_join; 876 877 assert(worker_id < _cm->active_tasks(), "invariant"); 878 CMTask* the_task = _cm->task(worker_id); 879 the_task->record_start_time(); 880 if (!_cm->has_aborted()) { 881 do { 882 double start_vtime_sec = os::elapsedVTime(); 883 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 884 885 the_task->do_marking_step(mark_step_duration_ms, 886 true /* do_termination */, 887 false /* is_serial*/); 888 889 double end_vtime_sec = os::elapsedVTime(); 890 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; 891 _cm->clear_has_overflown(); 892 893 _cm->do_yield_check(worker_id); 894 895 jlong sleep_time_ms; 896 if (!_cm->has_aborted() && the_task->has_aborted()) { 897 sleep_time_ms = 898 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0); 899 { 900 SuspendibleThreadSetLeaver sts_leave; 901 os::sleep(Thread::current(), sleep_time_ms, false); 902 } 903 } 904 } while (!_cm->has_aborted() && the_task->has_aborted()); 905 } 906 the_task->record_end_time(); 907 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant"); 908 } 909 910 double end_vtime = os::elapsedVTime(); 911 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime); 912 
} 913 914 CMConcurrentMarkingTask(ConcurrentMark* cm, 915 ConcurrentMarkThread* cmt) : 916 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { } 917 918 ~CMConcurrentMarkingTask() { } 919 }; 920 921 // Calculates the number of active workers for a concurrent 922 // phase. 923 uint ConcurrentMark::calc_parallel_marking_threads() { 924 uint n_conc_workers = 0; 925 if (!UseDynamicNumberOfGCThreads || 926 (!FLAG_IS_DEFAULT(ConcGCThreads) && 927 !ForceDynamicNumberOfGCThreads)) { 928 n_conc_workers = max_parallel_marking_threads(); 929 } else { 930 n_conc_workers = 931 AdaptiveSizePolicy::calc_default_active_workers( 932 max_parallel_marking_threads(), 933 1, /* Minimum workers */ 934 parallel_marking_threads(), 935 Threads::number_of_non_daemon_threads()); 936 // Don't scale down "n_conc_workers" by scale_parallel_threads() because 937 // that scaling has already gone into "_max_parallel_marking_threads". 938 } 939 assert(n_conc_workers > 0, "Always need at least 1"); 940 return n_conc_workers; 941 } 942 943 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) { 944 // Currently, only survivors can be root regions. 945 assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant"); 946 G1RootRegionScanClosure cl(_g1h, this, worker_id); 947 948 const uintx interval = PrefetchScanIntervalInBytes; 949 HeapWord* curr = hr->bottom(); 950 const HeapWord* end = hr->top(); 951 while (curr < end) { 952 Prefetch::read(curr, interval); 953 oop obj = oop(curr); 954 int size = obj->oop_iterate_size(&cl); 955 assert(size == obj->size(), "sanity"); 956 curr += size; 957 } 958 } 959 960 class CMRootRegionScanTask : public AbstractGangTask { 961 private: 962 ConcurrentMark* _cm; 963 964 public: 965 CMRootRegionScanTask(ConcurrentMark* cm) : 966 AbstractGangTask("Root Region Scan"), _cm(cm) { } 967 968 void work(uint worker_id) { 969 assert(Thread::current()->is_ConcurrentGC_thread(), 970 "this should only be done by a conc GC thread"); 971 972 CMRootRegions* root_regions = _cm->root_regions(); 973 HeapRegion* hr = root_regions->claim_next(); 974 while (hr != NULL) { 975 _cm->scanRootRegion(hr, worker_id); 976 hr = root_regions->claim_next(); 977 } 978 } 979 }; 980 981 void ConcurrentMark::scanRootRegions() { 982 // Start of concurrent marking. 983 ClassLoaderDataGraph::clear_claimed_marks(); 984 985 // scan_in_progress() will have been set to true only if there was 986 // at least one root region to scan. So, if it's false, we 987 // should not attempt to do any further work. 988 if (root_regions()->scan_in_progress()) { 989 GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan"); 990 991 _parallel_marking_threads = calc_parallel_marking_threads(); 992 assert(parallel_marking_threads() <= max_parallel_marking_threads(), 993 "Maximum number of marking threads exceeded"); 994 uint active_workers = MAX2(1U, parallel_marking_threads()); 995 996 CMRootRegionScanTask task(this); 997 _parallel_workers->set_active_workers(active_workers); 998 _parallel_workers->run_task(&task); 999 1000 // It's possible that has_aborted() is true here without actually 1001 // aborting the survivor scan earlier. This is OK as it's 1002 // mainly used for sanity checking. 
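// scan_finished() clears the scan-in-progress flag and notifies any threads blocked in wait_until_scan_finished().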
1003 root_regions()->scan_finished(); 1004 } 1005 } 1006 1007 void ConcurrentMark::register_mark_from_roots_phase_start() { 1008 _concurrent_marking_from_roots = true; 1009 _g1h->gc_timer_cm()->register_gc_concurrent_start("Concurrent Mark"); 1010 } 1011 1012 void ConcurrentMark::register_mark_from_roots_phase_end() { 1013 _concurrent_marking_from_roots = false; 1014 if (!has_aborted()) { 1015 _g1h->gc_timer_cm()->register_gc_concurrent_end(); 1016 } 1017 } 1018 1019 void ConcurrentMark::markFromRoots() { 1020 // we might be tempted to assert that: 1021 // assert(asynch == !SafepointSynchronize::is_at_safepoint(), 1022 // "inconsistent argument?"); 1023 // However that wouldn't be right, because it's possible that 1024 // a safepoint is indeed in progress as a younger generation 1025 // stop-the-world GC happens even as we mark in this generation. 1026 1027 register_mark_from_roots_phase_start(); 1028 1029 _restart_for_overflow = false; 1030 1031 // _g1h has _n_par_threads 1032 _parallel_marking_threads = calc_parallel_marking_threads(); 1033 assert(parallel_marking_threads() <= max_parallel_marking_threads(), 1034 "Maximum number of marking threads exceeded"); 1035 1036 uint active_workers = MAX2(1U, parallel_marking_threads()); 1037 assert(active_workers > 0, "Should have been set"); 1038 1039 // Parallel task terminator is set in "set_concurrency_and_phase()" 1040 set_concurrency_and_phase(active_workers, true /* concurrent */); 1041 1042 CMConcurrentMarkingTask markingTask(this, cmThread()); 1043 _parallel_workers->set_active_workers(active_workers); 1044 _parallel_workers->run_task(&markingTask); 1045 1046 register_mark_from_roots_phase_end(); 1047 1048 print_stats(); 1049 } 1050 1051 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { 1052 // world is stopped at this checkpoint 1053 assert(SafepointSynchronize::is_at_safepoint(), 1054 "world should be stopped"); 1055 1056 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1057 1058 // If a full collection has happened, we shouldn't do this. 1059 if (has_aborted()) { 1060 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused 1061 return; 1062 } 1063 1064 SvcGCMarker sgcm(SvcGCMarker::OTHER); 1065 1066 if (VerifyDuringGC) { 1067 HandleMark hm; // handle scope 1068 g1h->prepare_for_verify(); 1069 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)"); 1070 } 1071 g1h->check_bitmaps("Remark Start"); 1072 1073 G1CollectorPolicy* g1p = g1h->g1_policy(); 1074 g1p->record_concurrent_mark_remark_start(); 1075 1076 double start = os::elapsedTime(); 1077 1078 checkpointRootsFinalWork(); 1079 1080 double mark_work_end = os::elapsedTime(); 1081 1082 weakRefsWork(clear_all_soft_refs); 1083 1084 if (has_overflown()) { 1085 // Oops. We overflowed. Restart concurrent marking. 1086 _restart_for_overflow = true; 1087 log_develop_trace(gc)("Remark led to restart for overflow."); 1088 1089 // Verify the heap w.r.t. the previous marking bitmap. 1090 if (VerifyDuringGC) { 1091 HandleMark hm; // handle scope 1092 g1h->prepare_for_verify(); 1093 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)"); 1094 } 1095 1096 // Clear the marking state because we will be restarting 1097 // marking due to overflowing the global mark stack. 1098 reset_marking_state(); 1099 } else { 1100 { 1101 GCTraceTime(Debug, gc) trace("GC Aggregate Data", g1h->gc_timer_cm()); 1102 1103 // Aggregate the per-task counting data that we have accumulated 1104 // while marking. 
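// Each worker's counting card bitmap and per-region marked-bytes array are folded into the global counting data.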
1105 aggregate_count_data(); 1106 } 1107 1108 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 1109 // We're done with marking. 1110 // This is the end of the marking cycle; we expect all 1111 // threads to have SATB queues with active set to true. 1112 satb_mq_set.set_active_all_threads(false, /* new active value */ 1113 true /* expected_active */); 1114 1115 if (VerifyDuringGC) { 1116 HandleMark hm; // handle scope 1117 g1h->prepare_for_verify(); 1118 Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)"); 1119 } 1120 g1h->check_bitmaps("Remark End"); 1121 assert(!restart_for_overflow(), "sanity"); 1122 // Completely reset the marking state since marking completed 1123 set_non_marking_state(); 1124 } 1125 1126 // Expand the marking stack, if we have to and if we can. 1127 if (_markStack.should_expand()) { 1128 _markStack.expand(); 1129 } 1130 1131 // Statistics 1132 double now = os::elapsedTime(); 1133 _remark_mark_times.add((mark_work_end - start) * 1000.0); 1134 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0); 1135 _remark_times.add((now - start) * 1000.0); 1136 1137 g1p->record_concurrent_mark_remark_end(); 1138 1139 G1CMIsAliveClosure is_alive(g1h); 1140 g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive); 1141 } 1142 1143 // Base class of the closures that finalize and verify the 1144 // liveness counting data. 1145 class CMCountDataClosureBase: public HeapRegionClosure { 1146 protected: 1147 G1CollectedHeap* _g1h; 1148 ConcurrentMark* _cm; 1149 CardTableModRefBS* _ct_bs; 1150 1151 BitMap* _region_bm; 1152 BitMap* _card_bm; 1153 1154 // Takes a region that's not empty (i.e., it has at least one 1155 // live object in it) and sets its corresponding bit on the region 1156 // bitmap to 1. 1157 void set_bit_for_region(HeapRegion* hr) { 1158 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); 1159 _region_bm->par_at_put(index, true); 1160 } 1161 1162 public: 1163 CMCountDataClosureBase(G1CollectedHeap* g1h, 1164 BitMap* region_bm, BitMap* card_bm): 1165 _g1h(g1h), _cm(g1h->concurrent_mark()), 1166 _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())), 1167 _region_bm(region_bm), _card_bm(card_bm) { } 1168 }; 1169 1170 // Closure that calculates the # live objects per region. Used 1171 // for verification purposes during the cleanup pause. 1172 class CalcLiveObjectsClosure: public CMCountDataClosureBase { 1173 CMBitMapRO* _bm; 1174 size_t _region_marked_bytes; 1175 1176 public: 1177 CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h, 1178 BitMap* region_bm, BitMap* card_bm) : 1179 CMCountDataClosureBase(g1h, region_bm, card_bm), 1180 _bm(bm), _region_marked_bytes(0) { } 1181 1182 bool doHeapRegion(HeapRegion* hr) { 1183 HeapWord* ntams = hr->next_top_at_mark_start(); 1184 HeapWord* start = hr->bottom(); 1185 1186 assert(start <= hr->end() && start <= ntams && ntams <= hr->end(), 1187 "Preconditions not met - " 1188 "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT, 1189 p2i(start), p2i(ntams), p2i(hr->end())); 1190 1191 // Find the first marked object at or after "start".
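// Objects at or above ntams were allocated since marking started; they are implicitly live and handled separately below.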
1192 start = _bm->getNextMarkedWordAddress(start, ntams); 1193 1194 size_t marked_bytes = 0; 1195 1196 while (start < ntams) { 1197 oop obj = oop(start); 1198 int obj_sz = obj->size(); 1199 HeapWord* obj_end = start + obj_sz; 1200 1201 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); 1202 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end); 1203 1204 // Note: if we're looking at the last region in heap - obj_end 1205 // could be actually just beyond the end of the heap; end_idx 1206 // will then correspond to a (non-existent) card that is also 1207 // just beyond the heap. 1208 if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) { 1209 // end of object is not card aligned - increment to cover 1210 // all the cards spanned by the object 1211 end_idx += 1; 1212 } 1213 1214 // Set the bits in the card BM for the cards spanned by this object. 1215 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1216 1217 // Add the size of this object to the number of marked bytes. 1218 marked_bytes += (size_t)obj_sz * HeapWordSize; 1219 1220 // This will happen if we are handling a humongous object that spans 1221 // several heap regions. 1222 if (obj_end > hr->end()) { 1223 break; 1224 } 1225 // Find the next marked object after this one. 1226 start = _bm->getNextMarkedWordAddress(obj_end, ntams); 1227 } 1228 1229 // Mark the allocated-since-marking portion... 1230 HeapWord* top = hr->top(); 1231 if (ntams < top) { 1232 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1233 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1234 1235 // Note: if we're looking at the last region in heap - top 1236 // could be actually just beyond the end of the heap; end_idx 1237 // will then correspond to a (non-existent) card that is also 1238 // just beyond the heap. 1239 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1240 // end of object is not card aligned - increment to cover 1241 // all the cards spanned by the object 1242 end_idx += 1; 1243 } 1244 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1245 1246 // This definitely means the region has live objects. 1247 set_bit_for_region(hr); 1248 } 1249 1250 // Update the live region bitmap. 1251 if (marked_bytes > 0) { 1252 set_bit_for_region(hr); 1253 } 1254 1255 // Set the marked bytes for the current region so that 1256 // it can be queried by a calling verification routine 1257 _region_marked_bytes = marked_bytes; 1258 1259 return false; 1260 } 1261 1262 size_t region_marked_bytes() const { return _region_marked_bytes; } 1263 }; 1264 1265 // Heap region closure used for verifying the counting data 1266 // that was accumulated concurrently and aggregated during 1267 // the remark pause. This closure is applied to the heap 1268 // regions during the STW cleanup pause. 
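// It recomputes the expected values with CalcLiveObjectsClosure and compares them against the region and card bitmaps filled in during marking.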
1269 1270 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure { 1271 G1CollectedHeap* _g1h; 1272 ConcurrentMark* _cm; 1273 CalcLiveObjectsClosure _calc_cl; 1274 BitMap* _region_bm; // Region BM to be verified 1275 BitMap* _card_bm; // Card BM to be verified 1276 1277 BitMap* _exp_region_bm; // Expected Region BM values 1278 BitMap* _exp_card_bm; // Expected card BM values 1279 1280 int _failures; 1281 1282 public: 1283 VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h, 1284 BitMap* region_bm, 1285 BitMap* card_bm, 1286 BitMap* exp_region_bm, 1287 BitMap* exp_card_bm) : 1288 _g1h(g1h), _cm(g1h->concurrent_mark()), 1289 _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm), 1290 _region_bm(region_bm), _card_bm(card_bm), 1291 _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm), 1292 _failures(0) { } 1293 1294 int failures() const { return _failures; } 1295 1296 bool doHeapRegion(HeapRegion* hr) { 1297 int failures = 0; 1298 1299 // Call the CalcLiveObjectsClosure to walk the marking bitmap for 1300 // this region and set the corresponding bits in the expected region 1301 // and card bitmaps. 1302 bool res = _calc_cl.doHeapRegion(hr); 1303 assert(res == false, "should be continuing"); 1304 1305 // Verify the marked bytes for this region. 1306 size_t exp_marked_bytes = _calc_cl.region_marked_bytes(); 1307 size_t act_marked_bytes = hr->next_marked_bytes(); 1308 1309 if (exp_marked_bytes > act_marked_bytes) { 1310 if (hr->is_starts_humongous()) { 1311 // For start_humongous regions, the size of the whole object will be 1312 // in exp_marked_bytes. 1313 HeapRegion* region = hr; 1314 int num_regions; 1315 for (num_regions = 0; region != NULL; num_regions++) { 1316 region = _g1h->next_region_in_humongous(region); 1317 } 1318 if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) { 1319 failures += 1; 1320 } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) { 1321 failures += 1; 1322 } 1323 } else { 1324 // We're not OK if expected marked bytes > actual marked bytes. It means 1325 // we have missed accounting some objects during the actual marking. 1326 failures += 1; 1327 } 1328 } 1329 1330 // Verify the bit, for this region, in the actual and expected 1331 // (which was just calculated) region bit maps. 1332 // We're not OK if the bit in the calculated expected region 1333 // bitmap is set and the bit in the actual region bitmap is not. 1334 BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); 1335 1336 bool expected = _exp_region_bm->at(index); 1337 bool actual = _region_bm->at(index); 1338 if (expected && !actual) { 1339 failures += 1; 1340 } 1341 1342 // Verify that the card bit maps for the cards spanned by the current 1343 // region match. We have an error if we have a set bit in the expected 1344 // bit map and the corresponding bit in the actual bitmap is not set. 1345 1346 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom()); 1347 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top()); 1348 1349 for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) { 1350 expected = _exp_card_bm->at(i); 1351 actual = _card_bm->at(i); 1352 1353 if (expected && !actual) { 1354 failures += 1; 1355 } 1356 } 1357 1358 _failures += failures; 1359 1360 // We could stop iteration over the heap when we 1361 // find the first violating region by returning true. 
1362 return false; 1363 } 1364 }; 1365 1366 class G1ParVerifyFinalCountTask: public AbstractGangTask { 1367 protected: 1368 G1CollectedHeap* _g1h; 1369 ConcurrentMark* _cm; 1370 BitMap* _actual_region_bm; 1371 BitMap* _actual_card_bm; 1372 1373 uint _n_workers; 1374 1375 BitMap* _expected_region_bm; 1376 BitMap* _expected_card_bm; 1377 1378 int _failures; 1379 1380 HeapRegionClaimer _hrclaimer; 1381 1382 public: 1383 G1ParVerifyFinalCountTask(G1CollectedHeap* g1h, 1384 BitMap* region_bm, BitMap* card_bm, 1385 BitMap* expected_region_bm, BitMap* expected_card_bm) 1386 : AbstractGangTask("G1 verify final counting"), 1387 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1388 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1389 _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm), 1390 _failures(0), 1391 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1392 assert(VerifyDuringGC, "don't call this otherwise"); 1393 assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity"); 1394 assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity"); 1395 } 1396 1397 void work(uint worker_id) { 1398 assert(worker_id < _n_workers, "invariant"); 1399 1400 VerifyLiveObjectDataHRClosure verify_cl(_g1h, 1401 _actual_region_bm, _actual_card_bm, 1402 _expected_region_bm, 1403 _expected_card_bm); 1404 1405 _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer); 1406 1407 Atomic::add(verify_cl.failures(), &_failures); 1408 } 1409 1410 int failures() const { return _failures; } 1411 }; 1412 1413 // Closure that finalizes the liveness counting data. 1414 // Used during the cleanup pause. 1415 // Sets the bits corresponding to the interval [NTAMS, top] 1416 // (which contains the implicitly live objects) in the 1417 // card liveness bitmap. Also sets the bit for each region, 1418 // containing live data, in the region liveness bitmap. 1419 1420 class FinalCountDataUpdateClosure: public CMCountDataClosureBase { 1421 public: 1422 FinalCountDataUpdateClosure(G1CollectedHeap* g1h, 1423 BitMap* region_bm, 1424 BitMap* card_bm) : 1425 CMCountDataClosureBase(g1h, region_bm, card_bm) { } 1426 1427 bool doHeapRegion(HeapRegion* hr) { 1428 HeapWord* ntams = hr->next_top_at_mark_start(); 1429 HeapWord* top = hr->top(); 1430 1431 assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions."); 1432 1433 // Mark the allocated-since-marking portion... 1434 if (ntams < top) { 1435 // This definitely means the region has live objects. 1436 set_bit_for_region(hr); 1437 1438 // Now set the bits in the card bitmap for [ntams, top) 1439 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams); 1440 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top); 1441 1442 // Note: if we're looking at the last region in heap - top 1443 // could be actually just beyond the end of the heap; end_idx 1444 // will then correspond to a (non-existent) card that is also 1445 // just beyond the heap. 
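// In that case end_idx is left alone; otherwise it is rounded up so the last, partially covered card is included.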
1446 if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) { 1447 // end of object is not card aligned - increment to cover 1448 // all the cards spanned by the object 1449 end_idx += 1; 1450 } 1451 1452 assert(end_idx <= _card_bm->size(), 1453 "oob: end_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT, 1454 end_idx, _card_bm->size()); 1455 assert(start_idx < _card_bm->size(), 1456 "oob: start_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT, 1457 start_idx, _card_bm->size()); 1458 1459 _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */); 1460 } 1461 1462 // Set the bit for the region if it contains live data 1463 if (hr->next_marked_bytes() > 0) { 1464 set_bit_for_region(hr); 1465 } 1466 1467 return false; 1468 } 1469 }; 1470 1471 class G1ParFinalCountTask: public AbstractGangTask { 1472 protected: 1473 G1CollectedHeap* _g1h; 1474 ConcurrentMark* _cm; 1475 BitMap* _actual_region_bm; 1476 BitMap* _actual_card_bm; 1477 1478 uint _n_workers; 1479 HeapRegionClaimer _hrclaimer; 1480 1481 public: 1482 G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm) 1483 : AbstractGangTask("G1 final counting"), 1484 _g1h(g1h), _cm(_g1h->concurrent_mark()), 1485 _actual_region_bm(region_bm), _actual_card_bm(card_bm), 1486 _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) { 1487 } 1488 1489 void work(uint worker_id) { 1490 assert(worker_id < _n_workers, "invariant"); 1491 1492 FinalCountDataUpdateClosure final_update_cl(_g1h, 1493 _actual_region_bm, 1494 _actual_card_bm); 1495 1496 _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer); 1497 } 1498 }; 1499 1500 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { 1501 G1CollectedHeap* _g1; 1502 size_t _freed_bytes; 1503 FreeRegionList* _local_cleanup_list; 1504 uint _old_regions_removed; 1505 uint _humongous_regions_removed; 1506 HRRSCleanupTask* _hrrs_cleanup_task; 1507 1508 public: 1509 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1510 FreeRegionList* local_cleanup_list, 1511 HRRSCleanupTask* hrrs_cleanup_task) : 1512 _g1(g1), 1513 _freed_bytes(0), 1514 _local_cleanup_list(local_cleanup_list), 1515 _old_regions_removed(0), 1516 _humongous_regions_removed(0), 1517 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1518 1519 size_t freed_bytes() { return _freed_bytes; } 1520 const uint old_regions_removed() { return _old_regions_removed; } 1521 const uint humongous_regions_removed() { return _humongous_regions_removed; } 1522 1523 bool doHeapRegion(HeapRegion *hr) { 1524 if (hr->is_archive()) { 1525 return false; 1526 } 1527 // We use a claim value of zero here because all regions 1528 // were claimed with value 1 in the FinalCount task. 
1529 _g1->reset_gc_time_stamps(hr); 1530 hr->note_end_of_marking(); 1531 1532 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { 1533 _freed_bytes += hr->used(); 1534 hr->set_containing_set(NULL); 1535 if (hr->is_humongous()) { 1536 _humongous_regions_removed++; 1537 _g1->free_humongous_region(hr, _local_cleanup_list, true); 1538 } else { 1539 _old_regions_removed++; 1540 _g1->free_region(hr, _local_cleanup_list, true); 1541 } 1542 } else { 1543 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1544 } 1545 1546 return false; 1547 } 1548 }; 1549 1550 class G1ParNoteEndTask: public AbstractGangTask { 1551 friend class G1NoteEndOfConcMarkClosure; 1552 1553 protected: 1554 G1CollectedHeap* _g1h; 1555 FreeRegionList* _cleanup_list; 1556 HeapRegionClaimer _hrclaimer; 1557 1558 public: 1559 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1560 AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { 1561 } 1562 1563 void work(uint worker_id) { 1564 FreeRegionList local_cleanup_list("Local Cleanup List"); 1565 HRRSCleanupTask hrrs_cleanup_task; 1566 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, 1567 &hrrs_cleanup_task); 1568 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); 1569 assert(g1_note_end.complete(), "Shouldn't have yielded!"); 1570 1571 // Now update the lists 1572 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); 1573 { 1574 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1575 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); 1576 1577 // If we iterate over the global cleanup list at the end of 1578 // cleanup to do this printing we will not guarantee to only 1579 // generate output for the newly-reclaimed regions (the list 1580 // might not be empty at the beginning of cleanup; we might 1581 // still be working on its previous contents). So we do the 1582 // printing here, before we append the new regions to the global 1583 // cleanup list. 1584 1585 G1HRPrinter* hr_printer = _g1h->hr_printer(); 1586 if (hr_printer->is_active()) { 1587 FreeRegionListIterator iter(&local_cleanup_list); 1588 while (iter.more_available()) { 1589 HeapRegion* hr = iter.get_next(); 1590 hr_printer->cleanup(hr); 1591 } 1592 } 1593 1594 _cleanup_list->add_ordered(&local_cleanup_list); 1595 assert(local_cleanup_list.is_empty(), "post-condition"); 1596 1597 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1598 } 1599 } 1600 }; 1601 1602 class G1ParScrubRemSetTask: public AbstractGangTask { 1603 protected: 1604 G1RemSet* _g1rs; 1605 BitMap* _region_bm; 1606 BitMap* _card_bm; 1607 HeapRegionClaimer _hrclaimer; 1608 1609 public: 1610 G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) : 1611 AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) { 1612 } 1613 1614 void work(uint worker_id) { 1615 _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer); 1616 } 1617 1618 }; 1619 1620 void ConcurrentMark::cleanup() { 1621 // world is stopped at this checkpoint 1622 assert(SafepointSynchronize::is_at_safepoint(), 1623 "world should be stopped"); 1624 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1625 1626 // If a full collection has happened, we shouldn't do this. 
1627 if (has_aborted()) {
1628 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1629 return;
1630 }
1631 
1632 g1h->verify_region_sets_optional();
1633 
1634 if (VerifyDuringGC) {
1635 HandleMark hm; // handle scope
1636 g1h->prepare_for_verify();
1637 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1638 }
1639 g1h->check_bitmaps("Cleanup Start");
1640 
1641 G1CollectorPolicy* g1p = g1h->g1_policy();
1642 g1p->record_concurrent_mark_cleanup_start();
1643 
1644 double start = os::elapsedTime();
1645 
1646 HeapRegionRemSet::reset_for_cleanup_tasks();
1647 
1648 // Do counting once more with the world stopped for good measure.
1649 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1650 
1651 g1h->workers()->run_task(&g1_par_count_task);
1652 
1653 if (VerifyDuringGC) {
1654 // Verify that the counting data accumulated during marking matches
1655 // that calculated by walking the marking bitmap.
1656 
1657 // Bitmaps to hold expected values
1658 BitMap expected_region_bm(_region_bm.size(), true);
1659 BitMap expected_card_bm(_card_bm.size(), true);
1660 
1661 G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1662 &_region_bm,
1663 &_card_bm,
1664 &expected_region_bm,
1665 &expected_card_bm);
1666 
1667 g1h->workers()->run_task(&g1_par_verify_task);
1668 
1669 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1670 }
1671 
1672 size_t start_used_bytes = g1h->used();
1673 g1h->collector_state()->set_mark_in_progress(false);
1674 
1675 double count_end = os::elapsedTime();
1676 double this_final_counting_time = (count_end - start);
1677 _total_counting_time += this_final_counting_time;
1678 
1679 if (log_is_enabled(Trace, gc, liveness)) {
1680 G1PrintRegionLivenessInfoClosure cl("Post-Marking");
1681 _g1h->heap_region_iterate(&cl);
1682 }
1683 
1684 // Install newly created mark bitmap as "prev".
1685 swapMarkBitMaps();
1686 
1687 g1h->reset_gc_time_stamp();
1688 
1689 uint n_workers = _g1h->workers()->active_workers();
1690 
1691 // Note end of marking in all heap regions.
1692 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1693 g1h->workers()->run_task(&g1_par_note_end_task);
1694 g1h->check_gc_time_stamps();
1695 
1696 if (!cleanup_list_is_empty()) {
1697 // The cleanup list is not empty, so we'll have to process it
1698 // concurrently. Notify anyone else that might be wanting free
1699 // regions that there will be more free regions coming soon.
1700 g1h->set_free_regions_coming();
1701 }
1702 
1703 // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
1704 // call below, since it affects the metric by which we sort the heap regions.
1705 if (G1ScrubRemSets) {
1706 double rs_scrub_start = os::elapsedTime();
1707 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
1708 g1h->workers()->run_task(&g1_par_scrub_rs_task);
1709 
1710 double rs_scrub_end = os::elapsedTime();
1711 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
1712 _total_rs_scrub_time += this_rs_scrub_time;
1713 }
1714 
1715 // this will also free any regions totally full of garbage objects,
1716 // and sort the regions.
1717 g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1718 
1719 // Statistics.
1720 double end = os::elapsedTime();
1721 _cleanup_times.add((end - start) * 1000.0);
1722 
1723 // Clean up will have freed any regions completely full of garbage.
1724 // Update the soft reference policy with the new heap occupancy.
1725 Universe::update_heap_info_at_gc();
1726 
1727 if (VerifyDuringGC) {
1728 HandleMark hm; // handle scope
1729 g1h->prepare_for_verify();
1730 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
1731 }
1732 
1733 g1h->check_bitmaps("Cleanup End");
1734 
1735 g1h->verify_region_sets_optional();
1736 
1737 // We need to make this be a "collection" so any collection pause that
1738 // races with it goes around and waits for completeCleanup to finish.
1739 g1h->increment_total_collections();
1740 
1741 // Clean out dead classes and update Metaspace sizes.
1742 if (ClassUnloadingWithConcurrentMark) {
1743 ClassLoaderDataGraph::purge();
1744 }
1745 MetaspaceGC::compute_new_size();
1746 
1747 // We reclaimed old regions so we should calculate the sizes to make
1748 // sure we update the old gen/space data.
1749 g1h->g1mm()->update_sizes();
1750 g1h->allocation_context_stats().update_after_mark();
1751 
1752 g1h->trace_heap_after_concurrent_cycle();
1753 }
1754 
1755 void ConcurrentMark::completeCleanup() {
1756 if (has_aborted()) return;
1757 
1758 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1759 
1760 _cleanup_list.verify_optional();
1761 FreeRegionList tmp_free_list("Tmp Free List");
1762 
1763 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1764 "cleanup list has %u entries",
1765 _cleanup_list.length());
1766 
1767 // No one else should be accessing the _cleanup_list at this point,
1768 // so it is not necessary to take any locks
1769 while (!_cleanup_list.is_empty()) {
1770 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1771 assert(hr != NULL, "Got NULL from a non-empty list");
1772 hr->par_clear();
1773 tmp_free_list.add_ordered(hr);
1774 
1775 // Instead of adding one region at a time to the secondary_free_list,
1776 // we accumulate them in the local list and move them a few at a
1777 // time. This also cuts down on the number of notify_all() calls
1778 // we do during this process. We'll also append the local list when
1779 // _cleanup_list is empty (which means we just removed the last
1780 // region from the _cleanup_list).
1781 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
1782 _cleanup_list.is_empty()) {
1783 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1784 "appending %u entries to the secondary_free_list, "
1785 "cleanup list still has %u entries",
1786 tmp_free_list.length(),
1787 _cleanup_list.length());
1788 
1789 {
1790 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1791 g1h->secondary_free_list_add(&tmp_free_list);
1792 SecondaryFreeList_lock->notify_all();
1793 }
1794 #ifndef PRODUCT
1795 if (G1StressConcRegionFreeing) {
1796 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
1797 os::sleep(Thread::current(), (jlong) 1, false);
1798 }
1799 }
1800 #endif
1801 }
1802 }
1803 assert(tmp_free_list.is_empty(), "post-condition");
1804 }
1805 
1806 // Supporting Object and Oop closures for reference discovery
1807 // and processing during marking
1808 
1809 bool G1CMIsAliveClosure::do_object_b(oop obj) {
1810 HeapWord* addr = (HeapWord*)obj;
1811 return addr != NULL &&
1812 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
1813 }
1814 
1815 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1816 // Uses the CMTask associated with a worker thread (for serial reference
1817 // processing the CMTask for worker 0 is used) to preserve (mark) and
1818 // trace referent objects.
1819 //
1820 // Using the CMTask and embedded local queues avoids having the worker
1821 // threads operating on the global mark stack. This reduces the risk
1822 // of overflowing the stack - which we would rather avoid at this late
1823 // stage. Using the tasks' local queues also removes the potential for
1824 // the workers to interfere with each other, which could occur if they
1825 // were operating on the global stack.
1826 
1827 class G1CMKeepAliveAndDrainClosure: public OopClosure {
1828 ConcurrentMark* _cm;
1829 CMTask* _task;
1830 int _ref_counter_limit;
1831 int _ref_counter;
1832 bool _is_serial;
1833 public:
1834 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
1835 _cm(cm), _task(task), _is_serial(is_serial),
1836 _ref_counter_limit(G1RefProcDrainInterval) {
1837 assert(_ref_counter_limit > 0, "sanity");
1838 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1839 _ref_counter = _ref_counter_limit;
1840 }
1841 
1842 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1843 virtual void do_oop( oop* p) { do_oop_work(p); }
1844 
1845 template <class T> void do_oop_work(T* p) {
1846 if (!_cm->has_overflown()) {
1847 oop obj = oopDesc::load_decode_heap_oop(p);
1848 _task->deal_with_reference(obj);
1849 _ref_counter--;
1850 
1851 if (_ref_counter == 0) {
1852 // We have dealt with _ref_counter_limit references, pushing them
1853 // and objects reachable from them on to the local stack (and
1854 // possibly the global stack). Call CMTask::do_marking_step() to
1855 // process these entries.
1856 //
1857 // We call CMTask::do_marking_step() in a loop, which we'll exit if
1858 // there's nothing more to do (i.e. we're done with the entries that
1859 // were pushed as a result of the CMTask::deal_with_reference() calls
1860 // above) or we overflow.
1861 //
1862 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
1863 // flag while there may still be some work to do. (See the comment at
1864 // the beginning of CMTask::do_marking_step() for those conditions -
1865 // one of which is reaching the specified time target.) It is only
1866 // when CMTask::do_marking_step() returns without setting the
1867 // has_aborted() flag that the marking step has completed.
1868 do {
1869 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1870 _task->do_marking_step(mark_step_duration_ms,
1871 false /* do_termination */,
1872 _is_serial);
1873 } while (_task->has_aborted() && !_cm->has_overflown());
1874 _ref_counter = _ref_counter_limit;
1875 }
1876 }
1877 }
1878 };
1879 
1880 // 'Drain' oop closure used by both serial and parallel reference processing.
1881 // Uses the CMTask associated with a given worker thread (for serial
1882 // reference processing the CMTask for worker 0 is used). Calls the
1883 // do_marking_step routine, with an unbelievably large timeout value,
1884 // to drain the marking data structures of the remaining entries
1885 // added by the 'keep alive' oop closure above.
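// [Editor's note: the following is an illustrative, self-contained sketch, not part of
// the collector. The 'keep alive' closure above and the 'drain' closure that follows
// share a single retry pattern: run a bounded marking step, and repeat until one step
// finishes without aborting, unless the global mark stack has overflown (in which case
// the whole marking phase has to restart anyway). The sketch restates that control flow
// in plain C++; every name in it (sketch, SketchMarkState, drain_until_complete, the
// BoundedStep callable) is hypothetical and exists only to make the loop explicit.]
namespace sketch {
  struct SketchMarkState {
    bool step_aborted;    // the last bounded step hit its time budget or another abort condition
    bool has_overflown;   // the global mark stack overflowed; the marking phase must restart
    SketchMarkState() : step_aborted(false), has_overflown(false) { }
  };

  // Keep invoking the bounded step until it completes cleanly or marking overflows,
  // mirroring the do { do_marking_step(...); } while (aborted && !overflown) loops
  // used by the closures above and below.
  template <typename BoundedStep>
  void drain_until_complete(SketchMarkState& state, BoundedStep bounded_marking_step) {
    do {
      state.step_aborted = false;
      bounded_marking_step(state);   // may set step_aborted and/or has_overflown
    } while (state.step_aborted && !state.has_overflown);
  }
}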
1886 
1887 class G1CMDrainMarkingStackClosure: public VoidClosure {
1888 ConcurrentMark* _cm;
1889 CMTask* _task;
1890 bool _is_serial;
1891 public:
1892 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
1893 _cm(cm), _task(task), _is_serial(is_serial) {
1894 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1895 }
1896 
1897 void do_void() {
1898 do {
1899 // We call CMTask::do_marking_step() to completely drain the local
1900 // and global marking stacks of entries pushed by the 'keep alive'
1901 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1902 //
1903 // CMTask::do_marking_step() is called in a loop, which we'll exit
1904 // if there's nothing more to do (i.e. we've completely drained the
1905 // entries that were pushed as a result of applying the 'keep alive'
1906 // closure to the entries on the discovered ref lists) or we overflow
1907 // the global marking stack.
1908 //
1909 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
1910 // flag while there may still be some work to do. (See the comment at
1911 // the beginning of CMTask::do_marking_step() for those conditions -
1912 // one of which is reaching the specified time target.) It is only
1913 // when CMTask::do_marking_step() returns without setting the
1914 // has_aborted() flag that the marking step has completed.
1915 
1916 _task->do_marking_step(1000000000.0 /* something very large */,
1917 true /* do_termination */,
1918 _is_serial);
1919 } while (_task->has_aborted() && !_cm->has_overflown());
1920 }
1921 };
1922 
1923 // Implementation of AbstractRefProcTaskExecutor for parallel
1924 // reference processing at the end of G1 concurrent marking
1925 
1926 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
1927 private:
1928 G1CollectedHeap* _g1h;
1929 ConcurrentMark* _cm;
1930 WorkGang* _workers;
1931 uint _active_workers;
1932 
1933 public:
1934 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1935 ConcurrentMark* cm,
1936 WorkGang* workers,
1937 uint n_workers) :
1938 _g1h(g1h), _cm(cm),
1939 _workers(workers), _active_workers(n_workers) { }
1940 
1941 // Executes the given task using concurrent marking worker threads.
1942 virtual void execute(ProcessTask& task); 1943 virtual void execute(EnqueueTask& task); 1944 }; 1945 1946 class G1CMRefProcTaskProxy: public AbstractGangTask { 1947 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 1948 ProcessTask& _proc_task; 1949 G1CollectedHeap* _g1h; 1950 ConcurrentMark* _cm; 1951 1952 public: 1953 G1CMRefProcTaskProxy(ProcessTask& proc_task, 1954 G1CollectedHeap* g1h, 1955 ConcurrentMark* cm) : 1956 AbstractGangTask("Process reference objects in parallel"), 1957 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 1958 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1959 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 1960 } 1961 1962 virtual void work(uint worker_id) { 1963 ResourceMark rm; 1964 HandleMark hm; 1965 CMTask* task = _cm->task(worker_id); 1966 G1CMIsAliveClosure g1_is_alive(_g1h); 1967 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 1968 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 1969 1970 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 1971 } 1972 }; 1973 1974 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 1975 assert(_workers != NULL, "Need parallel worker threads."); 1976 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 1977 1978 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 1979 1980 // We need to reset the concurrency level before each 1981 // proxy task execution, so that the termination protocol 1982 // and overflow handling in CMTask::do_marking_step() knows 1983 // how many workers to wait for. 1984 _cm->set_concurrency(_active_workers); 1985 _workers->run_task(&proc_task_proxy); 1986 } 1987 1988 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 1989 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 1990 EnqueueTask& _enq_task; 1991 1992 public: 1993 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 1994 AbstractGangTask("Enqueue reference objects in parallel"), 1995 _enq_task(enq_task) { } 1996 1997 virtual void work(uint worker_id) { 1998 _enq_task.work(worker_id); 1999 } 2000 }; 2001 2002 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2003 assert(_workers != NULL, "Need parallel worker threads."); 2004 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 2005 2006 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2007 2008 // Not strictly necessary but... 2009 // 2010 // We need to reset the concurrency level before each 2011 // proxy task execution, so that the termination protocol 2012 // and overflow handling in CMTask::do_marking_step() knows 2013 // how many workers to wait for. 2014 _cm->set_concurrency(_active_workers); 2015 _workers->run_task(&enq_task_proxy); 2016 } 2017 2018 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 2019 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 2020 } 2021 2022 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2023 if (has_overflown()) { 2024 // Skip processing the discovered references if we have 2025 // overflown the global marking stack. Reference objects 2026 // only get discovered once so it is OK to not 2027 // de-populate the discovered reference lists. We could have, 2028 // but the only benefit would be that, when marking restarts, 2029 // less reference objects are discovered. 
2030 return;
2031 }
2032 
2033 ResourceMark rm;
2034 HandleMark hm;
2035 
2036 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2037 
2038 // Is alive closure.
2039 G1CMIsAliveClosure g1_is_alive(g1h);
2040 
2041 // Inner scope to exclude the cleaning of the string and symbol
2042 // tables from the displayed time.
2043 {
2044 GCTraceTime(Debug, gc) trace("GC Ref Proc", g1h->gc_timer_cm());
2045 
2046 ReferenceProcessor* rp = g1h->ref_processor_cm();
2047 
2048 // See the comment in G1CollectedHeap::ref_processing_init()
2049 // about how reference processing currently works in G1.
2050 
2051 // Set the soft reference policy
2052 rp->setup_policy(clear_all_soft_refs);
2053 assert(_markStack.isEmpty(), "mark stack should be empty");
2054 
2055 // Instances of the 'Keep Alive' and 'Complete GC' closures used
2056 // in serial reference processing. Note these closures are also
2057 // used for serially processing (by the current thread) the
2058 // JNI references during parallel reference processing.
2059 //
2060 // These closures do not need to synchronize with the worker
2061 // threads involved in parallel reference processing as these
2062 // instances are executed serially by the current thread (i.e.
2063 // reference processing is not multi-threaded and is thus
2064 // performed by the current thread instead of a gang worker).
2065 //
2066 // The gang tasks involved in parallel reference processing create
2067 // their own instances of these closures, which do their own
2068 // synchronization among themselves.
2069 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2070 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2071 
2072 // We need at least one active thread. If reference processing
2073 // is not multi-threaded we use the current (VMThread) thread,
2074 // otherwise we use the work gang from the G1CollectedHeap and
2075 // we utilize all the worker threads we can.
2076 bool processing_is_mt = rp->processing_is_mt();
2077 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2078 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2079 
2080 // Parallel processing task executor.
2081 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2082 g1h->workers(), active_workers);
2083 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2084 
2085 // Set the concurrency level. The phase was already set prior to
2086 // executing the remark task.
2087 set_concurrency(active_workers);
2088 
2089 // Set the degree of MT processing here. If the discovery was done MT,
2090 // the number of threads involved during discovery could differ from
2091 // the number of active workers. This is OK as long as the discovered
2092 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2093 rp->set_active_mt_degree(active_workers);
2094 
2095 // Process the weak references.
2096 const ReferenceProcessorStats& stats =
2097 rp->process_discovered_references(&g1_is_alive,
2098 &g1_keep_alive,
2099 &g1_drain_mark_stack,
2100 executor,
2101 g1h->gc_timer_cm());
2102 g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2103 
2104 // The do_oop work routines of the keep_alive and drain_marking_stack
2105 // oop closures will set the has_overflown flag if we overflow the
2106 // global marking stack.
2107 2108 assert(_markStack.overflow() || _markStack.isEmpty(), 2109 "mark stack should be empty (unless it overflowed)"); 2110 2111 if (_markStack.overflow()) { 2112 // This should have been done already when we tried to push an 2113 // entry on to the global mark stack. But let's do it again. 2114 set_has_overflown(); 2115 } 2116 2117 assert(rp->num_q() == active_workers, "why not"); 2118 2119 rp->enqueue_discovered_references(executor); 2120 2121 rp->verify_no_references_recorded(); 2122 assert(!rp->discovery_enabled(), "Post condition"); 2123 } 2124 2125 if (has_overflown()) { 2126 // We can not trust g1_is_alive if the marking stack overflowed 2127 return; 2128 } 2129 2130 assert(_markStack.isEmpty(), "Marking should have completed"); 2131 2132 // Unload Klasses, String, Symbols, Code Cache, etc. 2133 { 2134 GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm()); 2135 2136 if (ClassUnloadingWithConcurrentMark) { 2137 bool purged_classes; 2138 2139 { 2140 GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm()); 2141 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); 2142 } 2143 2144 { 2145 GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm()); 2146 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2147 } 2148 } 2149 2150 if (G1StringDedup::is_enabled()) { 2151 GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm()); 2152 G1StringDedup::unlink(&g1_is_alive); 2153 } 2154 } 2155 } 2156 2157 void ConcurrentMark::swapMarkBitMaps() { 2158 CMBitMapRO* temp = _prevMarkBitMap; 2159 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2160 _nextMarkBitMap = (CMBitMap*) temp; 2161 } 2162 2163 // Closure for marking entries in SATB buffers. 2164 class CMSATBBufferClosure : public SATBBufferClosure { 2165 private: 2166 CMTask* _task; 2167 G1CollectedHeap* _g1h; 2168 2169 // This is very similar to CMTask::deal_with_reference, but with 2170 // more relaxed requirements for the argument, so this must be more 2171 // circumspect about treating the argument as an object. 2172 void do_entry(void* entry) const { 2173 _task->increment_refs_reached(); 2174 HeapRegion* hr = _g1h->heap_region_containing(entry); 2175 if (entry < hr->next_top_at_mark_start()) { 2176 // Until we get here, we don't know whether entry refers to a valid 2177 // object; it could instead have been a stale reference. 
2178 oop obj = static_cast<oop>(entry);
2179 assert(obj->is_oop(true /* ignore mark word */),
2180 "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
2181 _task->make_reference_grey(obj, hr);
2182 }
2183 }
2184 
2185 public:
2186 CMSATBBufferClosure(CMTask* task, G1CollectedHeap* g1h)
2187 : _task(task), _g1h(g1h) { }
2188 
2189 virtual void do_buffer(void** buffer, size_t size) {
2190 for (size_t i = 0; i < size; ++i) {
2191 do_entry(buffer[i]);
2192 }
2193 }
2194 };
2195 
2196 class G1RemarkThreadsClosure : public ThreadClosure {
2197 CMSATBBufferClosure _cm_satb_cl;
2198 G1CMOopClosure _cm_cl;
2199 MarkingCodeBlobClosure _code_cl;
2200 int _thread_parity;
2201 
2202 public:
2203 G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task) :
2204 _cm_satb_cl(task, g1h),
2205 _cm_cl(g1h, g1h->concurrent_mark(), task),
2206 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2207 _thread_parity(Threads::thread_claim_parity()) {}
2208 
2209 void do_thread(Thread* thread) {
2210 if (thread->is_Java_thread()) {
2211 if (thread->claim_oops_do(true, _thread_parity)) {
2212 JavaThread* jt = (JavaThread*)thread;
2213 
2214 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2215 // however, oops reachable from nmethods have very complex lifecycles:
2216 // * Alive if on the stack of an executing method
2217 // * Weakly reachable otherwise
2218 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2219 // live by the SATB invariant but other oops recorded in nmethods may behave differently.
2220 jt->nmethods_do(&_code_cl);
2221 
2222 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
2223 }
2224 } else if (thread->is_VM_thread()) {
2225 if (thread->claim_oops_do(true, _thread_parity)) {
2226 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
2227 }
2228 }
2229 }
2230 };
2231 
2232 class CMRemarkTask: public AbstractGangTask {
2233 private:
2234 ConcurrentMark* _cm;
2235 public:
2236 void work(uint worker_id) {
2237 // Since all available tasks are actually started, we should
2238 // only proceed if we're supposed to be active.
2239 if (worker_id < _cm->active_tasks()) {
2240 CMTask* task = _cm->task(worker_id);
2241 task->record_start_time();
2242 {
2243 ResourceMark rm;
2244 HandleMark hm;
2245 
2246 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2247 Threads::threads_do(&threads_f);
2248 }
2249 
2250 do {
2251 task->do_marking_step(1000000000.0 /* something very large */,
2252 true /* do_termination */,
2253 false /* is_serial */);
2254 } while (task->has_aborted() && !_cm->has_overflown());
2255 // If we overflow, then we do not want to restart. We instead
2256 // want to abort remark and do concurrent marking again.
2257 task->record_end_time();
2258 }
2259 }
2260 
2261 CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2262 AbstractGangTask("Par Remark"), _cm(cm) {
2263 _cm->terminator()->reset_for_reuse(active_workers);
2264 }
2265 };
2266 
2267 void ConcurrentMark::checkpointRootsFinalWork() {
2268 ResourceMark rm;
2269 HandleMark hm;
2270 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2271 
2272 GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());
2273 
2274 g1h->ensure_parsability(false);
2275 
2276 // this is remark, so we'll use up all active threads
2277 uint active_workers = g1h->workers()->active_workers();
2278 set_concurrency_and_phase(active_workers, false /* concurrent */);
2279 // Leave _parallel_marking_threads at its
2280 // value originally calculated in the ConcurrentMark
2281 // constructor and pass values of the active workers
2282 // through the gang in the task.
2283 
2284 {
2285 StrongRootsScope srs(active_workers);
2286 
2287 CMRemarkTask remarkTask(this, active_workers);
2288 // We will start all available threads, even if we decide that the
2289 // active_workers will be fewer. The extra ones will just bail out
2290 // immediately.
2291 g1h->workers()->run_task(&remarkTask);
2292 }
2293 
2294 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2295 guarantee(has_overflown() ||
2296 satb_mq_set.completed_buffers_num() == 0,
2297 "Invariant: has_overflown = %s, num buffers = %d",
2298 BOOL_TO_STR(has_overflown()),
2299 satb_mq_set.completed_buffers_num());
2300 
2301 print_stats();
2302 }
2303 
2304 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2305 // Note we are overriding the read-only view of the prev map here, via
2306 // the cast.
2307 ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2308 }
2309 
2310 HeapRegion*
2311 ConcurrentMark::claim_region(uint worker_id) {
2312 // "checkpoint" the finger
2313 HeapWord* finger = _finger;
2314 
2315 // _heap_end will not change underneath our feet; it only changes at
2316 // yield points.
2317 while (finger < _heap_end) {
2318 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2319 
2320 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
2321 
2322 // Above heap_region_containing may return NULL as we always scan and claim
2323 // regions until the end of the heap. In this case, just jump to the next region.
2324 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2325 
2326 // Is the gap between reading the finger and doing the CAS too long?
2327 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2328 if (res == finger && curr_region != NULL) {
2329 // we succeeded
2330 HeapWord* bottom = curr_region->bottom();
2331 HeapWord* limit = curr_region->next_top_at_mark_start();
2332 
2333 // notice that _finger == end cannot be guaranteed here since
2334 // someone else might have moved the finger even further
2335 assert(_finger >= end, "the finger should have moved forward");
2336 
2337 if (limit > bottom) {
2338 return curr_region;
2339 } else {
2340 assert(limit == bottom,
2341 "the region limit should be at bottom");
2342 // we return NULL and the caller should try calling
2343 // claim_region() again.
2344 return NULL; 2345 } 2346 } else { 2347 assert(_finger > finger, "the finger should have moved forward"); 2348 // read it again 2349 finger = _finger; 2350 } 2351 } 2352 2353 return NULL; 2354 } 2355 2356 #ifndef PRODUCT 2357 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 2358 private: 2359 G1CollectedHeap* _g1h; 2360 const char* _phase; 2361 int _info; 2362 2363 public: 2364 VerifyNoCSetOops(const char* phase, int info = -1) : 2365 _g1h(G1CollectedHeap::heap()), 2366 _phase(phase), 2367 _info(info) 2368 { } 2369 2370 void operator()(oop obj) const { 2371 guarantee(obj->is_oop(), 2372 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 2373 p2i(obj), _phase, _info); 2374 guarantee(!_g1h->obj_in_cs(obj), 2375 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 2376 p2i(obj), _phase, _info); 2377 } 2378 }; 2379 2380 void ConcurrentMark::verify_no_cset_oops() { 2381 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2382 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 2383 return; 2384 } 2385 2386 // Verify entries on the global mark stack 2387 _markStack.iterate(VerifyNoCSetOops("Stack")); 2388 2389 // Verify entries on the task queues 2390 for (uint i = 0; i < _max_worker_id; ++i) { 2391 CMTaskQueue* queue = _task_queues->queue(i); 2392 queue->iterate(VerifyNoCSetOops("Queue", i)); 2393 } 2394 2395 // Verify the global finger 2396 HeapWord* global_finger = finger(); 2397 if (global_finger != NULL && global_finger < _heap_end) { 2398 // Since we always iterate over all regions, we might get a NULL HeapRegion 2399 // here. 2400 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 2401 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 2402 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 2403 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 2404 } 2405 2406 // Verify the task fingers 2407 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2408 for (uint i = 0; i < parallel_marking_threads(); ++i) { 2409 CMTask* task = _tasks[i]; 2410 HeapWord* task_finger = task->finger(); 2411 if (task_finger != NULL && task_finger < _heap_end) { 2412 // See above note on the global finger verification. 2413 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 2414 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 2415 !task_hr->in_collection_set(), 2416 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 2417 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 2418 } 2419 } 2420 } 2421 #endif // PRODUCT 2422 2423 // Aggregate the counting data that was constructed concurrently 2424 // with marking. 
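// [Editor's note: illustrative, self-contained sketch, not HotSpot code. For each region,
// the closure that follows does two things: it sums the per-worker liveness counters
// recorded for that region, and it ORs each worker's card bitmap into the single global
// card bitmap over the region's card index range. The sketch below restates that
// aggregation with plain standard containers; all names (sketch, aggregate_region,
// worker_card_bms, worker_marked_bytes, ...) are hypothetical.]
#include <cstddef>
#include <vector>

namespace sketch {
  // Returns the total live bytes recorded for 'region_index' across all workers and
  // unions the workers' card bits for [start_card, limit_card) into 'global_card_bm'.
  inline std::size_t aggregate_region(const std::vector<std::vector<bool> >& worker_card_bms,
                                      const std::vector<std::vector<std::size_t> >& worker_marked_bytes,
                                      std::vector<bool>& global_card_bm,
                                      std::size_t region_index,
                                      std::size_t start_card,
                                      std::size_t limit_card) {
    std::size_t marked_bytes = 0;
    for (std::size_t w = 0; w < worker_card_bms.size(); ++w) {
      marked_bytes += worker_marked_bytes[w][region_index];
      for (std::size_t card = start_card; card < limit_card; ++card) {
        if (worker_card_bms[w][card]) {
          global_card_bm[card] = true;   // union this worker's card bit into the global bitmap
        }
      }
    }
    return marked_bytes;                 // the caller adds this to the region's marked bytes
  }
}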
2425 class AggregateCountDataHRClosure: public HeapRegionClosure {
2426 G1CollectedHeap* _g1h;
2427 ConcurrentMark* _cm;
2428 CardTableModRefBS* _ct_bs;
2429 BitMap* _cm_card_bm;
2430 uint _max_worker_id;
2431 
2432 public:
2433 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2434 BitMap* cm_card_bm,
2435 uint max_worker_id) :
2436 _g1h(g1h), _cm(g1h->concurrent_mark()),
2437 _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2438 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2439 
2440 bool doHeapRegion(HeapRegion* hr) {
2441 HeapWord* start = hr->bottom();
2442 HeapWord* limit = hr->next_top_at_mark_start();
2443 HeapWord* end = hr->end();
2444 
2445 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2446 "Preconditions not met - "
2447 "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2448 "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2449 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));
2450 
2451 assert(hr->next_marked_bytes() == 0, "Precondition");
2452 
2453 if (start == limit) {
2454 // NTAMS of this region has not been set so nothing to do.
2455 return false;
2456 }
2457 
2458 // 'start' should be in the heap.
2459 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2460 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2461 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2462 
2463 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2464 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2465 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2466 
2467 // If ntams is not card aligned then we bump the card bitmap index
2468 // for limit so that we get all the cards spanned by
2469 // the object ending at ntams.
2470 // Note: if this is the last region in the heap then ntams
2471 // could actually be just beyond the end of the heap;
2472 // limit_idx will then correspond to a (non-existent) card
2473 // that is also outside the heap.
2474 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
2475 limit_idx += 1;
2476 }
2477 
2478 assert(limit_idx <= end_idx, "or else use atomics");
2479 
2480 // Aggregate the "stripe" in the count data associated with hr.
2481 uint hrm_index = hr->hrm_index();
2482 size_t marked_bytes = 0;
2483 
2484 for (uint i = 0; i < _max_worker_id; i += 1) {
2485 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
2486 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
2487 
2488 // Fetch the marked_bytes in this region for task i and
2489 // add it to the running total for this region.
2490 marked_bytes += marked_bytes_array[hrm_index];
2491 
2492 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
2493 // into the global card bitmap.
2494 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
2495 
2496 while (scan_idx < limit_idx) {
2497 assert(task_card_bm->at(scan_idx) == true, "should be");
2498 _cm_card_bm->set_bit(scan_idx);
2499 assert(_cm_card_bm->at(scan_idx) == true, "should be");
2500 
2501 // BitMap::get_next_one_offset() can handle the case when
2502 // its left_offset parameter is greater than its right_offset
2503 // parameter. It does, however, have an early exit if
2504 // left_offset == right_offset. So let's limit the value
2505 // passed in for left offset here.
2506 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 2507 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 2508 } 2509 } 2510 2511 // Update the marked bytes for this region. 2512 hr->add_to_marked_bytes(marked_bytes); 2513 2514 // Next heap region 2515 return false; 2516 } 2517 }; 2518 2519 class G1AggregateCountDataTask: public AbstractGangTask { 2520 protected: 2521 G1CollectedHeap* _g1h; 2522 ConcurrentMark* _cm; 2523 BitMap* _cm_card_bm; 2524 uint _max_worker_id; 2525 uint _active_workers; 2526 HeapRegionClaimer _hrclaimer; 2527 2528 public: 2529 G1AggregateCountDataTask(G1CollectedHeap* g1h, 2530 ConcurrentMark* cm, 2531 BitMap* cm_card_bm, 2532 uint max_worker_id, 2533 uint n_workers) : 2534 AbstractGangTask("Count Aggregation"), 2535 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 2536 _max_worker_id(max_worker_id), 2537 _active_workers(n_workers), 2538 _hrclaimer(_active_workers) { 2539 } 2540 2541 void work(uint worker_id) { 2542 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 2543 2544 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 2545 } 2546 }; 2547 2548 2549 void ConcurrentMark::aggregate_count_data() { 2550 uint n_workers = _g1h->workers()->active_workers(); 2551 2552 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 2553 _max_worker_id, n_workers); 2554 2555 _g1h->workers()->run_task(&g1_par_agg_task); 2556 } 2557 2558 // Clear the per-worker arrays used to store the per-region counting data 2559 void ConcurrentMark::clear_all_count_data() { 2560 // Clear the global card bitmap - it will be filled during 2561 // liveness count aggregation (during remark) and the 2562 // final counting task. 2563 _card_bm.clear(); 2564 2565 // Clear the global region bitmap - it will be filled as part 2566 // of the final counting task. 2567 _region_bm.clear(); 2568 2569 uint max_regions = _g1h->max_regions(); 2570 assert(_max_worker_id > 0, "uninitialized"); 2571 2572 for (uint i = 0; i < _max_worker_id; i += 1) { 2573 BitMap* task_card_bm = count_card_bitmap_for(i); 2574 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 2575 2576 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 2577 assert(marked_bytes_array != NULL, "uninitialized"); 2578 2579 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 2580 task_card_bm->clear(); 2581 } 2582 } 2583 2584 void ConcurrentMark::print_stats() { 2585 if (!log_is_enabled(Debug, gc, stats)) { 2586 return; 2587 } 2588 log_debug(gc, stats)("---------------------------------------------------------------------"); 2589 for (size_t i = 0; i < _active_tasks; ++i) { 2590 _tasks[i]->print_stats(); 2591 log_debug(gc, stats)("---------------------------------------------------------------------"); 2592 } 2593 } 2594 2595 // abandon current marking iteration due to a Full GC 2596 void ConcurrentMark::abort() { 2597 if (!cmThread()->during_cycle() || _has_aborted) { 2598 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2599 return; 2600 } 2601 2602 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2603 // concurrent bitmap clearing. 2604 _nextMarkBitMap->clearAll(); 2605 2606 // Note we cannot clear the previous marking bitmap here 2607 // since VerifyDuringGC verifies the objects marked during 2608 // a full GC against the previous bitmap. 
2609 2610 // Clear the liveness counting data 2611 clear_all_count_data(); 2612 // Empty mark stack 2613 reset_marking_state(); 2614 for (uint i = 0; i < _max_worker_id; ++i) { 2615 _tasks[i]->clear_region_fields(); 2616 } 2617 _first_overflow_barrier_sync.abort(); 2618 _second_overflow_barrier_sync.abort(); 2619 _has_aborted = true; 2620 2621 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2622 satb_mq_set.abandon_partial_marking(); 2623 // This can be called either during or outside marking, we'll read 2624 // the expected_active value from the SATB queue set. 2625 satb_mq_set.set_active_all_threads( 2626 false, /* new active value */ 2627 satb_mq_set.is_active() /* expected_active */); 2628 2629 _g1h->trace_heap_after_concurrent_cycle(); 2630 _g1h->register_concurrent_cycle_end(); 2631 } 2632 2633 static void print_ms_time_info(const char* prefix, const char* name, 2634 NumberSeq& ns) { 2635 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2636 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2637 if (ns.num() > 0) { 2638 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", 2639 prefix, ns.sd(), ns.maximum()); 2640 } 2641 } 2642 2643 void ConcurrentMark::print_summary_info() { 2644 LogHandle(gc, marking) log; 2645 if (!log.is_trace()) { 2646 return; 2647 } 2648 2649 log.trace(" Concurrent marking:"); 2650 print_ms_time_info(" ", "init marks", _init_times); 2651 print_ms_time_info(" ", "remarks", _remark_times); 2652 { 2653 print_ms_time_info(" ", "final marks", _remark_mark_times); 2654 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2655 2656 } 2657 print_ms_time_info(" ", "cleanups", _cleanup_times); 2658 log.trace(" Final counting total time = %8.2f s (avg = %8.2f ms).", 2659 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2660 if (G1ScrubRemSets) { 2661 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 2662 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2663 } 2664 log.trace(" Total stop_world time = %8.2f s.", 2665 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2666 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2667 cmThread()->vtime_accum(), cmThread()->vtime_mark_accum()); 2668 } 2669 2670 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2671 _parallel_workers->print_worker_threads_on(st); 2672 } 2673 2674 void ConcurrentMark::print_on_error(outputStream* st) const { 2675 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2676 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 2677 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 2678 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 2679 } 2680 2681 // We take a break if someone is trying to stop the world. 
2682 bool ConcurrentMark::do_yield_check(uint worker_id) { 2683 if (SuspendibleThreadSet::should_yield()) { 2684 if (worker_id == 0) { 2685 _g1h->g1_policy()->record_concurrent_pause(); 2686 } 2687 SuspendibleThreadSet::yield(); 2688 return true; 2689 } else { 2690 return false; 2691 } 2692 } 2693 2694 // Closure for iteration over bitmaps 2695 class CMBitMapClosure : public BitMapClosure { 2696 private: 2697 // the bitmap that is being iterated over 2698 CMBitMap* _nextMarkBitMap; 2699 ConcurrentMark* _cm; 2700 CMTask* _task; 2701 2702 public: 2703 CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) : 2704 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 2705 2706 bool do_bit(size_t offset) { 2707 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 2708 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 2709 assert( addr < _cm->finger(), "invariant"); 2710 assert(addr >= _task->finger(), "invariant"); 2711 2712 // We move that task's local finger along. 2713 _task->move_finger_to(addr); 2714 2715 _task->scan_object(oop(addr)); 2716 // we only partially drain the local queue and global stack 2717 _task->drain_local_queue(true); 2718 _task->drain_global_stack(true); 2719 2720 // if the has_aborted flag has been raised, we need to bail out of 2721 // the iteration 2722 return !_task->has_aborted(); 2723 } 2724 }; 2725 2726 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2727 ReferenceProcessor* result = NULL; 2728 if (G1UseConcMarkReferenceProcessing) { 2729 result = g1h->ref_processor_cm(); 2730 assert(result != NULL, "should not be NULL"); 2731 } 2732 return result; 2733 } 2734 2735 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2736 ConcurrentMark* cm, 2737 CMTask* task) 2738 : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)), 2739 _g1h(g1h), _cm(cm), _task(task) 2740 { } 2741 2742 void CMTask::setup_for_region(HeapRegion* hr) { 2743 assert(hr != NULL, 2744 "claim_region() should have filtered out NULL regions"); 2745 _curr_region = hr; 2746 _finger = hr->bottom(); 2747 update_region_limit(); 2748 } 2749 2750 void CMTask::update_region_limit() { 2751 HeapRegion* hr = _curr_region; 2752 HeapWord* bottom = hr->bottom(); 2753 HeapWord* limit = hr->next_top_at_mark_start(); 2754 2755 if (limit == bottom) { 2756 // The region was collected underneath our feet. 2757 // We set the finger to bottom to ensure that the bitmap 2758 // iteration that will follow this will not do anything. 2759 // (this is not a condition that holds when we set the region up, 2760 // as the region is not supposed to be empty in the first place) 2761 _finger = bottom; 2762 } else if (limit >= _region_limit) { 2763 assert(limit >= _finger, "peace of mind"); 2764 } else { 2765 assert(limit < _region_limit, "only way to get here"); 2766 // This can happen under some pretty unusual circumstances. An 2767 // evacuation pause empties the region underneath our feet (NTAMS 2768 // at bottom). We then do some allocation in the region (NTAMS 2769 // stays at bottom), followed by the region being used as a GC 2770 // alloc region (NTAMS will move to top() and the objects 2771 // originally below it will be grayed). All objects now marked in 2772 // the region are explicitly grayed, if below the global finger, 2773 // and we do not need in fact to scan anything else. So, we simply 2774 // set _finger to be limit to ensure that the bitmap iteration 2775 // doesn't do anything. 
2776 _finger = limit;
2777 }
2778 
2779 _region_limit = limit;
2780 }
2781 
2782 void CMTask::giveup_current_region() {
2783 assert(_curr_region != NULL, "invariant");
2784 clear_region_fields();
2785 }
2786 
2787 void CMTask::clear_region_fields() {
2788 // Reset these three fields to values that indicate that we're not
2789 // holding on to a region.
2790 _curr_region = NULL;
2791 _finger = NULL;
2792 _region_limit = NULL;
2793 }
2794 
2795 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2796 if (cm_oop_closure == NULL) {
2797 assert(_cm_oop_closure != NULL, "invariant");
2798 } else {
2799 assert(_cm_oop_closure == NULL, "invariant");
2800 }
2801 _cm_oop_closure = cm_oop_closure;
2802 }
2803 
2804 void CMTask::reset(CMBitMap* nextMarkBitMap) {
2805 guarantee(nextMarkBitMap != NULL, "invariant");
2806 _nextMarkBitMap = nextMarkBitMap;
2807 clear_region_fields();
2808 
2809 _calls = 0;
2810 _elapsed_time_ms = 0.0;
2811 _termination_time_ms = 0.0;
2812 _termination_start_time_ms = 0.0;
2813 }
2814 
2815 bool CMTask::should_exit_termination() {
2816 regular_clock_call();
2817 // This is called when we are in the termination protocol. We should
2818 // quit if, for some reason, this task wants to abort or the global
2819 // stack is not empty (this means that we can get work from it).
2820 return !_cm->mark_stack_empty() || has_aborted();
2821 }
2822 
2823 void CMTask::reached_limit() {
2824 assert(_words_scanned >= _words_scanned_limit ||
2825 _refs_reached >= _refs_reached_limit,
2826 "shouldn't have been called otherwise");
2827 regular_clock_call();
2828 }
2829 
2830 void CMTask::regular_clock_call() {
2831 if (has_aborted()) return;
2832 
2833 // First, we need to recalculate the words scanned and refs reached
2834 // limits for the next clock call.
2835 recalculate_limits();
2836 
2837 // During the regular clock call we do the following:
2838 
2839 // (1) If an overflow has been flagged, then we abort.
2840 if (_cm->has_overflown()) {
2841 set_has_aborted();
2842 return;
2843 }
2844 
2845 // If we are not concurrent (i.e. we're doing remark) we don't need
2846 // to check anything else. The other steps are only needed during
2847 // the concurrent marking phase.
2848 if (!concurrent()) return;
2849 
2850 // (2) If marking has been aborted for Full GC, then we also abort.
2851 if (_cm->has_aborted()) {
2852 set_has_aborted();
2853 return;
2854 }
2855 
2856 double curr_time_ms = os::elapsedVTime() * 1000.0;
2857 
2858 // (3) We check whether we should yield. If we have to, then we abort.
2859 if (SuspendibleThreadSet::should_yield()) {
2860 // We should yield. To do this we abort the task. The caller is
2861 // responsible for yielding.
2862 set_has_aborted();
2863 return;
2864 }
2865 
2866 // (4) We check whether we've reached our time quota. If we have,
2867 // then we abort.
2868 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2869 if (elapsed_time_ms > _time_target_ms) {
2870 set_has_aborted();
2871 _has_timed_out = true;
2872 return;
2873 }
2874 
2875 // (5) Finally, we check whether there are enough completed SATB
2876 // buffers available for processing. If there are, we abort.
2877 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2878 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 2879 // we do need to process SATB buffers, we'll abort and restart 2880 // the marking task to do so 2881 set_has_aborted(); 2882 return; 2883 } 2884 } 2885 2886 void CMTask::recalculate_limits() { 2887 _real_words_scanned_limit = _words_scanned + words_scanned_period; 2888 _words_scanned_limit = _real_words_scanned_limit; 2889 2890 _real_refs_reached_limit = _refs_reached + refs_reached_period; 2891 _refs_reached_limit = _real_refs_reached_limit; 2892 } 2893 2894 void CMTask::decrease_limits() { 2895 // This is called when we believe that we're going to do an infrequent 2896 // operation which will increase the per byte scanned cost (i.e. move 2897 // entries to/from the global stack). It basically tries to decrease the 2898 // scanning limit so that the clock is called earlier. 2899 2900 _words_scanned_limit = _real_words_scanned_limit - 2901 3 * words_scanned_period / 4; 2902 _refs_reached_limit = _real_refs_reached_limit - 2903 3 * refs_reached_period / 4; 2904 } 2905 2906 void CMTask::move_entries_to_global_stack() { 2907 // local array where we'll store the entries that will be popped 2908 // from the local queue 2909 oop buffer[global_stack_transfer_size]; 2910 2911 int n = 0; 2912 oop obj; 2913 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { 2914 buffer[n] = obj; 2915 ++n; 2916 } 2917 2918 if (n > 0) { 2919 // we popped at least one entry from the local queue 2920 2921 if (!_cm->mark_stack_push(buffer, n)) { 2922 set_has_aborted(); 2923 } 2924 } 2925 2926 // this operation was quite expensive, so decrease the limits 2927 decrease_limits(); 2928 } 2929 2930 void CMTask::get_entries_from_global_stack() { 2931 // local array where we'll store the entries that will be popped 2932 // from the global stack. 2933 oop buffer[global_stack_transfer_size]; 2934 int n; 2935 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); 2936 assert(n <= global_stack_transfer_size, 2937 "we should not pop more than the given limit"); 2938 if (n > 0) { 2939 // yes, we did actually pop at least one entry 2940 for (int i = 0; i < n; ++i) { 2941 bool success = _task_queue->push(buffer[i]); 2942 // We only call this when the local queue is empty or under a 2943 // given target limit. So, we do not expect this push to fail. 2944 assert(success, "invariant"); 2945 } 2946 } 2947 2948 // this operation was quite expensive, so decrease the limits 2949 decrease_limits(); 2950 } 2951 2952 void CMTask::drain_local_queue(bool partially) { 2953 if (has_aborted()) return; 2954 2955 // Decide what the target size is, depending whether we're going to 2956 // drain it partially (so that other tasks can steal if they run out 2957 // of things to do) or totally (at the very end). 
2958 size_t target_size; 2959 if (partially) { 2960 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 2961 } else { 2962 target_size = 0; 2963 } 2964 2965 if (_task_queue->size() > target_size) { 2966 oop obj; 2967 bool ret = _task_queue->pop_local(obj); 2968 while (ret) { 2969 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 2970 assert(!_g1h->is_on_master_free_list( 2971 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 2972 2973 scan_object(obj); 2974 2975 if (_task_queue->size() <= target_size || has_aborted()) { 2976 ret = false; 2977 } else { 2978 ret = _task_queue->pop_local(obj); 2979 } 2980 } 2981 } 2982 } 2983 2984 void CMTask::drain_global_stack(bool partially) { 2985 if (has_aborted()) return; 2986 2987 // We have a policy to drain the local queue before we attempt to 2988 // drain the global stack. 2989 assert(partially || _task_queue->size() == 0, "invariant"); 2990 2991 // Decide what the target size is, depending whether we're going to 2992 // drain it partially (so that other tasks can steal if they run out 2993 // of things to do) or totally (at the very end). Notice that, 2994 // because we move entries from the global stack in chunks or 2995 // because another task might be doing the same, we might in fact 2996 // drop below the target. But, this is not a problem. 2997 size_t target_size; 2998 if (partially) { 2999 target_size = _cm->partial_mark_stack_size_target(); 3000 } else { 3001 target_size = 0; 3002 } 3003 3004 if (_cm->mark_stack_size() > target_size) { 3005 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 3006 get_entries_from_global_stack(); 3007 drain_local_queue(partially); 3008 } 3009 } 3010 } 3011 3012 // SATB Queue has several assumptions on whether to call the par or 3013 // non-par versions of the methods. this is why some of the code is 3014 // replicated. We should really get rid of the single-threaded version 3015 // of the code to simplify things. 3016 void CMTask::drain_satb_buffers() { 3017 if (has_aborted()) return; 3018 3019 // We set this so that the regular clock knows that we're in the 3020 // middle of draining buffers and doesn't set the abort flag when it 3021 // notices that SATB buffers are available for draining. It'd be 3022 // very counter productive if it did that. :-) 3023 _draining_satb_buffers = true; 3024 3025 CMSATBBufferClosure satb_cl(this, _g1h); 3026 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3027 3028 // This keeps claiming and applying the closure to completed buffers 3029 // until we run out of buffers or we need to abort. 
3030 while (!has_aborted() && 3031 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 3032 regular_clock_call(); 3033 } 3034 3035 _draining_satb_buffers = false; 3036 3037 assert(has_aborted() || 3038 concurrent() || 3039 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3040 3041 // again, this was a potentially expensive operation, decrease the 3042 // limits to get the regular clock call early 3043 decrease_limits(); 3044 } 3045 3046 void CMTask::print_stats() { 3047 log_debug(gc, stats)("Marking Stats, task = %u, calls = %d", 3048 _worker_id, _calls); 3049 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3050 _elapsed_time_ms, _termination_time_ms); 3051 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3052 _step_times_ms.num(), _step_times_ms.avg(), 3053 _step_times_ms.sd()); 3054 log_debug(gc, stats)(" max = %1.2lfms, total = %1.2lfms", 3055 _step_times_ms.maximum(), _step_times_ms.sum()); 3056 } 3057 3058 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) { 3059 return _task_queues->steal(worker_id, hash_seed, obj); 3060 } 3061 3062 /***************************************************************************** 3063 3064 The do_marking_step(time_target_ms, ...) method is the building 3065 block of the parallel marking framework. It can be called in parallel 3066 with other invocations of do_marking_step() on different tasks 3067 (but only one per task, obviously) and concurrently with the 3068 mutator threads, or during remark, hence it eliminates the need 3069 for two versions of the code. When called during remark, it will 3070 pick up from where the task left off during the concurrent marking 3071 phase. Interestingly, tasks are also claimable during evacuation 3072 pauses too, since do_marking_step() ensures that it aborts before 3073 it needs to yield. 3074 3075 The data structures that it uses to do marking work are the 3076 following: 3077 3078 (1) Marking Bitmap. If there are gray objects that appear only 3079 on the bitmap (this happens either when dealing with an overflow 3080 or when the initial marking phase has simply marked the roots 3081 and didn't push them on the stack), then tasks claim heap 3082 regions whose bitmap they then scan to find gray objects. A 3083 global finger indicates where the end of the last claimed region 3084 is. A local finger indicates how far into the region a task has 3085 scanned. The two fingers are used to determine how to gray an 3086 object (i.e. whether simply marking it is OK, as it will be 3087 visited by a task in the future, or whether it needs to be also 3088 pushed on a stack). 3089 3090 (2) Local Queue. The local queue of the task which is accessed 3091 reasonably efficiently by the task. Other tasks can steal from 3092 it when they run out of work. Throughout the marking phase, a 3093 task attempts to keep its local queue short but not totally 3094 empty, so that entries are available for stealing by other 3095 tasks. Only when there is no more work, a task will totally 3096 drain its local queue. 3097 3098 (3) Global Mark Stack. This handles local queue overflow. During 3099 marking only sets of entries are moved between it and the local 3100 queues, as access to it requires a mutex and more fine-grain 3101 interaction with it which might cause contention. If it 3102 overflows, then the marking phase should restart and iterate 3103 over the bitmap to identify gray objects. 
Throughout the marking 3104 phase, tasks attempt to keep the global mark stack at a small 3105 length but not totally empty, so that entries are available for 3106 popping by other tasks. Only when there is no more work, tasks 3107 will totally drain the global mark stack. 3108 3109 (4) SATB Buffer Queue. This is where completed SATB buffers are 3110 made available. Buffers are regularly removed from this queue 3111 and scanned for roots, so that the queue doesn't get too 3112 long. During remark, all completed buffers are processed, as 3113 well as the filled in parts of any uncompleted buffers. 3114 3115 The do_marking_step() method tries to abort when the time target 3116 has been reached. There are a few other cases when the 3117 do_marking_step() method also aborts: 3118 3119 (1) When the marking phase has been aborted (after a Full GC). 3120 3121 (2) When a global overflow (on the global stack) has been 3122 triggered. Before the task aborts, it will actually sync up with 3123 the other tasks to ensure that all the marking data structures 3124 (local queues, stacks, fingers etc.) are re-initialized so that 3125 when do_marking_step() completes, the marking phase can 3126 immediately restart. 3127 3128 (3) When enough completed SATB buffers are available. The 3129 do_marking_step() method only tries to drain SATB buffers right 3130 at the beginning. So, if enough buffers are available, the 3131 marking step aborts and the SATB buffers are processed at 3132 the beginning of the next invocation. 3133 3134 (4) To yield. when we have to yield then we abort and yield 3135 right at the end of do_marking_step(). This saves us from a lot 3136 of hassle as, by yielding we might allow a Full GC. If this 3137 happens then objects will be compacted underneath our feet, the 3138 heap might shrink, etc. We save checking for this by just 3139 aborting and doing the yield right at the end. 3140 3141 From the above it follows that the do_marking_step() method should 3142 be called in a loop (or, otherwise, regularly) until it completes. 3143 3144 If a marking step completes without its has_aborted() flag being 3145 true, it means it has completed the current marking phase (and 3146 also all other marking tasks have done so and have all synced up). 3147 3148 A method called regular_clock_call() is invoked "regularly" (in 3149 sub ms intervals) throughout marking. It is this clock method that 3150 checks all the abort conditions which were mentioned above and 3151 decides when the task should abort. A work-based scheme is used to 3152 trigger this clock method: when the number of object words the 3153 marking phase has scanned or the number of references the marking 3154 phase has visited reach a given limit. Additional invocations to 3155 the method clock have been planted in a few other strategic places 3156 too. The initial reason for the clock method was to avoid calling 3157 vtime too regularly, as it is quite expensive. So, once it was in 3158 place, it was natural to piggy-back all the other conditions on it 3159 too and not constantly check them throughout the code. 3160 3161 If do_termination is true then do_marking_step will enter its 3162 termination protocol. 3163 3164 The value of is_serial must be true when do_marking_step is being 3165 called serially (i.e. by the VMThread) and do_marking_step should 3166 skip any synchronization in the termination and overflow code. 3167 Examples include the serial remark code and the serial reference 3168 processing closures. 
 3169
 3170    The value of is_serial must be false when do_marking_step is
 3171    being called by any of the worker threads in a work gang.
 3172    Examples include the concurrent marking code (CMMarkingTask),
 3173    the MT remark code, and the MT reference processing closures.
 3174
 3175  *****************************************************************************/
 3176
 3177 void CMTask::do_marking_step(double time_target_ms,
 3178                              bool do_termination,
 3179                              bool is_serial) {
 3180   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
 3181   assert(concurrent() == _cm->concurrent(), "they should be the same");
 3182
 3183   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
 3184   assert(_task_queues != NULL, "invariant");
 3185   assert(_task_queue != NULL, "invariant");
 3186   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
 3187
 3188   assert(!_claimed,
 3189          "only one thread should claim this task at any one time");
 3190
 3191   // OK, this doesn't safeguard against all possible scenarios, as it is
 3192   // possible for two threads to set the _claimed flag at the same
 3193   // time. But it is only for debugging purposes anyway and it will
 3194   // catch most problems.
 3195   _claimed = true;
 3196
 3197   _start_time_ms = os::elapsedVTime() * 1000.0;
 3198
 3199   // If do_stealing is true then do_marking_step will attempt to
 3200   // steal work from the other CMTasks. It only makes sense to
 3201   // enable stealing when the termination protocol is enabled
 3202   // and do_marking_step() is not being called serially.
 3203   bool do_stealing = do_termination && !is_serial;
 3204
 3205   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
 3206   _time_target_ms = time_target_ms - diff_prediction_ms;
 3207
 3208   // set up the variables that are used in the work-based scheme to
 3209   // call the regular clock method
 3210   _words_scanned = 0;
 3211   _refs_reached  = 0;
 3212   recalculate_limits();
 3213
 3214   // clear all flags
 3215   clear_has_aborted();
 3216   _has_timed_out = false;
 3217   _draining_satb_buffers = false;
 3218
 3219   ++_calls;
 3220
 3221   // Set up the bitmap and oop closures. Anything that uses them is
 3222   // eventually called from this method, so it is OK to allocate these
 3223   // statically.
 3224   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
 3225   G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
 3226   set_cm_oop_closure(&cm_oop_closure);
 3227
 3228   if (_cm->has_overflown()) {
 3229     // This can happen if the mark stack overflows during a GC pause
 3230     // and this task, after a yield point, restarts. We have to abort
 3231     // as we need to get into the overflow protocol which happens
 3232     // right at the end of this task.
 3233     set_has_aborted();
 3234   }
 3235
 3236   // First drain any available SATB buffers. After this, we will not
 3237   // look at SATB buffers before the next invocation of this method.
 3238   // If enough completed SATB buffers are queued up, the regular clock
 3239   // will abort this task so that it restarts.
 3240   drain_satb_buffers();
 3241   // ...then partially drain the local queue and the global stack
 3242   drain_local_queue(true);
 3243   drain_global_stack(true);
 3244
 3245   do {
 3246     if (!has_aborted() && _curr_region != NULL) {
 3247       // This means that we're already holding on to a region.
3248 assert(_finger != NULL, "if region is not NULL, then the finger " 3249 "should not be NULL either"); 3250 3251 // We might have restarted this task after an evacuation pause 3252 // which might have evacuated the region we're holding on to 3253 // underneath our feet. Let's read its limit again to make sure 3254 // that we do not iterate over a region of the heap that 3255 // contains garbage (update_region_limit() will also move 3256 // _finger to the start of the region if it is found empty). 3257 update_region_limit(); 3258 // We will start from _finger not from the start of the region, 3259 // as we might be restarting this task after aborting half-way 3260 // through scanning this region. In this case, _finger points to 3261 // the address where we last found a marked object. If this is a 3262 // fresh region, _finger points to start(). 3263 MemRegion mr = MemRegion(_finger, _region_limit); 3264 3265 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 3266 "humongous regions should go around loop once only"); 3267 3268 // Some special cases: 3269 // If the memory region is empty, we can just give up the region. 3270 // If the current region is humongous then we only need to check 3271 // the bitmap for the bit associated with the start of the object, 3272 // scan the object if it's live, and give up the region. 3273 // Otherwise, let's iterate over the bitmap of the part of the region 3274 // that is left. 3275 // If the iteration is successful, give up the region. 3276 if (mr.is_empty()) { 3277 giveup_current_region(); 3278 regular_clock_call(); 3279 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 3280 if (_nextMarkBitMap->isMarked(mr.start())) { 3281 // The object is marked - apply the closure 3282 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 3283 bitmap_closure.do_bit(offset); 3284 } 3285 // Even if this task aborted while scanning the humongous object 3286 // we can (and should) give up the current region. 3287 giveup_current_region(); 3288 regular_clock_call(); 3289 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 3290 giveup_current_region(); 3291 regular_clock_call(); 3292 } else { 3293 assert(has_aborted(), "currently the only way to do so"); 3294 // The only way to abort the bitmap iteration is to return 3295 // false from the do_bit() method. However, inside the 3296 // do_bit() method we move the _finger to point to the 3297 // object currently being looked at. So, if we bail out, we 3298 // have definitely set _finger to something non-null. 3299 assert(_finger != NULL, "invariant"); 3300 3301 // Region iteration was actually aborted. So now _finger 3302 // points to the address of the object we last scanned. If we 3303 // leave it there, when we restart this task, we will rescan 3304 // the object. It is easy to avoid this. We move the finger by 3305 // enough to point to the next possible object header (the 3306 // bitmap knows by how much we need to move it as it knows its 3307 // granularity). 3308 assert(_finger < _region_limit, "invariant"); 3309 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 3310 // Check if bitmap iteration was aborted while scanning the last object 3311 if (new_finger >= _region_limit) { 3312 giveup_current_region(); 3313 } else { 3314 move_finger_to(new_finger); 3315 } 3316 } 3317 } 3318 // At this point we have either completed iterating over the 3319 // region we were holding on to, or we have aborted. 
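      // Note: at this point either we still hold a region (in which case
      // _finger and _region_limit are set), or we have given it up and
      // _curr_region, _finger and _region_limit are all NULL; the claim
      // loop below asserts the latter before claiming a new region.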
3320 3321 // We then partially drain the local queue and the global stack. 3322 // (Do we really need this?) 3323 drain_local_queue(true); 3324 drain_global_stack(true); 3325 3326 // Read the note on the claim_region() method on why it might 3327 // return NULL with potentially more regions available for 3328 // claiming and why we have to check out_of_regions() to determine 3329 // whether we're done or not. 3330 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 3331 // We are going to try to claim a new region. We should have 3332 // given up on the previous one. 3333 // Separated the asserts so that we know which one fires. 3334 assert(_curr_region == NULL, "invariant"); 3335 assert(_finger == NULL, "invariant"); 3336 assert(_region_limit == NULL, "invariant"); 3337 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 3338 if (claimed_region != NULL) { 3339 // Yes, we managed to claim one 3340 setup_for_region(claimed_region); 3341 assert(_curr_region == claimed_region, "invariant"); 3342 } 3343 // It is important to call the regular clock here. It might take 3344 // a while to claim a region if, for example, we hit a large 3345 // block of empty regions. So we need to call the regular clock 3346 // method once round the loop to make sure it's called 3347 // frequently enough. 3348 regular_clock_call(); 3349 } 3350 3351 if (!has_aborted() && _curr_region == NULL) { 3352 assert(_cm->out_of_regions(), 3353 "at this point we should be out of regions"); 3354 } 3355 } while ( _curr_region != NULL && !has_aborted()); 3356 3357 if (!has_aborted()) { 3358 // We cannot check whether the global stack is empty, since other 3359 // tasks might be pushing objects to it concurrently. 3360 assert(_cm->out_of_regions(), 3361 "at this point we should be out of regions"); 3362 // Try to reduce the number of available SATB buffers so that 3363 // remark has less work to do. 3364 drain_satb_buffers(); 3365 } 3366 3367 // Since we've done everything else, we can now totally drain the 3368 // local queue and global stack. 3369 drain_local_queue(false); 3370 drain_global_stack(false); 3371 3372 // Attempt at work stealing from other task's queues. 3373 if (do_stealing && !has_aborted()) { 3374 // We have not aborted. This means that we have finished all that 3375 // we could. Let's try to do some stealing... 3376 3377 // We cannot check whether the global stack is empty, since other 3378 // tasks might be pushing objects to it concurrently. 3379 assert(_cm->out_of_regions() && _task_queue->size() == 0, 3380 "only way to reach here"); 3381 while (!has_aborted()) { 3382 oop obj; 3383 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 3384 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 3385 "any stolen object should be marked"); 3386 scan_object(obj); 3387 3388 // And since we're towards the end, let's totally drain the 3389 // local queue and global stack. 3390 drain_local_queue(false); 3391 drain_global_stack(false); 3392 } else { 3393 break; 3394 } 3395 } 3396 } 3397 3398 // We still haven't aborted. Now, let's try to get into the 3399 // termination protocol. 3400 if (do_termination && !has_aborted()) { 3401 // We cannot check whether the global stack is empty, since other 3402 // tasks might be concurrently pushing objects on it. 3403 // Separated the asserts so that we know which one fires. 
3404 assert(_cm->out_of_regions(), "only way to reach here"); 3405 assert(_task_queue->size() == 0, "only way to reach here"); 3406 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 3407 3408 // The CMTask class also extends the TerminatorTerminator class, 3409 // hence its should_exit_termination() method will also decide 3410 // whether to exit the termination protocol or not. 3411 bool finished = (is_serial || 3412 _cm->terminator()->offer_termination(this)); 3413 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 3414 _termination_time_ms += 3415 termination_end_time_ms - _termination_start_time_ms; 3416 3417 if (finished) { 3418 // We're all done. 3419 3420 if (_worker_id == 0) { 3421 // let's allow task 0 to do this 3422 if (concurrent()) { 3423 assert(_cm->concurrent_marking_in_progress(), "invariant"); 3424 // we need to set this to false before the next 3425 // safepoint. This way we ensure that the marking phase 3426 // doesn't observe any more heap expansions. 3427 _cm->clear_concurrent_marking_in_progress(); 3428 } 3429 } 3430 3431 // We can now guarantee that the global stack is empty, since 3432 // all other tasks have finished. We separated the guarantees so 3433 // that, if a condition is false, we can immediately find out 3434 // which one. 3435 guarantee(_cm->out_of_regions(), "only way to reach here"); 3436 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 3437 guarantee(_task_queue->size() == 0, "only way to reach here"); 3438 guarantee(!_cm->has_overflown(), "only way to reach here"); 3439 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 3440 } else { 3441 // Apparently there's more work to do. Let's abort this task. It 3442 // will restart it and we can hopefully find more things to do. 3443 set_has_aborted(); 3444 } 3445 } 3446 3447 // Mainly for debugging purposes to make sure that a pointer to the 3448 // closure which was statically allocated in this frame doesn't 3449 // escape it by accident. 3450 set_cm_oop_closure(NULL); 3451 double end_time_ms = os::elapsedVTime() * 1000.0; 3452 double elapsed_time_ms = end_time_ms - _start_time_ms; 3453 // Update the step history. 3454 _step_times_ms.add(elapsed_time_ms); 3455 3456 if (has_aborted()) { 3457 // The task was aborted for some reason. 3458 if (_has_timed_out) { 3459 double diff_ms = elapsed_time_ms - _time_target_ms; 3460 // Keep statistics of how well we did with respect to hitting 3461 // our target only if we actually timed out (if we aborted for 3462 // other reasons, then the results might get skewed). 3463 _marking_step_diffs_ms.add(diff_ms); 3464 } 3465 3466 if (_cm->has_overflown()) { 3467 // This is the interesting one. We aborted because a global 3468 // overflow was raised. This means we have to restart the 3469 // marking phase and start iterating over regions. However, in 3470 // order to do this we have to make sure that all tasks stop 3471 // what they are doing and re-initialize in a safe manner. We 3472 // will achieve this with the use of two barrier sync points. 3473 3474 if (!is_serial) { 3475 // We only need to enter the sync barrier if being called 3476 // from a parallel context 3477 _cm->enter_first_sync_barrier(_worker_id); 3478 3479 // When we exit this sync barrier we know that all tasks have 3480 // stopped doing marking work. So, it's now safe to 3481 // re-initialize our data structures. At the end of this method, 3482 // task 0 will clear the global data structures. 3483 } 3484 3485 // We clear the local state of this task... 
3486 clear_region_fields(); 3487 3488 if (!is_serial) { 3489 // ...and enter the second barrier. 3490 _cm->enter_second_sync_barrier(_worker_id); 3491 } 3492 // At this point, if we're during the concurrent phase of 3493 // marking, everything has been re-initialized and we're 3494 // ready to restart. 3495 } 3496 } 3497 3498 _claimed = false; 3499 } 3500 3501 CMTask::CMTask(uint worker_id, 3502 ConcurrentMark* cm, 3503 size_t* marked_bytes, 3504 BitMap* card_bm, 3505 CMTaskQueue* task_queue, 3506 CMTaskQueueSet* task_queues) 3507 : _g1h(G1CollectedHeap::heap()), 3508 _worker_id(worker_id), _cm(cm), 3509 _claimed(false), 3510 _nextMarkBitMap(NULL), _hash_seed(17), 3511 _task_queue(task_queue), 3512 _task_queues(task_queues), 3513 _cm_oop_closure(NULL), 3514 _marked_bytes_array(marked_bytes), 3515 _card_bm(card_bm) { 3516 guarantee(task_queue != NULL, "invariant"); 3517 guarantee(task_queues != NULL, "invariant"); 3518 3519 _marking_step_diffs_ms.add(0.5); 3520 } 3521 3522 // These are formatting macros that are used below to ensure 3523 // consistent formatting. The *_H_* versions are used to format the 3524 // header for a particular value and they should be kept consistent 3525 // with the corresponding macro. Also note that most of the macros add 3526 // the necessary white space (as a prefix) which makes them a bit 3527 // easier to compose. 3528 3529 // All the output lines are prefixed with this string to be able to 3530 // identify them easily in a large log file. 3531 #define G1PPRL_LINE_PREFIX "###" 3532 3533 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 3534 #ifdef _LP64 3535 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 3536 #else // _LP64 3537 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 3538 #endif // _LP64 3539 3540 // For per-region info 3541 #define G1PPRL_TYPE_FORMAT " %-4s" 3542 #define G1PPRL_TYPE_H_FORMAT " %4s" 3543 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 3544 #define G1PPRL_BYTE_H_FORMAT " %9s" 3545 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 3546 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 3547 3548 // For summary info 3549 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 3550 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 3551 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 3552 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 3553 3554 G1PrintRegionLivenessInfoClosure:: 3555 G1PrintRegionLivenessInfoClosure(const char* phase_name) 3556 : _total_used_bytes(0), _total_capacity_bytes(0), 3557 _total_prev_live_bytes(0), _total_next_live_bytes(0), 3558 _hum_used_bytes(0), _hum_capacity_bytes(0), 3559 _hum_prev_live_bytes(0), _hum_next_live_bytes(0), 3560 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 3561 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 3562 MemRegion g1_reserved = g1h->g1_reserved(); 3563 double now = os::elapsedTime(); 3564 3565 // Print the header of the output. 
 3566   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
 3567   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
 3568                           G1PPRL_SUM_ADDR_FORMAT("reserved")
 3569                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
 3570                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
 3571                           HeapRegion::GrainBytes);
 3572   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
 3573   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
 3574                           G1PPRL_TYPE_H_FORMAT
 3575                           G1PPRL_ADDR_BASE_H_FORMAT
 3576                           G1PPRL_BYTE_H_FORMAT
 3577                           G1PPRL_BYTE_H_FORMAT
 3578                           G1PPRL_BYTE_H_FORMAT
 3579                           G1PPRL_DOUBLE_H_FORMAT
 3580                           G1PPRL_BYTE_H_FORMAT
 3581                           G1PPRL_BYTE_H_FORMAT,
 3582                           "type", "address-range",
 3583                           "used", "prev-live", "next-live", "gc-eff",
 3584                           "remset", "code-roots");
 3585   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
 3586                           G1PPRL_TYPE_H_FORMAT
 3587                           G1PPRL_ADDR_BASE_H_FORMAT
 3588                           G1PPRL_BYTE_H_FORMAT
 3589                           G1PPRL_BYTE_H_FORMAT
 3590                           G1PPRL_BYTE_H_FORMAT
 3591                           G1PPRL_DOUBLE_H_FORMAT
 3592                           G1PPRL_BYTE_H_FORMAT
 3593                           G1PPRL_BYTE_H_FORMAT,
 3594                           "", "",
 3595                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
 3596                           "(bytes)", "(bytes)");
 3597 }
 3598
 3599 // It takes as a parameter a pointer to one of the _hum_* fields, deduces
 3600 // the corresponding value for a region in a humongous region series
 3601 // (either the region size, or what's left if the _hum_* field is < the
 3602 // region size), and updates the _hum_* field accordingly.
 3603 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
 3604   size_t bytes = 0;
 3605   // The > 0 check is to deal with the prev and next live bytes which
 3606   // could be 0.
 3607   if (*hum_bytes > 0) {
 3608     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
 3609     *hum_bytes -= bytes;
 3610   }
 3611   return bytes;
 3612 }
 3613
 3614 // It deduces the values for a region in a humongous region series
 3615 // from the _hum_* fields and updates those accordingly. It assumes
 3616 // that the _hum_* fields have already been set up from the "starts
 3617 // humongous" region and that we visit the regions in address order.
 3618 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
 3619                                                      size_t* capacity_bytes,
 3620                                                      size_t* prev_live_bytes,
 3621                                                      size_t* next_live_bytes) {
 3622   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
 3623   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
 3624   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
 3625   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
 3626   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
 3627 }
 3628
 3629 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
 3630   const char* type       = r->get_type_str();
 3631   HeapWord* bottom       = r->bottom();
 3632   HeapWord* end          = r->end();
 3633   size_t capacity_bytes  = r->capacity();
 3634   size_t used_bytes      = r->used();
 3635   size_t prev_live_bytes = r->live_bytes();
 3636   size_t next_live_bytes = r->next_live_bytes();
 3637   double gc_eff          = r->gc_efficiency();
 3638   size_t remset_bytes    = r->rem_set()->mem_size();
 3639   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
 3640
 3641   if (r->is_starts_humongous()) {
 3642     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
 3643            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
 3644            "they should have been zeroed after the last time we used them");
 3645     // Set up the _hum_* fields.
3646 _hum_capacity_bytes = capacity_bytes; 3647 _hum_used_bytes = used_bytes; 3648 _hum_prev_live_bytes = prev_live_bytes; 3649 _hum_next_live_bytes = next_live_bytes; 3650 get_hum_bytes(&used_bytes, &capacity_bytes, 3651 &prev_live_bytes, &next_live_bytes); 3652 end = bottom + HeapRegion::GrainWords; 3653 } else if (r->is_continues_humongous()) { 3654 get_hum_bytes(&used_bytes, &capacity_bytes, 3655 &prev_live_bytes, &next_live_bytes); 3656 assert(end == bottom + HeapRegion::GrainWords, "invariant"); 3657 } 3658 3659 _total_used_bytes += used_bytes; 3660 _total_capacity_bytes += capacity_bytes; 3661 _total_prev_live_bytes += prev_live_bytes; 3662 _total_next_live_bytes += next_live_bytes; 3663 _total_remset_bytes += remset_bytes; 3664 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3665 3666 // Print a line for this particular region. 3667 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3668 G1PPRL_TYPE_FORMAT 3669 G1PPRL_ADDR_BASE_FORMAT 3670 G1PPRL_BYTE_FORMAT 3671 G1PPRL_BYTE_FORMAT 3672 G1PPRL_BYTE_FORMAT 3673 G1PPRL_DOUBLE_FORMAT 3674 G1PPRL_BYTE_FORMAT 3675 G1PPRL_BYTE_FORMAT, 3676 type, p2i(bottom), p2i(end), 3677 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3678 remset_bytes, strong_code_roots_bytes); 3679 3680 return false; 3681 } 3682 3683 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3684 // add static memory usages to remembered set sizes 3685 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3686 // Print the footer of the output. 3687 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3688 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3689 " SUMMARY" 3690 G1PPRL_SUM_MB_FORMAT("capacity") 3691 G1PPRL_SUM_MB_PERC_FORMAT("used") 3692 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3693 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3694 G1PPRL_SUM_MB_FORMAT("remset") 3695 G1PPRL_SUM_MB_FORMAT("code-roots"), 3696 bytes_to_mb(_total_capacity_bytes), 3697 bytes_to_mb(_total_used_bytes), 3698 perc(_total_used_bytes, _total_capacity_bytes), 3699 bytes_to_mb(_total_prev_live_bytes), 3700 perc(_total_prev_live_bytes, _total_capacity_bytes), 3701 bytes_to_mb(_total_next_live_bytes), 3702 perc(_total_next_live_bytes, _total_capacity_bytes), 3703 bytes_to_mb(_total_remset_bytes), 3704 bytes_to_mb(_total_strong_code_roots_bytes)); 3705 }
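
// Illustrative usage note (a sketch, not a definitive call site): the
// closure above is meant to be constructed with a phase name and then
// iterated over every heap region, roughly as the marking cleanup code
// earlier in this file does. The logging guard and the phase name below
// are examples only:
//
//   if (log_is_enabled(Trace, gc, liveness)) {
//     G1PrintRegionLivenessInfoClosure cl("Post-Marking");
//     G1CollectedHeap::heap()->heap_region_iterate(&cl);
//   }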