/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"

//
// CMS Bit Map Wrapper

CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
  _bm((uintptr_t*)NULL, 0),
  _shifter(shifter) {
  _bmStartWord = (HeapWord*)(rs.base());
  _bmWordSize  = rs.size()/HeapWordSize;    // rs.size() is in bytes
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                    (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));

  guarantee(brs.is_reserved(), "couldn't allocate CMS bit map");
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  guarantee(_virtual_space.initialize(brs, brs.size()),
            "couldn't reserve backing store for CMS bit map");
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of CMS bit map?");
  _bm.set_map((uintptr_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
                                               HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
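  // Each bitmap bit covers (1 << _shifter) heap words (see the constructor
  // above, which is handed MinObjAlignment - 1 as the shifter), so round
  // the query address to that granularity before converting it to a bit
  // offset.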
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                 HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
  HeapWord* left  = MAX2(_bmStartWord, mr.start());
  HeapWord* right = MIN2(_bmStartWord + _bmWordSize, mr.end());
  if (right > left) {
    // Right-open interval [leftOffset, rightOffset).
    return _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  } else {
    return true;
  }
}

void CMBitMapRO::mostly_disjoint_range_union(BitMap*   from_bitmap,
                                             size_t    from_start_index,
                                             HeapWord* to_start_word,
                                             size_t    word_num) {
  _bm.mostly_disjoint_range_union(from_bitmap,
                                  from_start_index,
                                  heapWordToOffset(to_start_word),
                                  word_num);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * (size_t)(1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(rs.base()) &&
         _bmWordSize == rs.size()>>LogHeapWordSize;
}
#endif

void CMBitMap::clearAll() {
  _bm.clear();
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
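  // mr is the first contiguous marked run at or after addr, clipped to
  // [addr, end_addr); its bits have just been cleared, so repeated calls
  // walk successive marked runs until an empty region is returned.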
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

void CMMarkStack::allocate(size_t size) {
  _base = NEW_C_HEAP_ARRAY(oop, size);
  if (_base == NULL) {
    vm_exit_during_initialization("Failed to allocate CM mark stack");
  }
  _index = 0;
  _capacity = (jint) size;
  _oops_do_bound = -1;
  NOT_PRODUCT(_max_depth = 0);
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    FREE_C_HEAP_ARRAY(oop, _base);
  }
}

void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically. We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}


void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}


bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}


CMRegionStack::CMRegionStack() : _base(NULL) {}

void CMRegionStack::allocate(size_t size) {
  _base = NEW_C_HEAP_ARRAY(MemRegion, size);
  if (_base == NULL) {
    vm_exit_during_initialization("Failed to allocate CM region mark stack");
  }
  _index = 0;
  _capacity = (jint) size;
}

CMRegionStack::~CMRegionStack() {
  if (_base != NULL) {
    FREE_C_HEAP_ARRAY(MemRegion, _base);
  }
}

void CMRegionStack::push_lock_free(MemRegion mr) {
  assert(mr.word_size() > 0, "Precondition");
  while (true) {
    jint index = _index;

    if (index >= _capacity) {
      _overflow = true;
      return;
    }
    // Otherwise...
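    // Claim the slot first by advancing _index with a CAS, then publish
    // the entry into the claimed slot (the same reserve-then-write
    // pattern as CMMarkStack::par_push() above).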
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = mr;
      return;
    }
    // Otherwise, we need to try again.
  }
}

// Lock-free pop of the region stack. Called during the concurrent
// marking / remark phases. Should only be called in tandem with
// other lock-free pops.
MemRegion CMRegionStack::pop_lock_free() {
  while (true) {
    jint index = _index;

    if (index == 0) {
      return MemRegion();
    }
    // Otherwise...
    jint next_index = index-1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      MemRegion mr = _base[next_index];
      if (mr.start() != NULL) {
        assert(mr.end() != NULL, "invariant");
        assert(mr.word_size() > 0, "invariant");
        return mr;
      } else {
        // that entry was invalidated... let's skip it
        assert(mr.end() == NULL, "invariant");
      }
    }
    // Otherwise, we need to try again.
  }
}

#if 0
// The routines that manipulate the region stack with a lock are
// not currently used. They should be retained, however, as a
// diagnostic aid.

void CMRegionStack::push_with_lock(MemRegion mr) {
  assert(mr.word_size() > 0, "Precondition");
  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);

  if (isFull()) {
    _overflow = true;
    return;
  }

  _base[_index] = mr;
  _index += 1;
}

MemRegion CMRegionStack::pop_with_lock() {
  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);

  while (true) {
    if (_index == 0) {
      return MemRegion();
    }
    _index -= 1;

    MemRegion mr = _base[_index];
    if (mr.start() != NULL) {
      assert(mr.end() != NULL, "invariant");
      assert(mr.word_size() > 0, "invariant");
      return mr;
    } else {
      // that entry was invalidated... let's skip it
      assert(mr.end() == NULL, "invariant");
    }
  }
}
#endif

bool CMRegionStack::invalidate_entries_into_cset() {
  bool result = false;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  for (int i = 0; i < _oops_do_bound; ++i) {
    MemRegion mr = _base[i];
    if (mr.start() != NULL) {
      assert(mr.end() != NULL, "invariant");
      assert(mr.word_size() > 0, "invariant");
      HeapRegion* hr = g1h->heap_region_containing(mr.start());
      assert(hr != NULL, "invariant");
      if (hr->in_collection_set()) {
        // The region points into the collection set
        _base[i] = MemRegion();
        result = true;
      }
    } else {
      // that entry was invalidated... let's skip it
      assert(mr.end() == NULL, "invariant");
    }
  }
  return result;
}

template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    // iterate over the oops in this oop, marking and pushing
    // the ones in CMS generation.
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::oops_do(OopClosure* f) {
  if (_index == 0) return;
  assert(_oops_do_bound != -1 && _oops_do_bound <= _index,
         "Bound must be set.");
  for (int i = 0; i < _oops_do_bound; i++) {
    f->do_oop(&_base[i]);
  }
  _oops_do_bound = -1;
}

bool ConcurrentMark::not_yet_marked(oop obj) const {
  return (_g1h->is_obj_ill(obj)
          || (_g1h->is_in_permanent(obj)
              && !nextMarkBitMap()->isMarked((HeapWord*)obj)));
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

ConcurrentMark::ConcurrentMark(ReservedSpace rs,
                               int max_regions) :
  _markBitMap1(rs, MinObjAlignment - 1),
  _markBitMap2(rs, MinObjAlignment - 1),

  _parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm(max_regions, false /* in_resource_area*/),
  _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
           CardTableModRefBS::card_shift,
           false /* in_resource_area*/),
  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),
  _at_least_one_mark_complete(false),

  _markStack(this),
  _regionStack(),
  // _finger set in set_non_marking_state

  _max_task_num(MAX2(ParallelGCThreads, (size_t) 1)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_task_num)),
  _terminator(ParallelTaskTerminator((int) _max_task_num, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _should_gray_objects(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = "PTR_FORMAT, _heap_start, _heap_end);
  }

  _markStack.allocate(MarkStackSize);
  _regionStack.allocate(G1MarkRegionStackSize);

  // Create & start a ConcurrentMark thread.
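  // (The ConcurrentMarkThread constructor is expected to create and start
  // the underlying thread; the asserts below only sanity-check the wiring
  // between that thread and this ConcurrentMark instance.)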
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");

  _g1h = G1CollectedHeap::heap();
  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_task_num;
  for (int i = 0; i < (int) _max_task_num; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new CMTask(i, this, task_queue, _task_queues);
    _accum_task_vtime[i] = 0.0;
  }

  if (ConcGCThreads > ParallelGCThreads) {
    vm_exit_during_initialization("Can't have more ConcGCThreads "
                                  "than ParallelGCThreads.");
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads = 0;
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  } else {
    if (ConcGCThreads > 0) {
      // notice that ConcGCThreads overrides G1MarkingOverheadPercent
      // if both are set
      _parallel_marking_threads = ConcGCThreads;
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // we will calculate the number of parallel marking threads
      // based on a target overhead with respect to the soft real-time
      // goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
        (double) os::processor_count();
      double sleep_factor =
                         (1.0 - marking_task_overhead) / marking_task_overhead;

      _parallel_marking_threads = (size_t) marking_thread_num;
      _sleep_factor             = sleep_factor;
      _marking_task_overhead    = marking_task_overhead;
    } else {
      _parallel_marking_threads = MAX2((ParallelGCThreads + 2) / 4, (size_t)1);
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    }

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
                     (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
                                             (int)
                                             _parallel_marking_threads,
                                             false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  // so that the call below can read a sensible value
  _heap_start = (HeapWord*) rs.base();
  set_non_marking_state();
}

void ConcurrentMark::update_g1_committed(bool force) {
  // If concurrent marking is not in progress, then we do not need to
  // update _heap_end. This has a subtle and important
  // side-effect. Imagine that two evacuation pauses happen between
  // marking completion and remark. The first one can grow the
  // heap (hence now the finger is below the heap end). Then, the
  // second one could unnecessarily push regions on the region
  // stack. This causes the invariant that the region stack is empty
  // at the beginning of remark to be false. By ensuring that we do
  // not observe heap expansions after marking is complete, we do
  // not have this problem.
  if (!concurrent_marking_in_progress() && !force) return;

  MemRegion committed = _g1h->g1_committed();
  assert(committed.start() == _heap_start, "start shouldn't change");
  HeapWord* new_end = committed.end();
  if (new_end > _heap_end) {
    // The heap has been expanded.
    _heap_end = new_end;
  }
  // Notice that the heap can also shrink. However, this only happens
  // during a Full GC (at least currently) and the entire marking
  // phase will bail out and the task will not be restarted. So, let's
  // do nothing.
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase. CM will be notified of any future g1_committed expansions
  // at the end of evacuation pauses, when tasks are inactive.
  MemRegion committed = _g1h->g1_committed();
  _heap_start = committed.start();
  _heap_end   = committed.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // reset all the marking data structures and any necessary flags
  clear_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
  for (int i = 0; i < (int) _max_task_num; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}

void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) {
  assert(active_tasks <= _max_task_num, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
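  // Each task caches the flag so that do_marking_step() can tell whether
  // it is running concurrently or inside a STW remark pause; see the
  // comments on the sync-barrier methods below, which only leave / join
  // the suspendible thread set when operating concurrently.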
  for (int i = 0; i < (int) _max_task_num; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(_finger == _heap_end, "only way to get here");
    update_g1_committed(true);
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  clear_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  for (int i = 0; i < (int) _max_task_num; ++i) {
    delete _task_queues->queue(i);
    delete _tasks[i];
  }
  delete _task_queues;
  FREE_C_HEAP_ARRAY(CMTask*, _tasks);
}

// This closure is used to mark refs into the g1 generation
// from external roots in the CMS bit map.
// Called at the first checkpoint.
//

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  // clear the mark bitmap (no grey objects to start with).
  // We need to do this in chunks and offer to yield in between
  // each chunk.
  HeapWord* start  = _nextMarkBitMap->startWord();
  HeapWord* end    = _nextMarkBitMap->endWord();
  HeapWord* cur    = start;
  size_t chunkSize = M;
  while (cur < end) {
    HeapWord* next = cur + chunkSize;
    if (next > end) {
      next = end;
    }
    MemRegion mr(cur, next);
    _nextMarkBitMap->clearRange(mr);
    cur = next;
    do_yield_check();

    // Repeat the asserts from above. We'll do them as asserts here to
    // minimize their overhead on the product. However, we'll have
    // them as guarantees at the beginning / end of the bitmap
    // clearing to get some checking in the product.
    assert(cmThread()->during_cycle(), "invariant");
    assert(!g1h->mark_in_progress(), "invariant");
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking(true);
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
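  // (reset() also turns concurrent_marking_in_progress back on, which
  // must hold during the evacuation pause that this initial-mark
  // checkpoint is piggy-backed on; see the comment at the end of reset().)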
  reset();
}


void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the remaining overflow number will decrease at
  // every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible to be suspended for a Full GC or an evacuation pause
 * could occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(int task_num) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%d] entering first barrier", task_num);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _first_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%d] leaving first barrier", task_num);
  }

  // let task 0 do this
  if (task_num == 0) {
    // task 0 is responsible for clearing the global data structures
    // We should be here because of an overflow.
    // During STW we should
    // not clear the overflow flag since we rely on it being true when
    // we exit this method to abort the pause and restart concurrent
    // marking.
    clear_marking_state(concurrent() /* clear_overflow */);
    force_overflow()->update();

    if (PrintGC) {
      gclog_or_tty->date_stamp(PrintGCDateStamps);
      gclog_or_tty->stamp(PrintGCTimeStamps);
      gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(int task_num) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%d] entering second barrier", task_num);
  }

  if (concurrent()) {
    ConcurrentGCThread::stsLeave();
  }
  _second_overflow_barrier_sync.enter();
  if (concurrent()) {
    ConcurrentGCThread::stsJoin();
  }
  // at this point everything should be re-initialised and ready to go

  if (verbose_low()) {
    gclog_or_tty->print_cr("[%d] leaving second barrier", task_num);
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

void ConcurrentMark::grayRoot(oop p) {
  HeapWord* addr = (HeapWord*) p;
  // We can't really check against _heap_start and _heap_end, since it
  // is possible during an evacuation pause with piggy-backed
  // initial-mark that the committed space is expanded during the
  // pause without CM observing this change. So the assertion below
  // is a bit conservative, but better than nothing.
  assert(_g1h->g1_committed().contains(addr),
         "address should be within the heap bounds");

  if (!_nextMarkBitMap->isMarked(addr)) {
    _nextMarkBitMap->parMark(addr);
  }
}

void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
  // The objects on the region have already been marked "in bulk" by
  // the caller. We only need to decide whether to push the region on
  // the region stack or not.

  if (!concurrent_marking_in_progress() || !_should_gray_objects) {
    // We're done with marking and waiting for remark. We do not need to
    // push anything else on the region stack.
    return;
  }

  HeapWord* finger = _finger;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] attempting to push "
                           "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at "
                           PTR_FORMAT, mr.start(), mr.end(), finger);
  }

  if (mr.start() < finger) {
    // The finger is always heap region aligned and it is not possible
    // for mr to span heap regions.
    assert(mr.end() <= finger, "invariant");

    // Separated the asserts so that we know which one fires.
    assert(mr.start() <= mr.end(),
           "region boundaries should fall within the committed space");
    assert(_heap_start <= mr.start(),
           "region boundaries should fall within the committed space");
    assert(mr.end() <= _heap_end,
           "region boundaries should fall within the committed space");
    if (verbose_low()) {
      gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") "
                             "below the finger, pushing it",
                             mr.start(), mr.end());
    }

    if (!region_stack_push_lock_free(mr)) {
      if (verbose_low()) {
        gclog_or_tty->print_cr("[global] region stack has overflown.");
      }
    }
  }
}

void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) {
  // The object is not marked by the caller. We need to at least mark
  // it and maybe push it on the stack.

  HeapWord* addr = (HeapWord*)p;
  if (!_nextMarkBitMap->isMarked(addr)) {
    // We definitely need to mark it, irrespective of whether we bail
    // out because we're done with marking.
    if (_nextMarkBitMap->parMark(addr)) {
      if (!concurrent_marking_in_progress() || !_should_gray_objects) {
        // If we're done with concurrent marking and we're waiting for
        // remark, then we're not pushing anything on the stack.
        return;
      }

      // No OrderAccess::store_load() is needed. It is implicit in the
      // CAS done in parMark(addr) above
      HeapWord* finger = _finger;

      if (addr < finger) {
        if (!mark_stack_push(oop(addr))) {
          if (verbose_low()) {
            gclog_or_tty->print_cr("[global] global stack overflow "
                                   "during parMark");
          }
        }
      }
    }
  }
}

class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(int worker_i) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    ConcurrentGCThread::stsJoin();

    assert((size_t) worker_i < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_i);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double start_time_sec = os::elapsedTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true /* do_stealing    */,
                                  true /* do_termination */);

        double end_time_sec = os::elapsedTime();
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        double elapsed_time_sec = end_time_sec - start_time_sec;
        _cm->clear_has_overflown();

        bool ret = _cm->do_yield_check(worker_i);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          ConcurrentGCThread::stsLeave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          ConcurrentGCThread::stsJoin();
        }
        double end_time2_sec = os::elapsedTime();
        double elapsed_time2_sec = end_time2_sec - start_time_sec;

#if 0
        gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                               "overhead %1.4lf",
                               elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
                               the_task->conc_overhead(os::elapsedTime()) * 8.0);
        gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
                               elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    ConcurrentGCThread::stsLeave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
  force_overflow_conc()->init();
  set_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (parallel_marking_threads() > 0) {
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    gclog_or_tty->print(" VerifyDuringGC:(before)");
    Universe::heap()->prepare_for_verify();
    Universe::verify(/* allow dirty */ true,
                     /* silent      */ false,
                     /* option      */ VerifyOption_G1UsePrevMarking);
  }

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    // Clear the flag. We do not need it any more.
    clear_has_overflown();
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
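    // Deactivate the SATB queues: once marking has finished, mutator
    // threads no longer need to enqueue pre-write barrier entries. The
    // queues stay inactive until the next initial-mark pause re-activates
    // them (see checkpointRootsInitialPost() above).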
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      gclog_or_tty->print(" VerifyDuringGC:(after)");
      Universe::heap()->prepare_for_verify();
      Universe::verify(/* allow dirty */ true,
                       /* silent      */ false,
                       /* option      */ VerifyOption_G1UseNextMarking);
    }
    assert(!restart_for_overflow(), "sanity");
  }

  // Reset the marking state if marking completed
  if (!restart_for_overflow()) {
    set_non_marking_state();
  }

#if VERIFY_OBJS_PROCESSED
  _scan_obj_cl.objs_processed = 0;
  ThreadLocalObjQueue::objs_enqueued = 0;
#endif

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

#define CARD_BM_TEST_MODE 0

class CalcLiveObjectsClosure: public HeapRegionClosure {

  CMBitMapRO* _bm;
  ConcurrentMark* _cm;
  bool _changed;
  bool _yield;
  size_t _words_done;
  size_t _tot_live;
  size_t _tot_used;
  size_t _regions_done;
  double _start_vtime_sec;

  BitMap* _region_bm;
  BitMap* _card_bm;
  intptr_t _bottom_card_num;
  bool _final;

  void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) {
    for (intptr_t i = start_card_num; i <= last_card_num; i++) {
#if CARD_BM_TEST_MODE
      guarantee(_card_bm->at(i - _bottom_card_num), "Should already be set.");
#else
      _card_bm->par_at_put(i - _bottom_card_num, 1);
#endif
    }
  }

public:
  CalcLiveObjectsClosure(bool final,
                         CMBitMapRO *bm, ConcurrentMark *cm,
                         BitMap* region_bm, BitMap* card_bm) :
    _bm(bm), _cm(cm), _changed(false), _yield(true),
    _words_done(0), _tot_live(0), _tot_used(0),
    _region_bm(region_bm), _card_bm(card_bm), _final(final),
    _regions_done(0), _start_vtime_sec(0.0)
  {
    _bottom_card_num =
      intptr_t(uintptr_t(G1CollectedHeap::heap()->reserved_region().start()) >>
               CardTableModRefBS::card_shift);
  }

  // It takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1. If the region is "starts humongous" it will also set
  // to 1 the bits on the region bitmap that correspond to its
  // associated "continues humongous" regions.
  void set_bit_for_region(HeapRegion* hr) {
    assert(!hr->continuesHumongous(), "should have filtered those out");

    size_t index = hr->hrs_index();
    if (!hr->startsHumongous()) {
      // Normal (non-humongous) case: just set the bit.
      _region_bm->par_at_put((BitMap::idx_t) index, true);
    } else {
      // Starts humongous case: calculate how many regions are part of
      // this humongous region and then set the bit range. It might
      // have been a bit more efficient to look at the object that
      // spans these humongous regions to calculate their number from
      // the object's size. However, it's a good idea to calculate
      // this based on the metadata itself, and not the region
      // contents, so that this code is not aware of what goes into
      // the humongous regions (in case this changes in the future).
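      // Scan forward from the "starts humongous" region until the first
      // region that does not continue the humongous object; the range
      // [index, end_index) then covers every region the object spans.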
      G1CollectedHeap* g1h = G1CollectedHeap::heap();
      size_t end_index = index + 1;
      while (end_index < g1h->n_regions()) {
        HeapRegion* chr = g1h->region_at(end_index);
        if (!chr->continuesHumongous()) break;
        end_index += 1;
      }
      _region_bm->par_at_put_range((BitMap::idx_t) index,
                                   (BitMap::idx_t) end_index, true);
    }
  }

  bool doHeapRegion(HeapRegion* hr) {
    if (!_final && _regions_done == 0) {
      _start_vtime_sec = os::elapsedVTime();
    }

    if (hr->continuesHumongous()) {
      // We will ignore these here and process them when their
      // associated "starts humongous" region is processed (see
      // set_bit_for_heap_region()). Note that we cannot rely on their
      // associated "starts humongous" region to have their bit set to
      // 1 since, due to the region chunking in the parallel region
      // iteration, a "continues humongous" region might be visited
      // before its associated "starts humongous".
      return false;
    }

    HeapWord* nextTop = hr->next_top_at_mark_start();
    HeapWord* start   = hr->top_at_conc_mark_count();
    assert(hr->bottom() <= start && start <= hr->end() &&
           hr->bottom() <= nextTop && nextTop <= hr->end() &&
           start <= nextTop,
           "Preconditions.");
    // Otherwise, record the number of words we'll examine.
    size_t words_done = (nextTop - start);
    // Find the first marked object at or after "start".
    start = _bm->getNextMarkedWordAddress(start, nextTop);
    size_t marked_bytes = 0;

    // Below, the term "card num" means the result of shifting an address
    // by the card shift -- address 0 corresponds to card number 0.  One
    // must subtract the card num of the bottom of the heap to obtain a
    // card table index.
    // The first card num of the sequence of live cards currently being
    // constructed.  -1 ==> no sequence.
    intptr_t start_card_num = -1;
    // The last card num of the sequence of live cards currently being
    // constructed.  -1 ==> no sequence.
    intptr_t last_card_num = -1;

    while (start < nextTop) {
      if (_yield && _cm->do_yield_check()) {
        // We yielded.  It might be for a full collection, in which case
        // all bets are off; terminate the traversal.
        if (_cm->has_aborted()) {
          _changed = false;
          return true;
        } else {
          // Otherwise, it might be a collection pause, and the region
          // we're looking at might be in the collection set.  We'll
          // abandon this region.
          return false;
        }
      }
      oop obj = oop(start);
      int obj_sz = obj->size();
      // The card num of the start of the current object.
      intptr_t obj_card_num =
        intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift);

      HeapWord* obj_last = start + obj_sz - 1;
      intptr_t obj_last_card_num =
        intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift);

      if (obj_card_num != last_card_num) {
        if (start_card_num == -1) {
          assert(last_card_num == -1, "Both or neither.");
          start_card_num = obj_card_num;
        } else {
          assert(last_card_num != -1, "Both or neither.");
          assert(obj_card_num >= last_card_num, "Inv");
          if ((obj_card_num - last_card_num) > 1) {
            // Mark the last run, and start a new one.
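            // The current object's first card is more than one card past
            // the end of the run being built, so the run cannot be
            // extended: flush it into the card bitmap and begin a new run
            // at this object's first card. (With the default 512-byte
            // cards, two objects whose card numbers differ by exactly 1
            // still coalesce into a single run.)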
            mark_card_num_range(start_card_num, last_card_num);
            start_card_num = obj_card_num;
          }
        }
#if CARD_BM_TEST_MODE
        /*
        gclog_or_tty->print_cr("Setting bits from %d/%d.",
                               obj_card_num - _bottom_card_num,
                               obj_last_card_num - _bottom_card_num);
        */
        for (intptr_t j = obj_card_num; j <= obj_last_card_num; j++) {
          _card_bm->par_at_put(j - _bottom_card_num, 1);
        }
#endif
      }
      // In any case, we set the last card num.
      last_card_num = obj_last_card_num;

      marked_bytes += (size_t)obj_sz * HeapWordSize;
      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(start + 1, nextTop);
      _changed = true;
    }
    // Handle the last range, if any.
    if (start_card_num != -1) {
      mark_card_num_range(start_card_num, last_card_num);
    }
    if (_final) {
      // Mark the allocated-since-marking portion...
      HeapWord* tp = hr->top();
      if (nextTop < tp) {
        start_card_num =
          intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift);
        last_card_num =
          intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift);
        mark_card_num_range(start_card_num, last_card_num);
        // This definitely means the region has live objects.
        set_bit_for_region(hr);
      }
    }

    hr->add_to_marked_bytes(marked_bytes);
    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }
    hr->set_top_at_conc_mark_count(nextTop);
    _tot_live += hr->next_live_bytes();
    _tot_used += hr->used();
    _words_done = words_done;

    if (!_final) {
      ++_regions_done;
      if (_regions_done % 10 == 0) {
        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - _start_vtime_sec;
        if (elapsed_vtime_sec > (10.0 / 1000.0)) {
          jlong sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0);
          os::sleep(Thread::current(), sleep_time_ms, false);
          _start_vtime_sec = end_vtime_sec;
        }
      }
    }

    return false;
  }

  bool changed() { return _changed; }
  void reset()   { _changed = false; _words_done = 0; }
  void no_yield() { _yield = false; }
  size_t words_done() { return _words_done; }
  size_t tot_live() { return _tot_live; }
  size_t tot_used() { return _tot_used; }
};


void ConcurrentMark::calcDesiredRegions() {
  _region_bm.clear();
  _card_bm.clear();
  CalcLiveObjectsClosure calccl(false /*final*/,
                                nextMarkBitMap(), this,
                                &_region_bm, &_card_bm);
  G1CollectedHeap *g1h = G1CollectedHeap::heap();
  g1h->heap_region_iterate(&calccl);

  do {
    calccl.reset();
    g1h->heap_region_iterate(&calccl);
  } while (calccl.changed());
}

class G1ParFinalCountTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  CMBitMap* _bm;
  size_t _n_workers;
  size_t *_live_bytes;
  size_t *_used_bytes;
  BitMap* _region_bm;
  BitMap* _card_bm;
public:
  G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm,
                      BitMap* region_bm, BitMap* card_bm)
    : AbstractGangTask("G1 final counting"), _g1h(g1h),
      _bm(bm), _region_bm(region_bm), _card_bm(card_bm) {
    if (ParallelGCThreads > 0) {
      _n_workers = _g1h->workers()->total_workers();
    } else {
      _n_workers = 1;
    }
    _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
    _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
  }

  ~G1ParFinalCountTask() {
    FREE_C_HEAP_ARRAY(size_t, _live_bytes);
    FREE_C_HEAP_ARRAY(size_t, _used_bytes);
  }

  void work(int i) {
    CalcLiveObjectsClosure calccl(true /*final*/,
                                  _bm, _g1h->concurrent_mark(),
                                  _region_bm, _card_bm);
    calccl.no_yield();
    if (G1CollectedHeap::use_parallel_gc_threads()) {
      _g1h->heap_region_par_iterate_chunked(&calccl, i,
                                            HeapRegion::FinalCountClaimValue);
    } else {
      _g1h->heap_region_iterate(&calccl);
    }
    assert(calccl.complete(), "Shouldn't have yielded!");

    assert((size_t) i < _n_workers, "invariant");
    _live_bytes[i] = calccl.tot_live();
    _used_bytes[i] = calccl.tot_used();
  }
  size_t live_bytes()  {
    size_t live_bytes = 0;
    for (size_t i = 0; i < _n_workers; ++i)
      live_bytes += _live_bytes[i];
    return live_bytes;
  }
  size_t used_bytes()  {
    size_t used_bytes = 0;
    for (size_t i = 0; i < _n_workers; ++i)
      used_bytes += _used_bytes[i];
    return used_bytes;
  }
};

class G1ParNoteEndTask;

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _worker_num;
  size_t _max_live_bytes;
  size_t _regions_claimed;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  HumongousRegionSet* _humongous_proxy_set;
  HRRSCleanupTask* _hrrs_cleanup_task;
  double _claimed_region_time;
  double _max_region_time;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             int worker_num,
                             FreeRegionList* local_cleanup_list,
                             HumongousRegionSet* humongous_proxy_set,
                             HRRSCleanupTask* hrrs_cleanup_task);
  size_t freed_bytes() { return _freed_bytes; }

  bool doHeapRegion(HeapRegion *r);

  size_t max_live_bytes() { return _max_live_bytes; }
  size_t regions_claimed() { return _regions_claimed; }
  double claimed_region_time_sec() { return _claimed_region_time; }
  double max_region_time_sec() { return _max_region_time; }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  size_t _max_live_bytes;
  size_t _freed_bytes;
  FreeRegionList* _cleanup_list;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h,
                   FreeRegionList* cleanup_list) :
    AbstractGangTask("G1 note end"), _g1h(g1h),
    _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }

  void work(int i) {
    double start = os::elapsedTime();
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list,
                                           &humongous_proxy_set,
                                           &hrrs_cleanup_task);
    if (G1CollectedHeap::use_parallel_gc_threads()) {
      _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
                                            HeapRegion::NoteEndClaimValue);
    } else {
      _g1h->heap_region_iterate(&g1_note_end);
    }
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
                                            NULL /* free_list */,
                                            &humongous_proxy_set,
                                            true /* par */);
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _max_live_bytes += g1_note_end.max_live_bytes();
      _freed_bytes += g1_note_end.freed_bytes();

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        HeapRegionLinkedListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_as_tail(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
    double end = os::elapsedTime();
    if (G1PrintParCleanupStats) {
      gclog_or_tty->print("     Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
                          "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n",
                          i, start, end, (end-start)*1000.0,
                          g1_note_end.regions_claimed(),
                          g1_note_end.claimed_region_time_sec()*1000.0,
                          g1_note_end.max_region_time_sec()*1000.0);
    }
  }
  size_t max_live_bytes() { return _max_live_bytes; }
  size_t freed_bytes() { return _freed_bytes; }
};

class G1ParScrubRemSetTask: public AbstractGangTask {
protected:
  G1RemSet* _g1rs;
  BitMap* _region_bm;
  BitMap* _card_bm;
public:
  G1ParScrubRemSetTask(G1CollectedHeap* g1h,
                       BitMap* region_bm, BitMap* card_bm) :
    AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
    _region_bm(region_bm), _card_bm(card_bm)
  {}

  void work(int i) {
    if (G1CollectedHeap::use_parallel_gc_threads()) {
      _g1rs->scrub_par(_region_bm, _card_bm, i,
                       HeapRegion::ScrubRemSetClaimValue);
    } else {
      _g1rs->scrub(_region_bm, _card_bm);
    }
  }

};

G1NoteEndOfConcMarkClosure::
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                           int worker_num,
                           FreeRegionList* local_cleanup_list,
                           HumongousRegionSet* humongous_proxy_set,
                           HRRSCleanupTask* hrrs_cleanup_task)
  : _g1(g1), _worker_num(worker_num),
    _max_live_bytes(0), _regions_claimed(0),
    _freed_bytes(0),
    _claimed_region_time(0.0), _max_region_time(0.0),
    _local_cleanup_list(local_cleanup_list),
    _humongous_proxy_set(humongous_proxy_set),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
  // We use a claim value of zero here because all regions
  // were claimed with value 1 in the FinalCount task.
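  // The per-region timing below feeds both the total
  // (claimed_region_time_sec) and the worst single region
  // (max_region_time_sec) reported in the G1PrintParCleanupStats output.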
1659 hr->reset_gc_time_stamp(); 1660 if (!hr->continuesHumongous()) { 1661 double start = os::elapsedTime(); 1662 _regions_claimed++; 1663 hr->note_end_of_marking(); 1664 _max_live_bytes += hr->max_live_bytes(); 1665 _g1->free_region_if_empty(hr, 1666 &_freed_bytes, 1667 _local_cleanup_list, 1668 _humongous_proxy_set, 1669 _hrrs_cleanup_task, 1670 true /* par */); 1671 double region_time = (os::elapsedTime() - start); 1672 _claimed_region_time += region_time; 1673 if (region_time > _max_region_time) { 1674 _max_region_time = region_time; 1675 } 1676 } 1677 return false; 1678 } 1679 1680 void ConcurrentMark::cleanup() { 1681 // world is stopped at this checkpoint 1682 assert(SafepointSynchronize::is_at_safepoint(), 1683 "world should be stopped"); 1684 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1685 1686 // If a full collection has happened, we shouldn't do this. 1687 if (has_aborted()) { 1688 g1h->set_marking_complete(); // So bitmap clearing isn't confused 1689 return; 1690 } 1691 1692 g1h->verify_region_sets_optional(); 1693 1694 if (VerifyDuringGC) { 1695 HandleMark hm; // handle scope 1696 gclog_or_tty->print(" VerifyDuringGC:(before)"); 1697 Universe::heap()->prepare_for_verify(); 1698 Universe::verify(/* allow dirty */ true, 1699 /* silent */ false, 1700 /* option */ VerifyOption_G1UsePrevMarking); 1701 } 1702 1703 G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); 1704 g1p->record_concurrent_mark_cleanup_start(); 1705 1706 double start = os::elapsedTime(); 1707 1708 HeapRegionRemSet::reset_for_cleanup_tasks(); 1709 1710 // Do counting once more with the world stopped for good measure. 1711 G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(), 1712 &_region_bm, &_card_bm); 1713 if (G1CollectedHeap::use_parallel_gc_threads()) { 1714 assert(g1h->check_heap_region_claim_values( 1715 HeapRegion::InitialClaimValue), 1716 "sanity check"); 1717 1718 int n_workers = g1h->workers()->total_workers(); 1719 g1h->set_par_threads(n_workers); 1720 g1h->workers()->run_task(&g1_par_count_task); 1721 g1h->set_par_threads(0); 1722 1723 assert(g1h->check_heap_region_claim_values( 1724 HeapRegion::FinalCountClaimValue), 1725 "sanity check"); 1726 } else { 1727 g1_par_count_task.work(0); 1728 } 1729 1730 size_t known_garbage_bytes = 1731 g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes(); 1732 g1p->set_known_garbage_bytes(known_garbage_bytes); 1733 1734 size_t start_used_bytes = g1h->used(); 1735 _at_least_one_mark_complete = true; 1736 g1h->set_marking_complete(); 1737 1738 ergo_verbose4(ErgoConcCycles, 1739 "finish cleanup", 1740 ergo_format_byte("occupancy") 1741 ergo_format_byte("capacity") 1742 ergo_format_byte_perc("known garbage"), 1743 start_used_bytes, g1h->capacity(), 1744 known_garbage_bytes, 1745 ((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0); 1746 1747 double count_end = os::elapsedTime(); 1748 double this_final_counting_time = (count_end - start); 1749 if (G1PrintParCleanupStats) { 1750 gclog_or_tty->print_cr("Cleanup:"); 1751 gclog_or_tty->print_cr(" Finalize counting: %8.3f ms", 1752 this_final_counting_time*1000.0); 1753 } 1754 _total_counting_time += this_final_counting_time; 1755 1756 if (G1PrintRegionLivenessInfo) { 1757 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); 1758 _g1h->heap_region_iterate(&cl); 1759 } 1760 1761 // Install newly created mark bitMap as "prev". 1762 swapMarkBitMaps(); 1763 1764 g1h->reset_gc_time_stamp(); 1765 1766 // Note end of marking in all heap regions. 
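// (What follows is the note-end phase: G1ParNoteEndTask, defined
// above, has each worker note the end of marking in the regions it
// claims, free the completely-empty ones into a worker-local cleanup
// list, and then append that list to the global _cleanup_list while
// holding the ParGCRareEvent_lock.)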
1767 double note_end_start = os::elapsedTime(); 1768 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list); 1769 if (G1CollectedHeap::use_parallel_gc_threads()) { 1770 int n_workers = g1h->workers()->total_workers(); 1771 g1h->set_par_threads(n_workers); 1772 g1h->workers()->run_task(&g1_par_note_end_task); 1773 g1h->set_par_threads(0); 1774 1775 assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue), 1776 "sanity check"); 1777 } else { 1778 g1_par_note_end_task.work(0); 1779 } 1780 1781 if (!cleanup_list_is_empty()) { 1782 // The cleanup list is not empty, so we'll have to process it 1783 // concurrently. Notify anyone else that might be wanting free 1784 // regions that there will be more free regions coming soon. 1785 g1h->set_free_regions_coming(); 1786 } 1787 double note_end_end = os::elapsedTime(); 1788 if (G1PrintParCleanupStats) { 1789 gclog_or_tty->print_cr(" note end of marking: %8.3f ms.", 1790 (note_end_end - note_end_start)*1000.0); 1791 } 1792 1793 // Scrub the remembered sets (if enabled) before the cleanup-end 1794 // call below, since it affects the metric by which we sort the heap 1795 // regions. 1796 if (G1ScrubRemSets) { 1797 double rs_scrub_start = os::elapsedTime(); 1798 G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm); 1799 if (G1CollectedHeap::use_parallel_gc_threads()) { 1800 int n_workers = g1h->workers()->total_workers(); 1801 g1h->set_par_threads(n_workers); 1802 g1h->workers()->run_task(&g1_par_scrub_rs_task); 1803 g1h->set_par_threads(0); 1804 1805 assert(g1h->check_heap_region_claim_values( 1806 HeapRegion::ScrubRemSetClaimValue), 1807 "sanity check"); 1808 } else { 1809 g1_par_scrub_rs_task.work(0); 1810 } 1811 1812 double rs_scrub_end = os::elapsedTime(); 1813 double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); 1814 _total_rs_scrub_time += this_rs_scrub_time; 1815 } 1816 1817 // this will also free any regions totally full of garbage objects, 1818 // and sort the regions. 1819 g1h->g1_policy()->record_concurrent_mark_cleanup_end( 1820 g1_par_note_end_task.freed_bytes(), 1821 g1_par_note_end_task.max_live_bytes()); 1822 1823 // Statistics. 1824 double end = os::elapsedTime(); 1825 _cleanup_times.add((end - start) * 1000.0); 1826 1827 // G1CollectedHeap::heap()->print(); 1828 // gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d", 1829 // G1CollectedHeap::heap()->get_gc_time_stamp()); 1830 1831 if (PrintGC || PrintGCDetails) { 1832 g1h->print_size_transition(gclog_or_tty, 1833 start_used_bytes, 1834 g1h->used(), 1835 g1h->capacity()); 1836 } 1837 1838 size_t cleaned_up_bytes = start_used_bytes - g1h->used(); 1839 g1p->decrease_known_garbage_bytes(cleaned_up_bytes); 1840 1841 // Clean up will have freed any regions completely full of garbage. 1842 // Update the soft reference policy with the new heap occupancy. 1843 Universe::update_heap_info_at_gc(); 1844 1845 // We need to make this be a "collection" so any collection pause that 1846 // races with it goes around and waits for completeCleanup to finish.
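// (Roughly: a pause VM operation records the total-collections count
// when it is scheduled and re-checks it at execution time, so bumping
// the count here makes a racing pause go around again rather than run
// concurrently with completeCleanup(); see the comment above.)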
1847 g1h->increment_total_collections(); 1848 1849 if (VerifyDuringGC) { 1850 HandleMark hm; // handle scope 1851 gclog_or_tty->print(" VerifyDuringGC:(after)"); 1852 Universe::heap()->prepare_for_verify(); 1853 Universe::verify(/* allow dirty */ true, 1854 /* silent */ false, 1855 /* option */ VerifyOption_G1UsePrevMarking); 1856 } 1857 1858 g1h->verify_region_sets_optional(); 1859 } 1860 1861 void ConcurrentMark::completeCleanup() { 1862 if (has_aborted()) return; 1863 1864 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1865 1866 _cleanup_list.verify_optional(); 1867 FreeRegionList tmp_free_list("Tmp Free List"); 1868 1869 if (G1ConcRegionFreeingVerbose) { 1870 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 1871 "cleanup list has "SIZE_FORMAT" entries", 1872 _cleanup_list.length()); 1873 } 1874 1875 // No one else should be accessing the _cleanup_list at this point, 1876 // so it's not necessary to take any locks. 1877 while (!_cleanup_list.is_empty()) { 1878 HeapRegion* hr = _cleanup_list.remove_head(); 1879 assert(hr != NULL, "the list was not empty"); 1880 hr->par_clear(); 1881 tmp_free_list.add_as_tail(hr); 1882 1883 // Instead of adding one region at a time to the secondary_free_list, 1884 // we accumulate them in the local list and move them a few at a 1885 // time. This also cuts down on the number of notify_all() calls 1886 // we do during this process. We'll also append the local list when 1887 // _cleanup_list is empty (which means we just removed the last 1888 // region from the _cleanup_list). 1889 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || 1890 _cleanup_list.is_empty()) { 1891 if (G1ConcRegionFreeingVerbose) { 1892 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 1893 "appending "SIZE_FORMAT" entries to the " 1894 "secondary_free_list, clean list still has " 1895 SIZE_FORMAT" entries", 1896 tmp_free_list.length(), 1897 _cleanup_list.length()); 1898 } 1899 1900 { 1901 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 1902 g1h->secondary_free_list_add_as_tail(&tmp_free_list); 1903 SecondaryFreeList_lock->notify_all(); 1904 } 1905 1906 if (G1StressConcRegionFreeing) { 1907 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) { 1908 os::sleep(Thread::current(), (jlong) 1, false); 1909 } 1910 } 1911 } 1912 } 1913 assert(tmp_free_list.is_empty(), "post-condition"); 1914 } 1915 1916 // Support closures for reference processing in G1 1917 1918 bool G1CMIsAliveClosure::do_object_b(oop obj) { 1919 HeapWord* addr = (HeapWord*)obj; 1920 return addr != NULL && 1921 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); 1922 } 1923 1924 class G1CMKeepAliveClosure: public OopClosure { 1925 G1CollectedHeap* _g1; 1926 ConcurrentMark* _cm; 1927 CMBitMap* _bitMap; 1928 public: 1929 G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm, 1930 CMBitMap* bitMap) : 1931 _g1(g1), _cm(cm), 1932 _bitMap(bitMap) {} 1933 1934 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 1935 virtual void do_oop( oop* p) { do_oop_work(p); } 1936 1937 template <class T> void do_oop_work(T* p) { 1938 oop obj = oopDesc::load_decode_heap_oop(p); 1939 HeapWord* addr = (HeapWord*)obj; 1940 1941 if (_cm->verbose_high()) { 1942 gclog_or_tty->print_cr("\t[0] we're looking at location " 1943 "*"PTR_FORMAT" = "PTR_FORMAT, 1944 p, (void*) obj); 1945 } 1946 1947 if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) { 1948 _bitMap->mark(addr); 1949 _cm->mark_stack_push(obj); 1950 } 1951 } 1952 }; 1953 1954 class
G1CMDrainMarkingStackClosure: public VoidClosure { 1955 CMMarkStack* _markStack; 1956 CMBitMap* _bitMap; 1957 G1CMKeepAliveClosure* _oopClosure; 1958 public: 1959 G1CMDrainMarkingStackClosure(CMBitMap* bitMap, CMMarkStack* markStack, 1960 G1CMKeepAliveClosure* oopClosure) : 1961 _bitMap(bitMap), 1962 _markStack(markStack), 1963 _oopClosure(oopClosure) 1964 {} 1965 1966 void do_void() { 1967 _markStack->drain((OopClosure*)_oopClosure, _bitMap, false); 1968 } 1969 }; 1970 1971 // 'Keep Alive' closure used by parallel reference processing. 1972 // An instance of this closure is used in the parallel reference processing 1973 // code rather than an instance of G1CMKeepAliveClosure. We could have used 1974 // the G1CMKeepAliveClosure as it is MT-safe. Also reference objects are 1975 // placed on to discovered ref lists once so we can mark and push with no 1976 // need to check whether the object has already been marked. Using the 1977 // G1CMKeepAliveClosure would mean, however, having all the worker threads 1978 // operating on the global mark stack. This means that an individual 1979 // worker would be doing lock-free pushes while it processes its own 1980 // discovered ref list followed by drain call. If the discovered ref lists 1981 // are unbalanced then this could cause interference with the other 1982 // workers. Using a CMTask (and its embedded local data structures) 1983 // avoids that potential interference. 1984 class G1CMParKeepAliveAndDrainClosure: public OopClosure { 1985 ConcurrentMark* _cm; 1986 CMTask* _task; 1987 CMBitMap* _bitMap; 1988 int _ref_counter_limit; 1989 int _ref_counter; 1990 public: 1991 G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, 1992 CMTask* task, 1993 CMBitMap* bitMap) : 1994 _cm(cm), _task(task), _bitMap(bitMap), 1995 _ref_counter_limit(G1RefProcDrainInterval) 1996 { 1997 assert(_ref_counter_limit > 0, "sanity"); 1998 _ref_counter = _ref_counter_limit; 1999 } 2000 2001 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2002 virtual void do_oop( oop* p) { do_oop_work(p); } 2003 2004 template <class T> void do_oop_work(T* p) { 2005 if (!_cm->has_overflown()) { 2006 oop obj = oopDesc::load_decode_heap_oop(p); 2007 if (_cm->verbose_high()) { 2008 gclog_or_tty->print_cr("\t[%d] we're looking at location " 2009 "*"PTR_FORMAT" = "PTR_FORMAT, 2010 _task->task_id(), p, (void*) obj); 2011 } 2012 2013 _task->deal_with_reference(obj); 2014 _ref_counter--; 2015 2016 if (_ref_counter == 0) { 2017 // We have dealt with _ref_counter_limit references, pushing them and objects 2018 // reachable from them on to the local stack (and possibly the global stack). 2019 // Call do_marking_step() to process these entries. We call the routine in a 2020 // loop, which we'll exit if there's nothing more to do (i.e. we're done 2021 // with the entries that we've pushed as a result of the deal_with_reference 2022 // calls above) or we overflow. 2023 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag 2024 // while there may still be some work to do. (See the comment at the 2025 // beginning of CMTask::do_marking_step() for those conditions - one of which 2026 // is reaching the specified time target.) It is only when 2027 // CMTask::do_marking_step() returns without setting the has_aborted() flag 2028 // that the marking has completed. 
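// In short: keep taking time-bounded marking steps until one of them
// completes without aborting (i.e. everything reachable from the
// references dealt with so far has been drained), or until the global
// mark stack overflows, at which point marking will be restarted
// anyway and there is no point in continuing here.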
2029 do { 2030 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2031 _task->do_marking_step(mark_step_duration_ms, 2032 false /* do_stealing */, 2033 false /* do_termination */); 2034 } while (_task->has_aborted() && !_cm->has_overflown()); 2035 _ref_counter = _ref_counter_limit; 2036 } 2037 } else { 2038 if (_cm->verbose_high()) { 2039 gclog_or_tty->print_cr("\t[%d] CM Overflow", _task->task_id()); 2040 } 2041 } 2042 } 2043 }; 2044 2045 class G1CMParDrainMarkingStackClosure: public VoidClosure { 2046 ConcurrentMark* _cm; 2047 CMTask* _task; 2048 public: 2049 G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) : 2050 _cm(cm), _task(task) 2051 {} 2052 2053 void do_void() { 2054 do { 2055 if (_cm->verbose_high()) { 2056 gclog_or_tty->print_cr("\t[%d] Drain: Calling do_marking_step", 2057 _task->task_id()); 2058 } 2059 2060 // We call CMTask::do_marking_step() to completely drain the local and 2061 // global marking stacks. The routine is called in a loop, which we'll 2062 // exit if there's nothing more to do (i.e. we've completely drained the 2063 // entries that were pushed as a result of applying the 2064 // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref 2065 // lists above) or we overflow the global marking stack. 2066 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag 2067 // while there may still be some work to do. (See the comment at the 2068 // beginning of CMTask::do_marking_step() for those conditions - one of which 2069 // is reaching the specified time target.) It is only when 2070 // CMTask::do_marking_step() returns without setting the has_aborted() flag 2071 // that the marking has completed. 2072 2073 _task->do_marking_step(1000000000.0 /* something very large */, 2074 true /* do_stealing */, 2075 true /* do_termination */); 2076 } while (_task->has_aborted() && !_cm->has_overflown()); 2077 } 2078 }; 2079 2080 // Implementation of AbstractRefProcTaskExecutor for parallel 2081 // reference processing at the end of G1 concurrent marking 2082 2083 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 2084 private: 2085 G1CollectedHeap* _g1h; 2086 ConcurrentMark* _cm; 2087 CMBitMap* _bitmap; 2088 WorkGang* _workers; 2089 int _active_workers; 2090 2091 public: 2092 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 2093 ConcurrentMark* cm, 2094 CMBitMap* bitmap, 2095 WorkGang* workers, 2096 int n_workers) : 2097 _g1h(g1h), _cm(cm), _bitmap(bitmap), 2098 _workers(workers), _active_workers(n_workers) 2099 { } 2100 2101 // Executes the given task using concurrent marking worker threads.
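// These are the AbstractRefProcTaskExecutor entry points; the
// reference processor calls back into them from
// process_discovered_references() and enqueue_discovered_references()
// (see weakRefsWork() below), and each one wraps the given task in a
// gang-task proxy and runs it on the G1 worker gang.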
2102 virtual void execute(ProcessTask& task); 2103 virtual void execute(EnqueueTask& task); 2104 }; 2105 2106 class G1CMRefProcTaskProxy: public AbstractGangTask { 2107 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2108 ProcessTask& _proc_task; 2109 G1CollectedHeap* _g1h; 2110 ConcurrentMark* _cm; 2111 CMBitMap* _bitmap; 2112 2113 public: 2114 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2115 G1CollectedHeap* g1h, 2116 ConcurrentMark* cm, 2117 CMBitMap* bitmap) : 2118 AbstractGangTask("Process reference objects in parallel"), 2119 _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap) 2120 {} 2121 2122 virtual void work(int i) { 2123 CMTask* marking_task = _cm->task(i); 2124 G1CMIsAliveClosure g1_is_alive(_g1h); 2125 G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap); 2126 G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task); 2127 2128 _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2129 } 2130 }; 2131 2132 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2133 assert(_workers != NULL, "Need parallel worker threads."); 2134 2135 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap); 2136 2137 // We need to reset the phase for each task execution so that 2138 // the termination protocol of CMTask::do_marking_step works. 2139 _cm->set_phase(_active_workers, false /* concurrent */); 2140 _g1h->set_par_threads(_active_workers); 2141 _workers->run_task(&proc_task_proxy); 2142 _g1h->set_par_threads(0); 2143 } 2144 2145 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 2146 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 2147 EnqueueTask& _enq_task; 2148 2149 public: 2150 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 2151 AbstractGangTask("Enqueue reference objects in parallel"), 2152 _enq_task(enq_task) 2153 { } 2154 2155 virtual void work(int i) { 2156 _enq_task.work(i); 2157 } 2158 }; 2159 2160 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2161 assert(_workers != NULL, "Need parallel worker threads."); 2162 2163 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2164 2165 _g1h->set_par_threads(_active_workers); 2166 _workers->run_task(&enq_task_proxy); 2167 _g1h->set_par_threads(0); 2168 } 2169 2170 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 2171 ResourceMark rm; 2172 HandleMark hm; 2173 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2174 ReferenceProcessor* rp = g1h->ref_processor_cm(); 2175 2176 // See the comment in G1CollectedHeap::ref_processing_init() 2177 // about how reference processing currently works in G1. 2178 2179 // Process weak references. 2180 rp->setup_policy(clear_all_soft_refs); 2181 assert(_markStack.isEmpty(), "mark stack should be empty"); 2182 2183 G1CMIsAliveClosure g1_is_alive(g1h); 2184 G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap()); 2185 G1CMDrainMarkingStackClosure 2186 g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive); 2187 // We use the work gang from the G1CollectedHeap and we utilize all 2188 // the worker threads. 2189 int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1; 2190 active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1); 2191 2192 G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(), 2193 g1h->workers(), active_workers); 2194 2195 2196 if (rp->processing_is_mt()) { 2197 // Set the degree of MT here. 
If the discovery is done MT, there 2198 // may have been a different number of threads doing the discovery 2199 // and a different number of discovered lists may have Ref objects. 2200 // That is OK as long as the Reference lists are balanced (see 2201 // balance_all_queues() and balance_queues()). 2202 rp->set_active_mt_degree(active_workers); 2203 2204 rp->process_discovered_references(&g1_is_alive, 2205 &g1_keep_alive, 2206 &g1_drain_mark_stack, 2207 &par_task_executor); 2208 2209 // The work routines of the parallel keep_alive and drain_marking_stack 2210 // will set the has_overflown flag if we overflow the global marking 2211 // stack. 2212 } else { 2213 rp->process_discovered_references(&g1_is_alive, 2214 &g1_keep_alive, 2215 &g1_drain_mark_stack, 2216 NULL); 2217 2218 } 2219 2220 assert(_markStack.overflow() || _markStack.isEmpty(), 2221 "mark stack should be empty (unless it overflowed)"); 2222 if (_markStack.overflow()) { 2223 // Should have been done already when we tried to push an 2224 // entry on to the global mark stack. But let's do it again. 2225 set_has_overflown(); 2226 } 2227 2228 if (rp->processing_is_mt()) { 2229 assert(rp->num_q() == active_workers, "why not"); 2230 rp->enqueue_discovered_references(&par_task_executor); 2231 } else { 2232 rp->enqueue_discovered_references(); 2233 } 2234 2235 rp->verify_no_references_recorded(); 2236 assert(!rp->discovery_enabled(), "Post condition"); 2237 2238 // Now clean up stale oops in StringTable 2239 StringTable::unlink(&g1_is_alive); 2240 // Clean up unreferenced symbols in symbol table. 2241 SymbolTable::unlink(); 2242 } 2243 2244 void ConcurrentMark::swapMarkBitMaps() { 2245 CMBitMapRO* temp = _prevMarkBitMap; 2246 _prevMarkBitMap = (CMBitMapRO*)_nextMarkBitMap; 2247 _nextMarkBitMap = (CMBitMap*) temp; 2248 } 2249 2250 class CMRemarkTask: public AbstractGangTask { 2251 private: 2252 ConcurrentMark *_cm; 2253 2254 public: 2255 void work(int worker_i) { 2256 // Since all available tasks are actually started, we should 2257 // only proceed if we're supposed to be active. 2258 if ((size_t)worker_i < _cm->active_tasks()) { 2259 CMTask* task = _cm->task(worker_i); 2260 task->record_start_time(); 2261 do { 2262 task->do_marking_step(1000000000.0 /* something very large */, 2263 true /* do_stealing */, 2264 true /* do_termination */); 2265 } while (task->has_aborted() && !_cm->has_overflown()); 2266 // If we overflow, then we do not want to restart. We instead 2267 // want to abort remark and do concurrent marking again. 2268 task->record_end_time(); 2269 } 2270 } 2271 2272 CMRemarkTask(ConcurrentMark* cm) : 2273 AbstractGangTask("Par Remark"), _cm(cm) { } 2274 }; 2275 2276 void ConcurrentMark::checkpointRootsFinalWork() { 2277 ResourceMark rm; 2278 HandleMark hm; 2279 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2280 2281 g1h->ensure_parsability(false); 2282 2283 if (G1CollectedHeap::use_parallel_gc_threads()) { 2284 G1CollectedHeap::StrongRootsScope srs(g1h); 2285 // this is remark, so we'll use up all available threads 2286 int active_workers = ParallelGCThreads; 2287 set_phase(active_workers, false /* concurrent */); 2288 2289 CMRemarkTask remarkTask(this); 2290 // We will start all available threads, even if we decide that the 2291 // active_workers will be fewer. The extra ones will just bail out 2292 // immediately.
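// (The bail-out is visible in CMRemarkTask::work() above: a worker
// whose index is not below active_tasks() records nothing and simply
// returns.)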
2293 int n_workers = g1h->workers()->total_workers(); 2294 g1h->set_par_threads(n_workers); 2295 g1h->workers()->run_task(&remarkTask); 2296 g1h->set_par_threads(0); 2297 } else { 2298 G1CollectedHeap::StrongRootsScope srs(g1h); 2299 // this is remark, so we'll use up all available threads 2300 int active_workers = 1; 2301 set_phase(active_workers, false /* concurrent */); 2302 2303 CMRemarkTask remarkTask(this); 2304 // We will start all available threads, even if we decide that the 2305 // active_workers will be fewer. The extra ones will just bail out 2306 // immediately. 2307 remarkTask.work(0); 2308 } 2309 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2310 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant"); 2311 2312 print_stats(); 2313 2314 #if VERIFY_OBJS_PROCESSED 2315 if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) { 2316 gclog_or_tty->print_cr("Processed = %d, enqueued = %d.", 2317 _scan_obj_cl.objs_processed, 2318 ThreadLocalObjQueue::objs_enqueued); 2319 guarantee(_scan_obj_cl.objs_processed == 2320 ThreadLocalObjQueue::objs_enqueued, 2321 "Different number of objs processed and enqueued."); 2322 } 2323 #endif 2324 } 2325 2326 #ifndef PRODUCT 2327 2328 class PrintReachableOopClosure: public OopClosure { 2329 private: 2330 G1CollectedHeap* _g1h; 2331 outputStream* _out; 2332 VerifyOption _vo; 2333 bool _all; 2334 2335 public: 2336 PrintReachableOopClosure(outputStream* out, 2337 VerifyOption vo, 2338 bool all) : 2339 _g1h(G1CollectedHeap::heap()), 2340 _out(out), _vo(vo), _all(all) { } 2341 2342 void do_oop(narrowOop* p) { do_oop_work(p); } 2343 void do_oop( oop* p) { do_oop_work(p); } 2344 2345 template <class T> void do_oop_work(T* p) { 2346 oop obj = oopDesc::load_decode_heap_oop(p); 2347 const char* str = NULL; 2348 const char* str2 = ""; 2349 2350 if (obj == NULL) { 2351 str = ""; 2352 } else if (!_g1h->is_in_g1_reserved(obj)) { 2353 str = " O"; 2354 } else { 2355 HeapRegion* hr = _g1h->heap_region_containing(obj); 2356 guarantee(hr != NULL, "invariant"); 2357 bool over_tams = false; 2358 bool marked = false; 2359 2360 switch (_vo) { 2361 case VerifyOption_G1UsePrevMarking: 2362 over_tams = hr->obj_allocated_since_prev_marking(obj); 2363 marked = _g1h->isMarkedPrev(obj); 2364 break; 2365 case VerifyOption_G1UseNextMarking: 2366 over_tams = hr->obj_allocated_since_next_marking(obj); 2367 marked = _g1h->isMarkedNext(obj); 2368 break; 2369 case VerifyOption_G1UseMarkWord: 2370 marked = obj->is_gc_marked(); 2371 break; 2372 default: 2373 ShouldNotReachHere(); 2374 } 2375 2376 if (over_tams) { 2377 str = " >"; 2378 if (marked) { 2379 str2 = " AND MARKED"; 2380 } 2381 } else if (marked) { 2382 str = " M"; 2383 } else { 2384 str = " NOT"; 2385 } 2386 } 2387 2388 _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", 2389 p, (void*) obj, str, str2); 2390 } 2391 }; 2392 2393 class PrintReachableObjectClosure : public ObjectClosure { 2394 private: 2395 G1CollectedHeap* _g1h; 2396 outputStream* _out; 2397 VerifyOption _vo; 2398 bool _all; 2399 HeapRegion* _hr; 2400 2401 public: 2402 PrintReachableObjectClosure(outputStream* out, 2403 VerifyOption vo, 2404 bool all, 2405 HeapRegion* hr) : 2406 _g1h(G1CollectedHeap::heap()), 2407 _out(out), _vo(vo), _all(all), _hr(hr) { } 2408 2409 void do_object(oop o) { 2410 bool over_tams = false; 2411 bool marked = false; 2412 2413 switch (_vo) { 2414 case VerifyOption_G1UsePrevMarking: 2415 over_tams = _hr->obj_allocated_since_prev_marking(o); 2416 marked = _g1h->isMarkedPrev(o); 2417 break; 2418 
case VerifyOption_G1UseNextMarking: 2419 over_tams = _hr->obj_allocated_since_next_marking(o); 2420 marked = _g1h->isMarkedNext(o); 2421 break; 2422 case VerifyOption_G1UseMarkWord: 2423 marked = o->is_gc_marked(); 2424 break; 2425 default: 2426 ShouldNotReachHere(); 2427 } 2428 bool print_it = _all || over_tams || marked; 2429 2430 if (print_it) { 2431 _out->print_cr(" "PTR_FORMAT"%s", 2432 o, (over_tams) ? " >" : (marked) ? " M" : ""); 2433 PrintReachableOopClosure oopCl(_out, _vo, _all); 2434 o->oop_iterate(&oopCl); 2435 } 2436 } 2437 }; 2438 2439 class PrintReachableRegionClosure : public HeapRegionClosure { 2440 private: 2441 outputStream* _out; 2442 VerifyOption _vo; 2443 bool _all; 2444 2445 public: 2446 bool doHeapRegion(HeapRegion* hr) { 2447 HeapWord* b = hr->bottom(); 2448 HeapWord* e = hr->end(); 2449 HeapWord* t = hr->top(); 2450 HeapWord* p = NULL; 2451 2452 switch (_vo) { 2453 case VerifyOption_G1UsePrevMarking: 2454 p = hr->prev_top_at_mark_start(); 2455 break; 2456 case VerifyOption_G1UseNextMarking: 2457 p = hr->next_top_at_mark_start(); 2458 break; 2459 case VerifyOption_G1UseMarkWord: 2460 // When we are verifying marking using the mark word 2461 // TAMS has no relevance. 2462 assert(p == NULL, "post-condition"); 2463 break; 2464 default: 2465 ShouldNotReachHere(); 2466 } 2467 _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " 2468 "TAMS: "PTR_FORMAT, b, e, t, p); 2469 _out->cr(); 2470 2471 HeapWord* from = b; 2472 HeapWord* to = t; 2473 2474 if (to > from) { 2475 _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to); 2476 _out->cr(); 2477 PrintReachableObjectClosure ocl(_out, _vo, _all, hr); 2478 hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); 2479 _out->cr(); 2480 } 2481 2482 return false; 2483 } 2484 2485 PrintReachableRegionClosure(outputStream* out, 2486 VerifyOption vo, 2487 bool all) : 2488 _out(out), _vo(vo), _all(all) { } 2489 }; 2490 2491 static const char* verify_option_to_tams(VerifyOption vo) { 2492 switch (vo) { 2493 case VerifyOption_G1UsePrevMarking: 2494 return "PTAMS"; 2495 case VerifyOption_G1UseNextMarking: 2496 return "NTAMS"; 2497 default: 2498 return "NONE"; 2499 } 2500 } 2501 2502 void ConcurrentMark::print_reachable(const char* str, 2503 VerifyOption vo, 2504 bool all) { 2505 gclog_or_tty->cr(); 2506 gclog_or_tty->print_cr("== Doing heap dump... "); 2507 2508 if (G1PrintReachableBaseFile == NULL) { 2509 gclog_or_tty->print_cr(" #### error: no base file defined"); 2510 return; 2511 } 2512 2513 if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) > 2514 (JVM_MAXPATHLEN - 1)) { 2515 gclog_or_tty->print_cr(" #### error: file name too long"); 2516 return; 2517 } 2518 2519 char file_name[JVM_MAXPATHLEN]; 2520 sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str); 2521 gclog_or_tty->print_cr(" dumping to file %s", file_name); 2522 2523 fileStream fout(file_name); 2524 if (!fout.is_open()) { 2525 gclog_or_tty->print_cr(" #### error: could not open file"); 2526 return; 2527 } 2528 2529 outputStream* out = &fout; 2530 out->print_cr("-- USING %s", verify_option_to_tams(vo)); 2531 out->cr(); 2532 2533 out->print_cr("--- ITERATING OVER REGIONS"); 2534 out->cr(); 2535 PrintReachableRegionClosure rcl(out, vo, all); 2536 _g1h->heap_region_iterate(&rcl); 2537 out->cr(); 2538 2539 gclog_or_tty->print_cr(" done"); 2540 gclog_or_tty->flush(); 2541 } 2542 2543 #endif // PRODUCT 2544 2545 // This note is for drainAllSATBBuffers and the code in between. 
2546 // In the future we could reuse a task to do this work during an 2547 // evacuation pause (since now tasks are not active and can be claimed 2548 // during an evacuation pause). This was a late change to the code and 2549 // is currently not being taken advantage of. 2550 2551 class CMGlobalObjectClosure : public ObjectClosure { 2552 private: 2553 ConcurrentMark* _cm; 2554 2555 public: 2556 void do_object(oop obj) { 2557 _cm->deal_with_reference(obj); 2558 } 2559 2560 CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { } 2561 }; 2562 2563 void ConcurrentMark::deal_with_reference(oop obj) { 2564 if (verbose_high()) { 2565 gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT, 2566 (void*) obj); 2567 } 2568 2569 HeapWord* objAddr = (HeapWord*) obj; 2570 assert(obj->is_oop_or_null(true /* ignore mark word */), "Error"); 2571 if (_g1h->is_in_g1_reserved(objAddr)) { 2572 assert(obj != NULL, "null check is implicit"); 2573 if (!_nextMarkBitMap->isMarked(objAddr)) { 2574 // Only get the containing region if the object is not marked on the 2575 // bitmap (otherwise, it's a waste of time since we won't do 2576 // anything with it). 2577 HeapRegion* hr = _g1h->heap_region_containing_raw(obj); 2578 if (!hr->obj_allocated_since_next_marking(obj)) { 2579 if (verbose_high()) { 2580 gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered " 2581 "marked", (void*) obj); 2582 } 2583 2584 // we need to mark it first 2585 if (_nextMarkBitMap->parMark(objAddr)) { 2586 // No OrderAccess::store_load() is needed. It is implicit in the 2587 // CAS done in parMark(objAddr) above 2588 HeapWord* finger = _finger; 2589 if (objAddr < finger) { 2590 if (verbose_high()) { 2591 gclog_or_tty->print_cr("[global] below the global finger " 2592 "("PTR_FORMAT"), pushing it", finger); 2593 } 2594 if (!mark_stack_push(obj)) { 2595 if (verbose_low()) { 2596 gclog_or_tty->print_cr("[global] global stack overflow during " 2597 "deal_with_reference"); 2598 } 2599 } 2600 } 2601 } 2602 } 2603 } 2604 } 2605 } 2606 2607 void ConcurrentMark::drainAllSATBBuffers() { 2608 CMGlobalObjectClosure oc(this); 2609 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2610 satb_mq_set.set_closure(&oc); 2611 2612 while (satb_mq_set.apply_closure_to_completed_buffer()) { 2613 if (verbose_medium()) { 2614 gclog_or_tty->print_cr("[global] processed an SATB buffer"); 2615 } 2616 } 2617 2618 // no need to check whether we should do this, as this is only 2619 // called during an evacuation pause 2620 satb_mq_set.iterate_closure_all_threads(); 2621 2622 satb_mq_set.set_closure(NULL); 2623 assert(satb_mq_set.completed_buffers_num() == 0, "invariant"); 2624 } 2625 2626 void ConcurrentMark::markPrev(oop p) { 2627 // Note we are overriding the read-only view of the prev map here, via 2628 // the cast. 2629 ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*)p); 2630 } 2631 2632 void ConcurrentMark::clear(oop p) { 2633 assert(p != NULL && p->is_oop(), "expected an oop"); 2634 HeapWord* addr = (HeapWord*)p; 2635 assert(addr >= _nextMarkBitMap->startWord() && 2636 addr < _nextMarkBitMap->endWord(), "in a region"); 2637 2638 _nextMarkBitMap->clear(addr); 2639 } 2640 2641 void ConcurrentMark::clearRangeBothMaps(MemRegion mr) { 2642 // Note we are overriding the read-only view of the prev map here, via 2643 // the cast.
((CMBitMap*)_prevMarkBitMap)->clearRange(mr); 2645 _nextMarkBitMap->clearRange(mr); 2646 } 2647 2648 HeapRegion* 2649 ConcurrentMark::claim_region(int task_num) { 2650 // "checkpoint" the finger 2651 HeapWord* finger = _finger; 2652 2653 // _heap_end will not change underneath our feet; it only changes at 2654 // yield points. 2655 while (finger < _heap_end) { 2656 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 2657 2658 // Note on how this code handles humongous regions. In the 2659 // normal case the finger will reach the start of a "starts 2660 // humongous" (SH) region. Its end will either be the end of the 2661 // last "continues humongous" (CH) region in the sequence, or the 2662 // standard end of the SH region (if the SH is the only region in 2663 // the sequence). That way claim_region() will skip over the CH 2664 // regions. However, there is a subtle race between a CM thread 2665 // executing this method and a mutator thread doing a humongous 2666 // object allocation. The two are not mutually exclusive as the CM 2667 // thread does not need to hold the Heap_lock when it gets 2668 // here. So there is a chance that claim_region() will come across 2669 // a free region that's in the process of becoming a SH or a CH 2670 // region. In the former case, it will either 2671 // a) Miss the update to the region's end, in which case it will 2672 // visit every subsequent CH region, will find their bitmaps 2673 // empty, and do nothing, or 2674 // b) Will observe the update of the region's end (in which case 2675 // it will skip the subsequent CH regions). 2676 // If it comes across a region that suddenly becomes CH, the 2677 // scenario will be similar to b). So, the race between 2678 // claim_region() and a humongous object allocation might force us 2679 // to do a bit of unnecessary work (due to some unnecessary bitmap 2680 // iterations) but it should not introduce any correctness issues. 2681 HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); 2682 HeapWord* bottom = curr_region->bottom(); 2683 HeapWord* end = curr_region->end(); 2684 HeapWord* limit = curr_region->next_top_at_mark_start(); 2685 2686 if (verbose_low()) { 2687 gclog_or_tty->print_cr("[%d] curr_region = "PTR_FORMAT" " 2688 "["PTR_FORMAT", "PTR_FORMAT"), " 2689 "limit = "PTR_FORMAT, 2690 task_num, curr_region, bottom, end, limit); 2691 } 2692 2693 // Is the gap between reading the finger and doing the CAS too long? 2694 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); 2695 if (res == finger) { 2696 // we succeeded 2697 2698 // notice that _finger == end cannot be guaranteed here since 2699 // someone else might have moved the finger even further 2700 assert(_finger >= end, "the finger should have moved forward"); 2701 2702 if (verbose_low()) { 2703 gclog_or_tty->print_cr("[%d] we were successful with region = " 2704 PTR_FORMAT, task_num, curr_region); 2705 } 2706 2707 if (limit > bottom) { 2708 if (verbose_low()) { 2709 gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, " 2710 "returning it ", task_num, curr_region); 2711 } 2712 return curr_region; 2713 } else { 2714 assert(limit == bottom, 2715 "the region limit should be at bottom"); 2716 if (verbose_low()) { 2717 gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, " 2718 "returning NULL", task_num, curr_region); 2719 } 2720 // we return NULL and the caller should try calling 2721 // claim_region() again.
2722 return NULL; 2723 } 2724 } else { 2725 assert(_finger > finger, "the finger should have moved forward"); 2726 if (verbose_low()) { 2727 gclog_or_tty->print_cr("[%d] somebody else moved the finger, " 2728 "global finger = "PTR_FORMAT", " 2729 "our finger = "PTR_FORMAT, 2730 task_num, _finger, finger); 2731 } 2732 2733 // read it again 2734 finger = _finger; 2735 } 2736 } 2737 2738 return NULL; 2739 } 2740 2741 bool ConcurrentMark::invalidate_aborted_regions_in_cset() { 2742 bool result = false; 2743 for (int i = 0; i < (int)_max_task_num; ++i) { 2744 CMTask* the_task = _tasks[i]; 2745 MemRegion mr = the_task->aborted_region(); 2746 if (mr.start() != NULL) { 2747 assert(mr.end() != NULL, "invariant"); 2748 assert(mr.word_size() > 0, "invariant"); 2749 HeapRegion* hr = _g1h->heap_region_containing(mr.start()); 2750 assert(hr != NULL, "invariant"); 2751 if (hr->in_collection_set()) { 2752 // The region points into the collection set 2753 the_task->set_aborted_region(MemRegion()); 2754 result = true; 2755 } 2756 } 2757 } 2758 return result; 2759 } 2760 2761 bool ConcurrentMark::has_aborted_regions() { 2762 for (int i = 0; i < (int)_max_task_num; ++i) { 2763 CMTask* the_task = _tasks[i]; 2764 MemRegion mr = the_task->aborted_region(); 2765 if (mr.start() != NULL) { 2766 assert(mr.end() != NULL, "invariant"); 2767 assert(mr.word_size() > 0, "invariant"); 2768 return true; 2769 } 2770 } 2771 return false; 2772 } 2773 2774 void ConcurrentMark::oops_do(OopClosure* cl) { 2775 if (_markStack.size() > 0 && verbose_low()) { 2776 gclog_or_tty->print_cr("[global] scanning the global marking stack, " 2777 "size = %d", _markStack.size()); 2778 } 2779 // we first iterate over the contents of the mark stack... 2780 _markStack.oops_do(cl); 2781 2782 for (int i = 0; i < (int)_max_task_num; ++i) { 2783 OopTaskQueue* queue = _task_queues->queue((int)i); 2784 2785 if (queue->size() > 0 && verbose_low()) { 2786 gclog_or_tty->print_cr("[global] scanning task queue of task %d, " 2787 "size = %d", i, queue->size()); 2788 } 2789 2790 // ...then over the contents of all the task queues. 2791 queue->oops_do(cl); 2792 } 2793 2794 // Invalidate any entries in the region stack that 2795 // point into the collection set. 2796 if (_regionStack.invalidate_entries_into_cset()) { 2797 // otherwise, any gray objects copied during the evacuation pause 2798 // might not be visited. 2799 assert(_should_gray_objects, "invariant"); 2800 } 2801 2802 // Invalidate any aborted regions, recorded in the individual CM 2803 // tasks, that point into the collection set. 2804 if (invalidate_aborted_regions_in_cset()) { 2805 // otherwise, any gray objects copied during the evacuation pause 2806 // might not be visited.
2807 assert(_should_gray_objects, "invariant"); 2808 } 2809 2810 } 2811 2812 void ConcurrentMark::clear_marking_state(bool clear_overflow) { 2813 _markStack.setEmpty(); 2814 _markStack.clear_overflow(); 2815 _regionStack.setEmpty(); 2816 _regionStack.clear_overflow(); 2817 if (clear_overflow) { 2818 clear_has_overflown(); 2819 } else { 2820 assert(has_overflown(), "pre-condition"); 2821 } 2822 _finger = _heap_start; 2823 2824 for (int i = 0; i < (int)_max_task_num; ++i) { 2825 OopTaskQueue* queue = _task_queues->queue(i); 2826 queue->set_empty(); 2827 // Clear any partial regions from the CMTasks 2828 _tasks[i]->clear_aborted_region(); 2829 } 2830 } 2831 2832 void ConcurrentMark::print_stats() { 2833 if (verbose_stats()) { 2834 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 2835 for (size_t i = 0; i < _active_tasks; ++i) { 2836 _tasks[i]->print_stats(); 2837 gclog_or_tty->print_cr("---------------------------------------------------------------------"); 2838 } 2839 } 2840 } 2841 2842 class CSMarkOopClosure: public OopClosure { 2843 friend class CSMarkBitMapClosure; 2844 2845 G1CollectedHeap* _g1h; 2846 CMBitMap* _bm; 2847 ConcurrentMark* _cm; 2848 oop* _ms; 2849 jint* _array_ind_stack; 2850 int _ms_size; 2851 int _ms_ind; 2852 int _array_increment; 2853 2854 bool push(oop obj, int arr_ind = 0) { 2855 if (_ms_ind == _ms_size) { 2856 gclog_or_tty->print_cr("Mark stack is full."); 2857 return false; 2858 } 2859 _ms[_ms_ind] = obj; 2860 if (obj->is_objArray()) { 2861 _array_ind_stack[_ms_ind] = arr_ind; 2862 } 2863 _ms_ind++; 2864 return true; 2865 } 2866 2867 oop pop() { 2868 if (_ms_ind == 0) { 2869 return NULL; 2870 } else { 2871 _ms_ind--; 2872 return _ms[_ms_ind]; 2873 } 2874 } 2875 2876 template <class T> bool drain() { 2877 while (_ms_ind > 0) { 2878 oop obj = pop(); 2879 assert(obj != NULL, "Since index was non-zero."); 2880 if (obj->is_objArray()) { 2881 jint arr_ind = _array_ind_stack[_ms_ind]; 2882 objArrayOop aobj = objArrayOop(obj); 2883 jint len = aobj->length(); 2884 jint next_arr_ind = arr_ind + _array_increment; 2885 if (next_arr_ind < len) { 2886 push(obj, next_arr_ind); 2887 } 2888 // Now process this portion of this one. 2889 int lim = MIN2(next_arr_ind, len); 2890 for (int j = arr_ind; j < lim; j++) { 2891 do_oop(aobj->objArrayOopDesc::obj_at_addr<T>(j)); 2892 } 2893 2894 } else { 2895 obj->oop_iterate(this); 2896 } 2897 if (abort()) return false; 2898 } 2899 return true; 2900 } 2901 2902 public: 2903 CSMarkOopClosure(ConcurrentMark* cm, int ms_size) : 2904 _g1h(G1CollectedHeap::heap()), 2905 _cm(cm), 2906 _bm(cm->nextMarkBitMap()), 2907 _ms_size(ms_size), _ms_ind(0), 2908 _ms(NEW_C_HEAP_ARRAY(oop, ms_size)), 2909 _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)), 2910 _array_increment(MAX2(ms_size/8, 16)) 2911 {} 2912 2913 ~CSMarkOopClosure() { 2914 FREE_C_HEAP_ARRAY(oop, _ms); 2915 FREE_C_HEAP_ARRAY(jint, _array_ind_stack); 2916 } 2917 2918 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2919 virtual void do_oop( oop* p) { do_oop_work(p); } 2920 2921 template <class T> void do_oop_work(T* p) { 2922 T heap_oop = oopDesc::load_heap_oop(p); 2923 if (oopDesc::is_null(heap_oop)) return; 2924 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); 2925 if (obj->is_forwarded()) { 2926 // If the object has already been forwarded, we have to make sure 2927 // that it's marked. So follow the forwarding pointer. Note that 2928 // this does the right thing for self-forwarding pointers in the 2929 // evacuation failure case. 
2930 obj = obj->forwardee(); 2931 } 2932 HeapRegion* hr = _g1h->heap_region_containing(obj); 2933 if (hr != NULL) { 2934 if (hr->in_collection_set()) { 2935 if (_g1h->is_obj_ill(obj)) { 2936 _bm->mark((HeapWord*)obj); 2937 if (!push(obj)) { 2938 gclog_or_tty->print_cr("Setting abort in CSMarkOopClosure because push failed."); 2939 set_abort(); 2940 } 2941 } 2942 } else { 2943 // Outside the collection set; we need to gray it 2944 _cm->deal_with_reference(obj); 2945 } 2946 } 2947 } 2948 }; 2949 2950 class CSMarkBitMapClosure: public BitMapClosure { 2951 G1CollectedHeap* _g1h; 2952 CMBitMap* _bitMap; 2953 ConcurrentMark* _cm; 2954 CSMarkOopClosure _oop_cl; 2955 public: 2956 CSMarkBitMapClosure(ConcurrentMark* cm, int ms_size) : 2957 _g1h(G1CollectedHeap::heap()), 2958 _bitMap(cm->nextMarkBitMap()), 2959 _oop_cl(cm, ms_size) 2960 {} 2961 2962 ~CSMarkBitMapClosure() {} 2963 2964 bool do_bit(size_t offset) { 2965 // convert offset into a HeapWord* 2966 HeapWord* addr = _bitMap->offsetToHeapWord(offset); 2967 assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(), 2968 "address out of range"); 2969 assert(_bitMap->isMarked(addr), "tautology"); 2970 oop obj = oop(addr); 2971 if (!obj->is_forwarded()) { 2972 if (!_oop_cl.push(obj)) return false; 2973 if (UseCompressedOops) { 2974 if (!_oop_cl.drain<narrowOop>()) return false; 2975 } else { 2976 if (!_oop_cl.drain<oop>()) return false; 2977 } 2978 } 2979 // Otherwise... 2980 return true; 2981 } 2982 }; 2983 2984 2985 class CompleteMarkingInCSHRClosure: public HeapRegionClosure { 2986 CMBitMap* _bm; 2987 CSMarkBitMapClosure _bit_cl; 2988 enum SomePrivateConstants { 2989 MSSize = 1000 2990 }; 2991 bool _completed; 2992 public: 2993 CompleteMarkingInCSHRClosure(ConcurrentMark* cm) : 2994 _bm(cm->nextMarkBitMap()), 2995 _bit_cl(cm, MSSize), 2996 _completed(true) 2997 {} 2998 2999 ~CompleteMarkingInCSHRClosure() {} 3000 3001 bool doHeapRegion(HeapRegion* r) { 3002 if (!r->evacuation_failed()) { 3003 MemRegion mr = MemRegion(r->bottom(), r->next_top_at_mark_start()); 3004 if (!mr.is_empty()) { 3005 if (!_bm->iterate(&_bit_cl, mr)) { 3006 _completed = false; 3007 return true; 3008 } 3009 } 3010 } 3011 return false; 3012 } 3013 3014 bool completed() { return _completed; } 3015 }; 3016 3017 class ClearMarksInHRClosure: public HeapRegionClosure { 3018 CMBitMap* _bm; 3019 public: 3020 ClearMarksInHRClosure(CMBitMap* bm): _bm(bm) { } 3021 3022 bool doHeapRegion(HeapRegion* r) { 3023 if (!r->used_region().is_empty() && !r->evacuation_failed()) { 3024 MemRegion usedMR = r->used_region(); 3025 _bm->clearRange(usedMR); 3026 } 3027 return false; 3028 } 3029 }; 3030 3031 void ConcurrentMark::complete_marking_in_collection_set() { 3032 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 3033 3034 if (!g1h->mark_in_progress()) { 3035 g1h->g1_policy()->record_mark_closure_time(0.0); 3036 return; 3037 } 3038 3039 int i = 1; 3040 double start = os::elapsedTime(); 3041 while (true) { 3042 i++; 3043 CompleteMarkingInCSHRClosure cmplt(this); 3044 g1h->collection_set_iterate(&cmplt); 3045 if (cmplt.completed()) break; 3046 } 3047 double end_time = os::elapsedTime(); 3048 double elapsed_time_ms = (end_time - start) * 1000.0; 3049 g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms); 3050 3051 ClearMarksInHRClosure clr(nextMarkBitMap()); 3052 g1h->collection_set_iterate(&clr); 3053 } 3054 3055 // The next two methods deal with the following optimisation. Some 3056 // objects are gray by being marked and located above the finger.
If 3057 // they are copied, during an evacuation pause, below the finger then 3058 // they need to be pushed on the stack. The observation is that, if 3059 // there are no regions in the collection set located above the 3060 // finger, then the above cannot happen, hence we do not need to 3061 // explicitly gray any objects when copying them to below the 3062 // finger. The global stack will be scanned to ensure that, if it 3063 // points to objects being copied, it will update their 3064 // location. There is a tricky situation with the gray objects in 3065 // the region stack that are being copied, however. See the comment in 3066 // newCSet(). 3067 3068 void ConcurrentMark::newCSet() { 3069 if (!concurrent_marking_in_progress()) { 3070 // nothing to do if marking is not in progress 3071 return; 3072 } 3073 3074 // find what the lowest finger is among the global and local fingers 3075 _min_finger = _finger; 3076 for (int i = 0; i < (int)_max_task_num; ++i) { 3077 CMTask* task = _tasks[i]; 3078 HeapWord* task_finger = task->finger(); 3079 if (task_finger != NULL && task_finger < _min_finger) { 3080 _min_finger = task_finger; 3081 } 3082 } 3083 3084 _should_gray_objects = false; 3085 3086 // This fixes a very subtle and frustrating bug. It might be the case 3087 // that, during an evacuation pause, heap regions that contain 3088 // objects that are gray (by being in regions contained in the 3089 // region stack) are included in the collection set. Since such gray 3090 // objects will be moved, and because it's not easy to redirect 3091 // region stack entries to point to a new location (because objects 3092 // in one region might be scattered to multiple regions after they 3093 // are copied), one option is to ensure that all marked objects 3094 // copied during a pause are pushed on the stack. Notice, however, 3095 // that this problem can only happen when the region stack is not 3096 // empty during an evacuation pause. So, we make the fix a bit less 3097 // conservative and ensure that regions are pushed on the stack, 3098 // irrespective of whether all collection set regions are below the 3099 // finger, if the region stack is not empty. This is expected to be 3100 // a rare case, so I don't think it's necessary to be smarter about it. 3101 if (!region_stack_empty() || has_aborted_regions()) { 3102 _should_gray_objects = true; 3103 } 3104 } 3105 3106 void ConcurrentMark::registerCSetRegion(HeapRegion* hr) { 3107 if (!concurrent_marking_in_progress()) return; 3108 3109 HeapWord* region_end = hr->end(); 3110 if (region_end > _min_finger) { 3111 _should_gray_objects = true; 3112 } 3113 } 3114 3115 // Resets the region fields of active CMTasks whose values point 3116 // into the collection set. 3117 void ConcurrentMark::reset_active_task_region_fields_in_cset() { 3118 assert(SafepointSynchronize::is_at_safepoint(), "should be in STW"); 3119 assert(parallel_marking_threads() <= _max_task_num, "sanity"); 3120 3121 for (int i = 0; i < (int)parallel_marking_threads(); i += 1) { 3122 CMTask* task = _tasks[i]; 3123 HeapWord* task_finger = task->finger(); 3124 if (task_finger != NULL) { 3125 assert(_g1h->is_in_g1_reserved(task_finger), "not in heap"); 3126 HeapRegion* finger_region = _g1h->heap_region_containing(task_finger); 3127 if (finger_region->in_collection_set()) { 3128 // The task's current region is in the collection set. 3129 // This region will be evacuated in the current GC and 3130 // the region fields in the task will be stale.
3131 task->giveup_current_region(); 3132 } 3133 } 3134 } 3135 } 3136 3137 // abandon current marking iteration due to a Full GC 3138 void ConcurrentMark::abort() { 3139 // Clear all marks to force marking thread to do nothing 3140 _nextMarkBitMap->clearAll(); 3141 // Empty mark stack 3142 clear_marking_state(); 3143 for (int i = 0; i < (int)_max_task_num; ++i) { 3144 _tasks[i]->clear_region_fields(); 3145 } 3146 _has_aborted = true; 3147 3148 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 3149 satb_mq_set.abandon_partial_marking(); 3150 // This can be called either during or outside marking; we'll read 3151 // the expected_active value from the SATB queue set. 3152 satb_mq_set.set_active_all_threads( 3153 false, /* new active value */ 3154 satb_mq_set.is_active() /* expected_active */); 3155 } 3156 3157 static void print_ms_time_info(const char* prefix, const char* name, 3158 NumberSeq& ns) { 3159 gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 3160 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 3161 if (ns.num() > 0) { 3162 gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", 3163 prefix, ns.sd(), ns.maximum()); 3164 } 3165 } 3166 3167 void ConcurrentMark::print_summary_info() { 3168 gclog_or_tty->print_cr(" Concurrent marking:"); 3169 print_ms_time_info(" ", "init marks", _init_times); 3170 print_ms_time_info(" ", "remarks", _remark_times); 3171 { 3172 print_ms_time_info(" ", "final marks", _remark_mark_times); 3173 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 3174 3175 } 3176 print_ms_time_info(" ", "cleanups", _cleanup_times); 3177 gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", 3178 _total_counting_time, 3179 (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / 3180 (double)_cleanup_times.num() 3181 : 0.0)); 3182 if (G1ScrubRemSets) { 3183 gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 3184 _total_rs_scrub_time, 3185 (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / 3186 (double)_cleanup_times.num() 3187 : 0.0)); 3188 } 3189 gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", 3190 (_init_times.sum() + _remark_times.sum() + 3191 _cleanup_times.sum())/1000.0); 3192 gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " 3193 "(%8.2f s marking, %8.2f s counting).", 3194 cmThread()->vtime_accum(), 3195 cmThread()->vtime_mark_accum(), 3196 cmThread()->vtime_count_accum()); 3197 } 3198 3199 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3200 _parallel_workers->print_worker_threads_on(st); 3201 } 3202 3203 // Closures 3204 // XXX: there seems to be a lot of code duplication here; 3205 // should refactor and consolidate the shared code. 3206 3207 3208 3209 3210 // We take a break if someone is trying to stop the world.
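// The protocol, roughly: worker 0 tells the policy that a concurrent
// pause is starting, every worker then parks in cmThread()->yield()
// until the safepoint has passed, and worker 0 records the end of the
// pause afterwards.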
3211 bool ConcurrentMark::do_yield_check(int worker_i) { 3212 if (should_yield()) { 3213 if (worker_i == 0) { 3214 _g1h->g1_policy()->record_concurrent_pause(); 3215 } 3216 cmThread()->yield(); 3217 if (worker_i == 0) { 3218 _g1h->g1_policy()->record_concurrent_pause_end(); 3219 } 3220 return true; 3221 } else { 3222 return false; 3223 } 3224 } 3225 3226 bool ConcurrentMark::should_yield() { 3227 return cmThread()->should_yield(); 3228 } 3229 3230 bool ConcurrentMark::containing_card_is_marked(void* p) { 3231 size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1); 3232 return _card_bm.at(offset >> CardTableModRefBS::card_shift); 3233 } 3234 3235 bool ConcurrentMark::containing_cards_are_marked(void* start, 3236 void* last) { 3237 return containing_card_is_marked(start) && 3238 containing_card_is_marked(last); 3239 } 3240 3241 #ifndef PRODUCT 3242 // for debugging purposes 3243 void ConcurrentMark::print_finger() { 3244 gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT, 3245 _heap_start, _heap_end, _finger); 3246 for (int i = 0; i < (int) _max_task_num; ++i) { 3247 gclog_or_tty->print(" %d: "PTR_FORMAT, i, _tasks[i]->finger()); 3248 } 3249 gclog_or_tty->print_cr(""); 3250 } 3251 #endif 3252 3253 void CMTask::scan_object(oop obj) { 3254 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); 3255 3256 if (_cm->verbose_high()) { 3257 gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT, 3258 _task_id, (void*) obj); 3259 } 3260 3261 size_t obj_size = obj->size(); 3262 _words_scanned += obj_size; 3263 3264 obj->oop_iterate(_cm_oop_closure); 3265 statsOnly( ++_objs_scanned ); 3266 check_limits(); 3267 } 3268 3269 // Closure for iteration over bitmaps 3270 class CMBitMapClosure : public BitMapClosure { 3271 private: 3272 // the bitmap that is being iterated over 3273 CMBitMap* _nextMarkBitMap; 3274 ConcurrentMark* _cm; 3275 CMTask* _task; 3276 // true if we're scanning a heap region claimed by the task (so that 3277 // we move the finger along), false if we're not, i.e. currently when 3278 // scanning a heap region popped from the region stack (so that we 3279 // do not move the task finger along; it'd be a mistake if we did so). 3280 bool _scanning_heap_region; 3281 3282 public: 3283 CMBitMapClosure(CMTask *task, 3284 ConcurrentMark* cm, 3285 CMBitMap* nextMarkBitMap) 3286 : _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 3287 3288 void set_scanning_heap_region(bool scanning_heap_region) { 3289 _scanning_heap_region = scanning_heap_region; 3290 } 3291 3292 bool do_bit(size_t offset) { 3293 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 3294 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 3295 assert( addr < _cm->finger(), "invariant"); 3296 3297 if (_scanning_heap_region) { 3298 statsOnly( _task->increase_objs_found_on_bitmap() ); 3299 assert(addr >= _task->finger(), "invariant"); 3300 // We move that task's local finger along. 3301 _task->move_finger_to(addr); 3302 } else { 3303 // We move the task's region finger along. 3304 _task->move_region_finger_to(addr); 3305 } 3306 3307 _task->scan_object(oop(addr)); 3308 // we only partially drain the local queue and global stack 3309 _task->drain_local_queue(true); 3310 _task->drain_global_stack(true); 3311 3312 // if the has_aborted flag has been raised, we need to bail out of 3313 // the iteration 3314 return !_task->has_aborted(); 3315 } 3316 }; 3317 3318 // Closure for iterating over objects, currently only used for 3319 // processing SATB buffers. 
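// Each object drained from an SATB buffer is handed to
// CMTask::deal_with_reference(), which is expected to mark it and, if
// necessary, push it for later scanning (the per-task analogue of
// ConcurrentMark::deal_with_reference() above).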
3320 class CMObjectClosure : public ObjectClosure { 3321 private: 3322 CMTask* _task; 3323 3324 public: 3325 void do_object(oop obj) { 3326 _task->deal_with_reference(obj); 3327 } 3328 3329 CMObjectClosure(CMTask* task) : _task(task) { } 3330 }; 3331 3332 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 3333 ConcurrentMark* cm, 3334 CMTask* task) 3335 : _g1h(g1h), _cm(cm), _task(task) { 3336 assert(_ref_processor == NULL, "should be initialized to NULL"); 3337 3338 if (G1UseConcMarkReferenceProcessing) { 3339 _ref_processor = g1h->ref_processor_cm(); 3340 assert(_ref_processor != NULL, "should not be NULL"); 3341 } 3342 } 3343 3344 void CMTask::setup_for_region(HeapRegion* hr) { 3345 // Separated the asserts so that we know which one fires. 3346 assert(hr != NULL, 3347 "claim_region() should have filtered out continues humongous regions"); 3348 assert(!hr->continuesHumongous(), 3349 "claim_region() should have filtered out continues humongous regions"); 3350 3351 if (_cm->verbose_low()) { 3352 gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT, 3353 _task_id, hr); 3354 } 3355 3356 _curr_region = hr; 3357 _finger = hr->bottom(); 3358 update_region_limit(); 3359 } 3360 3361 void CMTask::update_region_limit() { 3362 HeapRegion* hr = _curr_region; 3363 HeapWord* bottom = hr->bottom(); 3364 HeapWord* limit = hr->next_top_at_mark_start(); 3365 3366 if (limit == bottom) { 3367 if (_cm->verbose_low()) { 3368 gclog_or_tty->print_cr("[%d] found an empty region " 3369 "["PTR_FORMAT", "PTR_FORMAT")", 3370 _task_id, bottom, limit); 3371 } 3372 // The region was collected underneath our feet. 3373 // We set the finger to bottom to ensure that the bitmap 3374 // iteration that will follow this will not do anything. 3375 // (this is not a condition that holds when we set the region up, 3376 // as the region is not supposed to be empty in the first place) 3377 _finger = bottom; 3378 } else if (limit >= _region_limit) { 3379 assert(limit >= _finger, "peace of mind"); 3380 } else { 3381 assert(limit < _region_limit, "only way to get here"); 3382 // This can happen under some pretty unusual circumstances. An 3383 // evacuation pause empties the region underneath our feet (NTAMS 3384 // at bottom). We then do some allocation in the region (NTAMS 3385 // stays at bottom), followed by the region being used as a GC 3386 // alloc region (NTAMS will move to top() and the objects 3387 // originally below it will be grayed). All objects now marked in 3388 // the region are explicitly grayed, if below the global finger, 3389 // and we do not need in fact to scan anything else. So, we simply 3390 // set _finger to be limit to ensure that the bitmap iteration 3391 // doesn't do anything. 3392 _finger = limit; 3393 } 3394 3395 _region_limit = limit; 3396 } 3397 3398 void CMTask::giveup_current_region() { 3399 assert(_curr_region != NULL, "invariant"); 3400 if (_cm->verbose_low()) { 3401 gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT, 3402 _task_id, _curr_region); 3403 } 3404 clear_region_fields(); 3405 } 3406 3407 void CMTask::clear_region_fields() { 3408 // Values for these three fields that indicate that we're not 3409 // holding on to a region. 
3410   _curr_region = NULL;
3411   _finger = NULL;
3412   _region_limit = NULL;
3413
3414   _region_finger = NULL;
3415 }
3416
3417 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
3418   if (cm_oop_closure == NULL) {
3419     assert(_cm_oop_closure != NULL, "invariant");
3420   } else {
3421     assert(_cm_oop_closure == NULL, "invariant");
3422   }
3423   _cm_oop_closure = cm_oop_closure;
3424 }
3425
3426 void CMTask::reset(CMBitMap* nextMarkBitMap) {
3427   guarantee(nextMarkBitMap != NULL, "invariant");
3428
3429   if (_cm->verbose_low()) {
3430     gclog_or_tty->print_cr("[%d] resetting", _task_id);
3431   }
3432
3433   _nextMarkBitMap = nextMarkBitMap;
3434   clear_region_fields();
3435   assert(_aborted_region.is_empty(), "should have been cleared");
3436
3437   _calls = 0;
3438   _elapsed_time_ms = 0.0;
3439   _termination_time_ms = 0.0;
3440   _termination_start_time_ms = 0.0;
3441
3442 #if _MARKING_STATS_
3443   _local_pushes = 0;
3444   _local_pops = 0;
3445   _local_max_size = 0;
3446   _objs_scanned = 0;
3447   _global_pushes = 0;
3448   _global_pops = 0;
3449   _global_max_size = 0;
3450   _global_transfers_to = 0;
3451   _global_transfers_from = 0;
3452   _region_stack_pops = 0;
3453   _regions_claimed = 0;
3454   _objs_found_on_bitmap = 0;
3455   _satb_buffers_processed = 0;
3456   _steal_attempts = 0;
3457   _steals = 0;
3458   _aborted = 0;
3459   _aborted_overflow = 0;
3460   _aborted_cm_aborted = 0;
3461   _aborted_yield = 0;
3462   _aborted_timed_out = 0;
3463   _aborted_satb = 0;
3464   _aborted_termination = 0;
3465 #endif // _MARKING_STATS_
3466 }
3467
3468 bool CMTask::should_exit_termination() {
3469   regular_clock_call();
3470   // This is called when we are in the termination protocol. We should
3471   // quit if, for some reason, this task wants to abort or the global
3472   // stack is not empty (this means that we can get work from it).
3473   return !_cm->mark_stack_empty() || has_aborted();
3474 }
3475
3476 void CMTask::reached_limit() {
3477   assert(_words_scanned >= _words_scanned_limit ||
3478          _refs_reached >= _refs_reached_limit,
3479          "shouldn't have been called otherwise");
3480   regular_clock_call();
3481 }
3482
3483 void CMTask::regular_clock_call() {
3484   if (has_aborted()) return;
3485
3486   // First, we need to recalculate the words scanned and refs reached
3487   // limits for the next clock call.
3488   recalculate_limits();
3489
3490   // During the regular clock call we do the following:
3491
3492   // (1) If an overflow has been flagged, then we abort.
3493   if (_cm->has_overflown()) {
3494     set_has_aborted();
3495     return;
3496   }
3497
3498   // If we are not concurrent (i.e. we're doing remark) we don't need
3499   // to check anything else. The other steps are only needed during
3500   // the concurrent marking phase.
3501   if (!concurrent()) return;
3502
3503   // (2) If marking has been aborted for Full GC, then we also abort.
3504   if (_cm->has_aborted()) {
3505     set_has_aborted();
3506     statsOnly( ++_aborted_cm_aborted );
3507     return;
3508   }
3509
3510   double curr_time_ms = os::elapsedVTime() * 1000.0;
3511
3512   // (3) If marking stats are enabled, then we update the clock interval history.
3513 #if _MARKING_STATS_
3514   if (_words_scanned >= _words_scanned_limit) {
3515     ++_clock_due_to_scanning;
3516   }
3517   if (_refs_reached >= _refs_reached_limit) {
3518     ++_clock_due_to_marking;
3519   }
3520
3521   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3522   _interval_start_time_ms = curr_time_ms;
3523   _all_clock_intervals_ms.add(last_interval_ms);
3524
3525   if (_cm->verbose_medium()) {
3526     gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, "
3527                            "scanned = %d%s, refs reached = %d%s",
3528                            _task_id, last_interval_ms,
3529                            _words_scanned,
3530                            (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3531                            _refs_reached,
3532                            (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3533   }
3534 #endif // _MARKING_STATS_
3535
3536   // (4) We check whether we should yield. If we have to, then we abort.
3537   if (_cm->should_yield()) {
3538     // We should yield. To do this we abort the task. The caller is
3539     // responsible for yielding.
3540     set_has_aborted();
3541     statsOnly( ++_aborted_yield );
3542     return;
3543   }
3544
3545   // (5) We check whether we've reached our time quota. If we have,
3546   // then we abort.
3547   double elapsed_time_ms = curr_time_ms - _start_time_ms;
3548   if (elapsed_time_ms > _time_target_ms) {
3549     set_has_aborted();
3550     _has_timed_out = true;
3551     statsOnly( ++_aborted_timed_out );
3552     return;
3553   }
3554
3555   // (6) Finally, we check whether there are enough completed SATB
3556   // buffers available for processing. If there are, we abort.
3557   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3558   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
3559     if (_cm->verbose_low()) {
3560       gclog_or_tty->print_cr("[%d] aborting to deal with pending SATB buffers",
3561                              _task_id);
3562     }
3563     // we do need to process SATB buffers, so we'll abort and restart
3564     // the marking task to do so
3565     set_has_aborted();
3566     statsOnly( ++_aborted_satb );
3567     return;
3568   }
3569 }
3570
3571 void CMTask::recalculate_limits() {
3572   _real_words_scanned_limit = _words_scanned + words_scanned_period;
3573   _words_scanned_limit = _real_words_scanned_limit;
3574
3575   _real_refs_reached_limit = _refs_reached + refs_reached_period;
3576   _refs_reached_limit = _real_refs_reached_limit;
3577 }
3578
3579 void CMTask::decrease_limits() {
3580   // This is called when we believe that we're going to do an infrequent
3581   // operation which will increase the per-byte scanned cost (i.e. move
3582   // entries to/from the global stack). It basically tries to decrease the
3583   // scanning limit so that the clock is called earlier.
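  //
  // As a worked sketch (P below stands for words_scanned_period):
  // recalculate_limits() sets _words_scanned_limit to _words_scanned + P,
  // so the clock would normally fire after P more words are scanned;
  // decrease_limits() pulls the limit back to
  // _real_words_scanned_limit - 3*P/4, so the clock now fires after
  // only P/4 more words. The refs limit shrinks in the same way, using
  // refs_reached_period.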
3584
3585   if (_cm->verbose_medium()) {
3586     gclog_or_tty->print_cr("[%d] decreasing limits", _task_id);
3587   }
3588
3589   _words_scanned_limit = _real_words_scanned_limit -
3590     3 * words_scanned_period / 4;
3591   _refs_reached_limit = _real_refs_reached_limit -
3592     3 * refs_reached_period / 4;
3593 }
3594
3595 void CMTask::move_entries_to_global_stack() {
3596   // local array where we'll store the entries that will be popped
3597   // from the local queue
3598   oop buffer[global_stack_transfer_size];
3599
3600   int n = 0;
3601   oop obj;
3602   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
3603     buffer[n] = obj;
3604     ++n;
3605   }
3606
3607   if (n > 0) {
3608     // we popped at least one entry from the local queue
3609
3610     statsOnly( ++_global_transfers_to; _local_pops += n );
3611
3612     if (!_cm->mark_stack_push(buffer, n)) {
3613       if (_cm->verbose_low()) {
3614         gclog_or_tty->print_cr("[%d] aborting due to global stack overflow",
3615                                _task_id);
3616       }
3617       set_has_aborted();
3618     } else {
3619       // the transfer was successful
3620
3621       if (_cm->verbose_medium()) {
3622         gclog_or_tty->print_cr("[%d] pushed %d entries to the global stack",
3623                                _task_id, n);
3624       }
3625       statsOnly( int tmp_size = _cm->mark_stack_size();
3626                  if (tmp_size > _global_max_size) {
3627                    _global_max_size = tmp_size;
3628                  }
3629                  _global_pushes += n );
3630     }
3631   }
3632
3633   // this operation was quite expensive, so decrease the limits
3634   decrease_limits();
3635 }
3636
3637 void CMTask::get_entries_from_global_stack() {
3638   // local array where we'll store the entries that will be popped
3639   // from the global stack.
3640   oop buffer[global_stack_transfer_size];
3641   int n;
3642   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
3643   assert(n <= global_stack_transfer_size,
3644          "we should not pop more than the given limit");
3645   if (n > 0) {
3646     // yes, we did actually pop at least one entry
3647
3648     statsOnly( ++_global_transfers_from; _global_pops += n );
3649     if (_cm->verbose_medium()) {
3650       gclog_or_tty->print_cr("[%d] popped %d entries from the global stack",
3651                              _task_id, n);
3652     }
3653     for (int i = 0; i < n; ++i) {
3654       bool success = _task_queue->push(buffer[i]);
3655       // We only call this when the local queue is empty or under a
3656       // given target limit. So, we do not expect this push to fail.
3657       assert(success, "invariant");
3658     }
3659
3660     statsOnly( int tmp_size = _task_queue->size();
3661                if (tmp_size > _local_max_size) {
3662                  _local_max_size = tmp_size;
3663                }
3664                _local_pushes += n );
3665   }
3666
3667   // this operation was quite expensive, so decrease the limits
3668   decrease_limits();
3669 }
3670
3671 void CMTask::drain_local_queue(bool partially) {
3672   if (has_aborted()) return;
3673
3674   // Decide what the target size is, depending on whether we're going to
3675   // drain it partially (so that other tasks can steal if they run out
3676   // of things to do) or totally (at the very end).
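  //
  // For instance (illustrative numbers only, not the actual defaults):
  // with max_elems() == 16*1024 and GCDrainStackTargetSize == 64, a
  // partial drain stops once the queue is down to
  // MIN2((size_t)16*1024/3, 64) == 64 entries, deliberately leaving
  // work behind for other tasks to steal; a total drain uses a target
  // of 0 and empties the queue.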
3677   size_t target_size;
3678   if (partially) {
3679     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
3680   } else {
3681     target_size = 0;
3682   }
3683
3684   if (_task_queue->size() > target_size) {
3685     if (_cm->verbose_high()) {
3686       gclog_or_tty->print_cr("[%d] draining local queue, target size = %d",
3687                              _task_id, target_size);
3688     }
3689
3690     oop obj;
3691     bool ret = _task_queue->pop_local(obj);
3692     while (ret) {
3693       statsOnly( ++_local_pops );
3694
3695       if (_cm->verbose_high()) {
3696         gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id,
3697                                (void*) obj);
3698       }
3699
3700       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
3701       assert(!_g1h->is_on_master_free_list(
3702                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
3703
3704       scan_object(obj);
3705
3706       if (_task_queue->size() <= target_size || has_aborted()) {
3707         ret = false;
3708       } else {
3709         ret = _task_queue->pop_local(obj);
3710       }
3711     }
3712
3713     if (_cm->verbose_high()) {
3714       gclog_or_tty->print_cr("[%d] drained local queue, size = %d",
3715                              _task_id, _task_queue->size());
3716     }
3717   }
3718 }
3719
3720 void CMTask::drain_global_stack(bool partially) {
3721   if (has_aborted()) return;
3722
3723   // We have a policy to drain the local queue before we attempt to
3724   // drain the global stack.
3725   assert(partially || _task_queue->size() == 0, "invariant");
3726
3727   // Decide what the target size is, depending on whether we're going to
3728   // drain it partially (so that other tasks can steal if they run out
3729   // of things to do) or totally (at the very end). Notice that,
3730   // because we move entries from the global stack in chunks or
3731   // because another task might be doing the same, we might in fact
3732   // drop below the target. But this is not a problem.
3733   size_t target_size;
3734   if (partially) {
3735     target_size = _cm->partial_mark_stack_size_target();
3736   } else {
3737     target_size = 0;
3738   }
3739
3740   if (_cm->mark_stack_size() > target_size) {
3741     if (_cm->verbose_low()) {
3742       gclog_or_tty->print_cr("[%d] draining global stack, target size = %d",
3743                              _task_id, target_size);
3744     }
3745
3746     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
3747       get_entries_from_global_stack();
3748       drain_local_queue(partially);
3749     }
3750
3751     if (_cm->verbose_low()) {
3752       gclog_or_tty->print_cr("[%d] drained global stack, size = %d",
3753                              _task_id, _cm->mark_stack_size());
3754     }
3755   }
3756 }
3757
3758 // The SATB queue set has several assumptions about whether to call the par or
3759 // non-par versions of the methods. This is why some of the code is
3760 // replicated. We should really get rid of the single-threaded version
3761 // of the code to simplify things.
3762 void CMTask::drain_satb_buffers() {
3763   if (has_aborted()) return;
3764
3765   // We set this so that the regular clock knows that we're in the
3766   // middle of draining buffers and doesn't set the abort flag when it
3767   // notices that SATB buffers are available for draining. It'd be
3768   // very counterproductive if it did that. :-)
3769   _draining_satb_buffers = true;
3770
3771   CMObjectClosure oc(this);
3772   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3773   if (G1CollectedHeap::use_parallel_gc_threads()) {
3774     satb_mq_set.set_par_closure(_task_id, &oc);
3775   } else {
3776     satb_mq_set.set_closure(&oc);
3777   }
3778
3779   // This keeps claiming and applying the closure to completed buffers
3780   // until we run out of buffers or we need to abort.
3781   if (G1CollectedHeap::use_parallel_gc_threads()) {
3782     while (!has_aborted() &&
3783            satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) {
3784       if (_cm->verbose_medium()) {
3785         gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id);
3786       }
3787       statsOnly( ++_satb_buffers_processed );
3788       regular_clock_call();
3789     }
3790   } else {
3791     while (!has_aborted() &&
3792            satb_mq_set.apply_closure_to_completed_buffer()) {
3793       if (_cm->verbose_medium()) {
3794         gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id);
3795       }
3796       statsOnly( ++_satb_buffers_processed );
3797       regular_clock_call();
3798     }
3799   }
3800
3801   if (!concurrent() && !has_aborted()) {
3802     // We should only do this during remark.
3803     if (G1CollectedHeap::use_parallel_gc_threads()) {
3804       satb_mq_set.par_iterate_closure_all_threads(_task_id);
3805     } else {
3806       satb_mq_set.iterate_closure_all_threads();
3807     }
3808   }
3809
3810   _draining_satb_buffers = false;
3811
3812   assert(has_aborted() ||
3813          concurrent() ||
3814          satb_mq_set.completed_buffers_num() == 0, "invariant");
3815
3816   if (G1CollectedHeap::use_parallel_gc_threads()) {
3817     satb_mq_set.set_par_closure(_task_id, NULL);
3818   } else {
3819     satb_mq_set.set_closure(NULL);
3820   }
3821
3822   // again, this was a potentially expensive operation, so decrease the
3823   // limits to get the regular clock call early
3824   decrease_limits();
3825 }
3826
3827 void CMTask::drain_region_stack(BitMapClosure* bc) {
3828   if (has_aborted()) return;
3829
3830   assert(_region_finger == NULL,
3831          "it should be NULL when we're not scanning a region");
3832
3833   if (!_cm->region_stack_empty() || !_aborted_region.is_empty()) {
3834     if (_cm->verbose_low()) {
3835       gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
3836                              _task_id, _cm->region_stack_size());
3837     }
3838
3839     MemRegion mr;
3840
3841     if (!_aborted_region.is_empty()) {
3842       mr = _aborted_region;
3843       _aborted_region = MemRegion();
3844
3845       if (_cm->verbose_low()) {
3846         gclog_or_tty->print_cr("[%d] scanning aborted region "
3847                                "[ " PTR_FORMAT ", " PTR_FORMAT " )",
3848                                _task_id, mr.start(), mr.end());
3849       }
3850     } else {
3851       mr = _cm->region_stack_pop_lock_free();
3852       // it returns MemRegion() if the pop fails
3853       statsOnly( if (mr.start() != NULL) ++_region_stack_pops );
3854     }
3855
3856     while (mr.start() != NULL) {
3857       if (_cm->verbose_medium()) {
3858         gclog_or_tty->print_cr("[%d] we are scanning region "
3859                                "["PTR_FORMAT", "PTR_FORMAT")",
3860                                _task_id, mr.start(), mr.end());
3861       }
3862
3863       assert(mr.end() <= _cm->finger(),
3864              "otherwise the region shouldn't be on the stack");
3865       assert(!mr.is_empty(), "Only non-empty regions live on the region stack");
3866       if (_nextMarkBitMap->iterate(bc, mr)) {
3867         assert(!has_aborted(),
3868                "cannot abort the task without aborting the bitmap iteration");
3869
3870         // We finished iterating over the region without aborting.
3871         regular_clock_call();
3872         if (has_aborted()) {
3873           mr = MemRegion();
3874         } else {
3875           mr = _cm->region_stack_pop_lock_free();
3876           // it returns MemRegion() if the pop fails
3877           statsOnly( if (mr.start() != NULL) ++_region_stack_pops );
3878         }
3879       } else {
3880         assert(has_aborted(), "currently the only way to do so");
3881
3882         // The only way to abort the bitmap iteration is to return
3883         // false from the do_bit() method. However, inside the
3884         // do_bit() method we move the _region_finger to point to the
3885         // object currently being looked at. So, if we bail out, we
3886         // have definitely set _region_finger to something non-null.
3887         assert(_region_finger != NULL, "invariant");
3888
3889         // Make sure that any previously aborted region has been
3890         // cleared.
3891         assert(_aborted_region.is_empty(), "aborted region not cleared");
3892
3893         // The iteration was actually aborted. So now _region_finger
3894         // points to the address of the object we last scanned. If we
3895         // leave it there, when we restart this task, we will rescan
3896         // the object. It is easy to avoid this. We move the finger by
3897         // enough to point to the next possible object header (the
3898         // bitmap knows by how much we need to move it as it knows its
3899         // granularity).
3900         MemRegion newRegion =
3901           MemRegion(_nextMarkBitMap->nextWord(_region_finger), mr.end());
3902
3903         if (!newRegion.is_empty()) {
3904           if (_cm->verbose_low()) {
3905             gclog_or_tty->print_cr("[%d] recording unscanned region "
3906                                    "[" PTR_FORMAT "," PTR_FORMAT ") in CMTask",
3907                                    _task_id,
3908                                    newRegion.start(), newRegion.end());
3909           }
3910           // Now record the part of the region we didn't scan to
3911           // make sure this task scans it later.
3912           _aborted_region = newRegion;
3913         }
3914         // break from while
3915         mr = MemRegion();
3916       }
3917       _region_finger = NULL;
3918     }
3919
3920     if (_cm->verbose_low()) {
3921       gclog_or_tty->print_cr("[%d] drained region stack, size = %d",
3922                              _task_id, _cm->region_stack_size());
3923     }
3924   }
3925 }
3926
3927 void CMTask::print_stats() {
3928   gclog_or_tty->print_cr("Marking Stats, task = %d, calls = %d",
3929                          _task_id, _calls);
3930   gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3931                          _elapsed_time_ms, _termination_time_ms);
3932   gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3933                          _step_times_ms.num(), _step_times_ms.avg(),
3934                          _step_times_ms.sd());
3935   gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
3936                          _step_times_ms.maximum(), _step_times_ms.sum());
3937
3938 #if _MARKING_STATS_
3939   gclog_or_tty->print_cr(" Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3940                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
3941                          _all_clock_intervals_ms.sd());
3942   gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms",
3943                          _all_clock_intervals_ms.maximum(),
3944                          _all_clock_intervals_ms.sum());
3945   gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d",
3946                          _clock_due_to_scanning, _clock_due_to_marking);
3947   gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d",
3948                          _objs_scanned, _objs_found_on_bitmap);
3949   gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d",
3950                          _local_pushes, _local_pops, _local_max_size);
3951   gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d",
3952                          _global_pushes, _global_pops, _global_max_size);
3953   gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d",
3954                          _global_transfers_to, _global_transfers_from);
3955   gclog_or_tty->print_cr(" Regions: claimed = %d, Region Stack: pops = %d",
3956                          _regions_claimed, _region_stack_pops);
3957   gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed);
3958   gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d",
3959                          _steal_attempts, _steals);
3960   gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted);
3961   gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d",
3962                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", 3964 _aborted_timed_out, _aborted_satb, _aborted_termination); 3965 #endif // _MARKING_STATS_ 3966 } 3967 3968 /***************************************************************************** 3969 3970 The do_marking_step(time_target_ms) method is the building block 3971 of the parallel marking framework. It can be called in parallel 3972 with other invocations of do_marking_step() on different tasks 3973 (but only one per task, obviously) and concurrently with the 3974 mutator threads, or during remark, hence it eliminates the need 3975 for two versions of the code. When called during remark, it will 3976 pick up from where the task left off during the concurrent marking 3977 phase. Interestingly, tasks are also claimable during evacuation 3978 pauses too, since do_marking_step() ensures that it aborts before 3979 it needs to yield. 3980 3981 The data structures that is uses to do marking work are the 3982 following: 3983 3984 (1) Marking Bitmap. If there are gray objects that appear only 3985 on the bitmap (this happens either when dealing with an overflow 3986 or when the initial marking phase has simply marked the roots 3987 and didn't push them on the stack), then tasks claim heap 3988 regions whose bitmap they then scan to find gray objects. A 3989 global finger indicates where the end of the last claimed region 3990 is. A local finger indicates how far into the region a task has 3991 scanned. The two fingers are used to determine how to gray an 3992 object (i.e. whether simply marking it is OK, as it will be 3993 visited by a task in the future, or whether it needs to be also 3994 pushed on a stack). 3995 3996 (2) Local Queue. The local queue of the task which is accessed 3997 reasonably efficiently by the task. Other tasks can steal from 3998 it when they run out of work. Throughout the marking phase, a 3999 task attempts to keep its local queue short but not totally 4000 empty, so that entries are available for stealing by other 4001 tasks. Only when there is no more work, a task will totally 4002 drain its local queue. 4003 4004 (3) Global Mark Stack. This handles local queue overflow. During 4005 marking only sets of entries are moved between it and the local 4006 queues, as access to it requires a mutex and more fine-grain 4007 interaction with it which might cause contention. If it 4008 overflows, then the marking phase should restart and iterate 4009 over the bitmap to identify gray objects. Throughout the marking 4010 phase, tasks attempt to keep the global mark stack at a small 4011 length but not totally empty, so that entries are available for 4012 popping by other tasks. Only when there is no more work, tasks 4013 will totally drain the global mark stack. 4014 4015 (4) Global Region Stack. Entries on it correspond to areas of 4016 the bitmap that need to be scanned since they contain gray 4017 objects. Pushes on the region stack only happen during 4018 evacuation pauses and typically correspond to areas covered by 4019 GC LABS. If it overflows, then the marking phase should restart 4020 and iterate over the bitmap to identify gray objects. Tasks will 4021 try to totally drain the region stack as soon as possible. 4022 4023 (5) SATB Buffer Queue. This is where completed SATB buffers are 4024 made available. Buffers are regularly removed from this queue 4025 and scanned for roots, so that the queue doesn't get too 4026 long. 
4027  well as the filled-in parts of any uncompleted buffers.

4029  The do_marking_step() method tries to abort when the time target
4030  has been reached. There are a few other cases when the
4031  do_marking_step() method also aborts:

4033  (1) When the marking phase has been aborted (after a Full GC).

4035  (2) When a global overflow (either on the global stack or the
4036  region stack) has been triggered. Before the task aborts, it
4037  will actually sync up with the other tasks to ensure that all
4038  the marking data structures (local queues, stacks, fingers etc.)
4039  are re-initialised so that when do_marking_step() completes,
4040  the marking phase can immediately restart.

4042  (3) When enough completed SATB buffers are available. The
4043  do_marking_step() method only tries to drain SATB buffers right
4044  at the beginning. So, if enough buffers are available, the
4045  marking step aborts and the SATB buffers are processed at
4046  the beginning of the next invocation.

4048  (4) To yield. When we have to yield, we abort and do the yield
4049  right at the end of do_marking_step(). This saves us from a lot
4050  of hassle as, by yielding, we might allow a Full GC. If this
4051  happens then objects will be compacted underneath our feet, the
4052  heap might shrink, etc. We save checking for this by just
4053  aborting and doing the yield right at the end.

4055  From the above it follows that the do_marking_step() method should
4056  be called in a loop (or, otherwise, regularly) until it completes.

4058  If a marking step completes without its has_aborted() flag being
4059  true, it means it has completed the current marking phase (and
4060  also all other marking tasks have done so and have all synced up).

4062  A method called regular_clock_call() is invoked "regularly" (at
4063  sub-ms intervals) throughout marking. It is this clock method that
4064  checks all the abort conditions which were mentioned above and
4065  decides when the task should abort. A work-based scheme is used to
4066  trigger this clock method: it fires when the number of object words
4067  the marking phase has scanned or the number of references the
4068  marking phase has visited reaches a given limit. Additional
4069  invocations of the clock method have been planted in a few other
4070  strategic places too. The initial reason for the clock method was
4071  to avoid calling vtime too regularly, as it is quite expensive. So,
4072  once it was in place, it was natural to piggy-back all the other
4073  abort conditions on it too and not constantly check them throughout
 the code.
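 As an illustration, the expected calling convention looks roughly
 like the sketch below (hypothetical driver code, not a verbatim copy
 of any particular caller; the 10.0ms time target is an assumed
 value):

   do {
     task->do_marking_step(10.0 /* assumed time target, in ms */,
                           true /* do_stealing */,
                           true /* do_termination */);
     // after an aborted step the caller typically yields (allowing a
     // safepoint / Full GC) before calling do_marking_step() again
   } while (task->has_aborted());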
4074
4075 *****************************************************************************/

4077 void CMTask::do_marking_step(double time_target_ms,
4078                              bool do_stealing,
4079                              bool do_termination) {
4080   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
4081   assert(concurrent() == _cm->concurrent(), "they should be the same");
4082
4083   assert(concurrent() || _cm->region_stack_empty(),
4084          "the region stack should have been cleared before remark");
4085   assert(concurrent() || !_cm->has_aborted_regions(),
4086          "aborted regions should have been cleared before remark");
4087   assert(_region_finger == NULL,
4088          "this should be non-null only when a region is being scanned");
4089
4090   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
4091   assert(_task_queues != NULL, "invariant");
4092   assert(_task_queue != NULL, "invariant");
4093   assert(_task_queues->queue(_task_id) == _task_queue, "invariant");
4094
4095   assert(!_claimed,
4096          "only one thread should claim this task at any one time");
4097
4098   // OK, this doesn't safeguard against all possible scenarios, as it is
4099   // possible for two threads to set the _claimed flag at the same
4100   // time. But it is only for debugging purposes anyway and it will
4101   // catch most problems.
4102   _claimed = true;
4103
4104   _start_time_ms = os::elapsedVTime() * 1000.0;
4105   statsOnly( _interval_start_time_ms = _start_time_ms );
4106
4107   double diff_prediction_ms =
4108     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
4109   _time_target_ms = time_target_ms - diff_prediction_ms;
4110
4111   // set up the variables that are used in the work-based scheme to
4112   // call the regular clock method
4113   _words_scanned = 0;
4114   _refs_reached = 0;
4115   recalculate_limits();
4116
4117   // clear all flags
4118   clear_has_aborted();
4119   _has_timed_out = false;
4120   _draining_satb_buffers = false;
4121
4122   ++_calls;
4123
4124   if (_cm->verbose_low()) {
4125     gclog_or_tty->print_cr("[%d] >>>>>>>>>> START, call = %d, "
4126                            "target = %1.2lfms >>>>>>>>>>",
4127                            _task_id, _calls, _time_target_ms);
4128   }
4129
4130   // Set up the bitmap and oop closures. Anything that uses them is
4131   // eventually called from this method, so it is OK to allocate them
4132   // locally, on this method's stack frame.
4133   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
4134   G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
4135   set_cm_oop_closure(&cm_oop_closure);
4136
4137   if (_cm->has_overflown()) {
4138     // This can happen if the region stack or the mark stack overflows
4139     // during a GC pause and this task, after a yield point,
4140     // restarts. We have to abort as we need to get into the overflow
4141     // protocol which happens right at the end of this task.
4142     set_has_aborted();
4143   }
4144
4145   // First drain any available SATB buffers. After this, we will not
4146   // look at SATB buffers before the next invocation of this method.
4147   // If enough completed SATB buffers are queued up, the regular clock
4148   // will abort this task so that it restarts.
4149   drain_satb_buffers();
4150   // ...then partially drain the local queue and the global stack
4151   drain_local_queue(true);
4152   drain_global_stack(true);
4153
4154   // Then totally drain the region stack. We will not look at
4155   // it again before the next invocation of this method. Entries on
4156   // the region stack are only added during evacuation pauses, for
4157   // which we have to yield. When we do, we abort the task anyway so
4158   // it will look at the region stack again when it restarts.
4159   bitmap_closure.set_scanning_heap_region(false);
4160   drain_region_stack(&bitmap_closure);
4161   // ...then partially drain the local queue and the global stack
4162   drain_local_queue(true);
4163   drain_global_stack(true);
4164
4165   do {
4166     if (!has_aborted() && _curr_region != NULL) {
4167       // This means that we're already holding on to a region.
4168       assert(_finger != NULL, "if region is not NULL, then the finger "
4169              "should not be NULL either");
4170
4171       // We might have restarted this task after an evacuation pause
4172       // which might have evacuated the region we're holding on to
4173       // underneath our feet. Let's read its limit again to make sure
4174       // that we do not iterate over a region of the heap that
4175       // contains garbage (update_region_limit() will also move
4176       // _finger to the start of the region if it is found empty).
4177       update_region_limit();
4178       // We will start from _finger not from the start of the region,
4179       // as we might be restarting this task after aborting half-way
4180       // through scanning this region. In this case, _finger points to
4181       // the address where we last found a marked object. If this is a
4182       // fresh region, _finger points to start().
4183       MemRegion mr = MemRegion(_finger, _region_limit);
4184
4185       if (_cm->verbose_low()) {
4186         gclog_or_tty->print_cr("[%d] we're scanning part "
4187                                "["PTR_FORMAT", "PTR_FORMAT") "
4188                                "of region "PTR_FORMAT,
4189                                _task_id, _finger, _region_limit, _curr_region);
4190       }
4191
4192       // Let's iterate over the bitmap of the part of the
4193       // region that is left.
4194       bitmap_closure.set_scanning_heap_region(true);
4195       if (mr.is_empty() ||
4196           _nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4197         // We successfully completed iterating over the region. Now,
4198         // let's give up the region.
4199         giveup_current_region();
4200         regular_clock_call();
4201       } else {
4202         assert(has_aborted(), "currently the only way to do so");
4203         // The only way to abort the bitmap iteration is to return
4204         // false from the do_bit() method. However, inside the
4205         // do_bit() method we move the _finger to point to the
4206         // object currently being looked at. So, if we bail out, we
4207         // have definitely set _finger to something non-null.
4208         assert(_finger != NULL, "invariant");
4209
4210         // Region iteration was actually aborted. So now _finger
4211         // points to the address of the object we last scanned. If we
4212         // leave it there, when we restart this task, we will rescan
4213         // the object. It is easy to avoid this. We move the finger by
4214         // enough to point to the next possible object header (the
4215         // bitmap knows by how much we need to move it as it knows its
4216         // granularity).
4217         assert(_finger < _region_limit, "invariant");
4218         HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
4219         // Check if bitmap iteration was aborted while scanning the last object
4220         if (new_finger >= _region_limit) {
4221           giveup_current_region();
4222         } else {
4223           move_finger_to(new_finger);
4224         }
4225       }
4226     }
4227     // At this point we have either completed iterating over the
4228     // region we were holding on to, or we have aborted.
4229
4230     // We then partially drain the local queue and the global stack.
4231     // (Do we really need this?)
4232     drain_local_queue(true);
4233     drain_global_stack(true);
4234
4235     // Read the note on the claim_region() method on why it might
4236     // return NULL with potentially more regions available for
4237     // claiming and why we have to check out_of_regions() to determine
4238     // whether we're done or not.
4239     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
4240       // We are going to try to claim a new region. We should have
4241       // given up on the previous one.
4242       // Separated the asserts so that we know which one fires.
4243       assert(_curr_region == NULL, "invariant");
4244       assert(_finger == NULL, "invariant");
4245       assert(_region_limit == NULL, "invariant");
4246       if (_cm->verbose_low()) {
4247         gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id);
4248       }
4249       HeapRegion* claimed_region = _cm->claim_region(_task_id);
4250       if (claimed_region != NULL) {
4251         // Yes, we managed to claim one
4252         statsOnly( ++_regions_claimed );
4253
4254         if (_cm->verbose_low()) {
4255           gclog_or_tty->print_cr("[%d] we successfully claimed "
4256                                  "region "PTR_FORMAT,
4257                                  _task_id, claimed_region);
4258         }
4259
4260         setup_for_region(claimed_region);
4261         assert(_curr_region == claimed_region, "invariant");
4262       }
4263       // It is important to call the regular clock here. It might take
4264       // a while to claim a region if, for example, we hit a large
4265       // block of empty regions. So we need to call the regular clock
4266       // method once round the loop to make sure it's called
4267       // frequently enough.
4268       regular_clock_call();
4269     }
4270
4271     if (!has_aborted() && _curr_region == NULL) {
4272       assert(_cm->out_of_regions(),
4273              "at this point we should be out of regions");
4274     }
4275   } while (_curr_region != NULL && !has_aborted());
4276
4277   if (!has_aborted()) {
4278     // We cannot check whether the global stack is empty, since other
4279     // tasks might be pushing objects to it concurrently. We also cannot
4280     // check if the region stack is empty because if a thread is aborting
4281     // it can push a partially done region back.
4282     assert(_cm->out_of_regions(),
4283            "at this point we should be out of regions");
4284
4285     if (_cm->verbose_low()) {
4286       gclog_or_tty->print_cr("[%d] all regions claimed", _task_id);
4287     }
4288
4289     // Try to reduce the number of available SATB buffers so that
4290     // remark has less work to do.
4291     drain_satb_buffers();
4292   }
4293
4294   // Since we've done everything else, we can now totally drain the
4295   // local queue and global stack.
4296   drain_local_queue(false);
4297   drain_global_stack(false);
4298
4299   // Attempt at work stealing from other tasks' queues.
4300   if (do_stealing && !has_aborted()) {
4301     // We have not aborted. This means that we have finished all that
4302     // we could. Let's try to do some stealing...
4303
4304     // We cannot check whether the global stack is empty, since other
4305     // tasks might be pushing objects to it concurrently. We also cannot
4306     // check if the region stack is empty because if a thread is aborting
4307     // it can push a partially done region back.
4308     assert(_cm->out_of_regions() && _task_queue->size() == 0,
4309            "only way to reach here");
4310
4311     if (_cm->verbose_low()) {
4312       gclog_or_tty->print_cr("[%d] starting to steal", _task_id);
4313     }
4314
4315     while (!has_aborted()) {
4316       oop obj;
4317       statsOnly( ++_steal_attempts );
4318
4319       if (_cm->try_stealing(_task_id, &_hash_seed, obj)) {
4320         if (_cm->verbose_medium()) {
4321           gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully",
4322                                  _task_id, (void*) obj);
4323         }
4324
4325         statsOnly( ++_steals );
4326
4327         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
4328                "any stolen object should be marked");
4329         scan_object(obj);
4330
4331         // And since we're towards the end, let's totally drain the
4332         // local queue and global stack.
4333         drain_local_queue(false);
4334         drain_global_stack(false);
4335       } else {
4336         break;
4337       }
4338     }
4339   }
4340
4341   // If we are about to wrap up and go into termination, check if we
4342   // should raise the overflow flag.
4343   if (do_termination && !has_aborted()) {
4344     if (_cm->force_overflow()->should_force()) {
4345       _cm->set_has_overflown();
4346       regular_clock_call();
4347     }
4348   }
4349
4350   // We still haven't aborted. Now, let's try to get into the
4351   // termination protocol.
4352   if (do_termination && !has_aborted()) {
4353     // We cannot check whether the global stack is empty, since other
4354     // tasks might be concurrently pushing objects on it. We also cannot
4355     // check if the region stack is empty because if a thread is aborting
4356     // it can push a partially done region back.
4357     // Separated the asserts so that we know which one fires.
4358     assert(_cm->out_of_regions(), "only way to reach here");
4359     assert(_task_queue->size() == 0, "only way to reach here");
4360
4361     if (_cm->verbose_low()) {
4362       gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id);
4363     }
4364
4365     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
4366     // The CMTask class also extends the TerminatorTerminator class,
4367     // hence its should_exit_termination() method will also decide
4368     // whether to exit the termination protocol or not.
4369     bool finished = _cm->terminator()->offer_termination(this);
4370     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
4371     _termination_time_ms +=
4372       termination_end_time_ms - _termination_start_time_ms;
4373
4374     if (finished) {
4375       // We're all done.
4376
4377       if (_task_id == 0) {
4378         // let's allow task 0 to do this
4379         if (concurrent()) {
4380           assert(_cm->concurrent_marking_in_progress(), "invariant");
4381           // we need to set this to false before the next
4382           // safepoint. This way we ensure that the marking phase
4383           // doesn't observe any more heap expansions.
4384           _cm->clear_concurrent_marking_in_progress();
4385         }
4386       }
4387
4388       // We can now guarantee that the global stack is empty, since
4389       // all other tasks have finished. We separated the guarantees so
4390       // that, if a condition is false, we can immediately find out
4391       // which one.
4392       guarantee(_cm->out_of_regions(), "only way to reach here");
4393       guarantee(_aborted_region.is_empty(), "only way to reach here");
4394       guarantee(_cm->region_stack_empty(), "only way to reach here");
4395       guarantee(_cm->mark_stack_empty(), "only way to reach here");
4396       guarantee(_task_queue->size() == 0, "only way to reach here");
4397       guarantee(!_cm->has_overflown(), "only way to reach here");
4398       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
4399       guarantee(!_cm->region_stack_overflow(), "only way to reach here");
4400
4401       if (_cm->verbose_low()) {
4402         gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id);
4403       }
4404     } else {
4405       // Apparently there's more work to do. Let's abort this task. Its
4406       // caller will restart it and we can hopefully find more things to do.
4407
4408       if (_cm->verbose_low()) {
4409         gclog_or_tty->print_cr("[%d] apparently there is more work to do",
4410                                _task_id);
4411       }
4412
4413       set_has_aborted();
4414       statsOnly( ++_aborted_termination );
4415     }
4416   }
4417
4418   // Mainly for debugging purposes to make sure that a pointer to the
4419   // closure which was allocated on this frame's stack doesn't
4420   // escape it by accident.
4421   set_cm_oop_closure(NULL);
4422   double end_time_ms = os::elapsedVTime() * 1000.0;
4423   double elapsed_time_ms = end_time_ms - _start_time_ms;
4424   // Update the step history.
4425   _step_times_ms.add(elapsed_time_ms);
4426
4427   if (has_aborted()) {
4428     // The task was aborted for some reason.
4429
4430     statsOnly( ++_aborted );
4431
4432     if (_has_timed_out) {
4433       double diff_ms = elapsed_time_ms - _time_target_ms;
4434       // Keep statistics of how well we did with respect to hitting
4435       // our target only if we actually timed out (if we aborted for
4436       // other reasons, then the results might get skewed).
4437       _marking_step_diffs_ms.add(diff_ms);
4438     }
4439
4440     if (_cm->has_overflown()) {
4441       // This is the interesting one. We aborted because a global
4442       // overflow was raised. This means we have to restart the
4443       // marking phase and start iterating over regions. However, in
4444       // order to do this we have to make sure that all tasks stop
4445       // what they are doing and re-initialise in a safe manner. We
4446       // will achieve this with the use of two barrier sync points.
4447
4448       if (_cm->verbose_low()) {
4449         gclog_or_tty->print_cr("[%d] detected overflow", _task_id);
4450       }
4451
4452       _cm->enter_first_sync_barrier(_task_id);
4453       // When we exit this sync barrier we know that all tasks have
4454       // stopped doing marking work. So, it's now safe to
4455       // re-initialise our data structures. At the end of this method,
4456       // task 0 will clear the global data structures.
4457
4458       statsOnly( ++_aborted_overflow );
4459
4460       // We clear the local state of this task...
4461       clear_region_fields();
4462
4463       // ...and enter the second barrier.
4464       _cm->enter_second_sync_barrier(_task_id);
4465       // At this point everything has been re-initialised and we're
4466       // ready to restart.
4467     }
4468
4469     if (_cm->verbose_low()) {
4470       gclog_or_tty->print_cr("[%d] <<<<<<<<<< ABORTING, target = %1.2lfms, "
4471                              "elapsed = %1.2lfms <<<<<<<<<<",
4472                              _task_id, _time_target_ms, elapsed_time_ms);
4473       if (_cm->has_aborted()) {
4474         gclog_or_tty->print_cr("[%d] ========== MARKING ABORTED ==========",
4475                                _task_id);
4476       }
4477     }
4478   } else {
4479     if (_cm->verbose_low()) {
4480       gclog_or_tty->print_cr("[%d] <<<<<<<<<< FINISHED, target = %1.2lfms, "
4481                              "elapsed = %1.2lfms <<<<<<<<<<",
4482                              _task_id, _time_target_ms, elapsed_time_ms);
4483     }
4484   }
4485
4486   _claimed = false;
4487 }
4488
4489 CMTask::CMTask(int task_id,
4490                ConcurrentMark* cm,
4491                CMTaskQueue* task_queue,
4492                CMTaskQueueSet* task_queues)
4493   : _g1h(G1CollectedHeap::heap()),
4494     _task_id(task_id), _cm(cm),
4495     _claimed(false),
4496     _nextMarkBitMap(NULL), _hash_seed(17),
4497     _task_queue(task_queue),
4498     _task_queues(task_queues),
4499     _cm_oop_closure(NULL),
4500     _aborted_region(MemRegion()) {
4501   guarantee(task_queue != NULL, "invariant");
4502   guarantee(task_queues != NULL, "invariant");
4503
4504   statsOnly( _clock_due_to_scanning = 0;
4505              _clock_due_to_marking = 0 );
4506
4507   _marking_step_diffs_ms.add(0.5);
4508 }
4509
4510 // These are formatting macros that are used below to ensure
4511 // consistent formatting. The *_H_* versions are used to format the
4512 // header for a particular value and they should be kept consistent
4513 // with the corresponding macro. Also note that most of the macros add
4514 // the necessary white space (as a prefix) which makes them a bit
4515 // easier to compose.
4516
4517 // All the output lines are prefixed with this string to be able to
4518 // identify them easily in a large log file.
4519 #define G1PPRL_LINE_PREFIX "###"
4520
4521 #define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT
4522 #ifdef _LP64
4523 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
4524 #else // _LP64
4525 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
4526 #endif // _LP64
4527
4528 // For per-region info
4529 #define G1PPRL_TYPE_FORMAT " %-4s"
4530 #define G1PPRL_TYPE_H_FORMAT " %4s"
4531 #define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9)
4532 #define G1PPRL_BYTE_H_FORMAT " %9s"
4533 #define G1PPRL_DOUBLE_FORMAT " %14.1f"
4534 #define G1PPRL_DOUBLE_H_FORMAT " %14s"
4535
4536 // For summary info
4537 #define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT
4538 #define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT
4539 #define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB"
4540 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
4541
4542 G1PrintRegionLivenessInfoClosure::
4543 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
4544   : _out(out),
4545     _total_used_bytes(0), _total_capacity_bytes(0),
4546     _total_prev_live_bytes(0), _total_next_live_bytes(0),
4547     _hum_used_bytes(0), _hum_capacity_bytes(0),
4548     _hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
4549   G1CollectedHeap* g1h = G1CollectedHeap::heap();
4550   MemRegion g1_committed = g1h->g1_committed();
4551   MemRegion g1_reserved = g1h->g1_reserved();
4552   double now = os::elapsedTime();
4553
4554   // Print the header of the output.
4555   _out->cr();
4556   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
4557   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
4558                  G1PPRL_SUM_ADDR_FORMAT("committed")
4559                  G1PPRL_SUM_ADDR_FORMAT("reserved")
4560                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
4561                  g1_committed.start(), g1_committed.end(),
4562                  g1_reserved.start(), g1_reserved.end(),
4563                  HeapRegion::GrainBytes);
4564   _out->print_cr(G1PPRL_LINE_PREFIX);
4565   _out->print_cr(G1PPRL_LINE_PREFIX
4566                  G1PPRL_TYPE_H_FORMAT
4567                  G1PPRL_ADDR_BASE_H_FORMAT
4568                  G1PPRL_BYTE_H_FORMAT
4569                  G1PPRL_BYTE_H_FORMAT
4570                  G1PPRL_BYTE_H_FORMAT
4571                  G1PPRL_DOUBLE_H_FORMAT,
4572                  "type", "address-range",
4573                  "used", "prev-live", "next-live", "gc-eff");
4574 }
4575
4576 // It takes as a parameter a pointer to one of the _hum_* fields; it
4577 // deduces the corresponding value for a region in a humongous region
4578 // series (either the region size, or what's left if the _hum_* field
4579 // is < the region size), and updates the _hum_* field accordingly.
4580 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
4581   size_t bytes = 0;
4582   // The > 0 check is to deal with the prev and next live bytes which
4583   // could be 0.
4584   if (*hum_bytes > 0) {
4585     bytes = MIN2((size_t) HeapRegion::GrainBytes, *hum_bytes);
4586     *hum_bytes -= bytes;
4587   }
4588   return bytes;
4589 }
4590
4591 // It deduces the values for a region in a humongous region series
4592 // from the _hum_* fields and updates those accordingly. It assumes
4593 // that the _hum_* fields have already been set up from the "starts
4594 // humongous" region and that we visit the regions in address order.
4595 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
4596                                                      size_t* capacity_bytes,
4597                                                      size_t* prev_live_bytes,
4598                                                      size_t* next_live_bytes) {
4599   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
4600   *used_bytes = get_hum_bytes(&_hum_used_bytes);
4601   *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
4602   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
4603   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
4604 }
4605
4606 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
4607   const char* type = "";
4608   HeapWord* bottom = r->bottom();
4609   HeapWord* end = r->end();
4610   size_t capacity_bytes = r->capacity();
4611   size_t used_bytes = r->used();
4612   size_t prev_live_bytes = r->live_bytes();
4613   size_t next_live_bytes = r->next_live_bytes();
4614   double gc_eff = r->gc_efficiency();
4615   if (r->used() == 0) {
4616     type = "FREE";
4617   } else if (r->is_survivor()) {
4618     type = "SURV";
4619   } else if (r->is_young()) {
4620     type = "EDEN";
4621   } else if (r->startsHumongous()) {
4622     type = "HUMS";
4623
4624     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
4625            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
4626            "they should have been zeroed after the last time we used them");
4627     // Set up the _hum_* fields.
4628     _hum_capacity_bytes = capacity_bytes;
4629     _hum_used_bytes = used_bytes;
4630     _hum_prev_live_bytes = prev_live_bytes;
4631     _hum_next_live_bytes = next_live_bytes;
4632     get_hum_bytes(&used_bytes, &capacity_bytes,
4633                   &prev_live_bytes, &next_live_bytes);
4634     end = bottom + HeapRegion::GrainWords;
4635   } else if (r->continuesHumongous()) {
4636     type = "HUMC";
4637     get_hum_bytes(&used_bytes, &capacity_bytes,
4638                   &prev_live_bytes, &next_live_bytes);
4639     assert(end == bottom + HeapRegion::GrainWords, "invariant");
4640   } else {
4641     type = "OLD";
4642   }
4643
4644   _total_used_bytes += used_bytes;
4645   _total_capacity_bytes += capacity_bytes;
4646   _total_prev_live_bytes += prev_live_bytes;
4647   _total_next_live_bytes += next_live_bytes;
4648
4649   // Print a line for this particular region.
4650   _out->print_cr(G1PPRL_LINE_PREFIX
4651                  G1PPRL_TYPE_FORMAT
4652                  G1PPRL_ADDR_BASE_FORMAT
4653                  G1PPRL_BYTE_FORMAT
4654                  G1PPRL_BYTE_FORMAT
4655                  G1PPRL_BYTE_FORMAT
4656                  G1PPRL_DOUBLE_FORMAT,
4657                  type, bottom, end,
4658                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff);
4659
4660   return false;
4661 }
4662
4663 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
4664   // Print the footer of the output.
4665   _out->print_cr(G1PPRL_LINE_PREFIX);
4666   _out->print_cr(G1PPRL_LINE_PREFIX
4667                  " SUMMARY"
4668                  G1PPRL_SUM_MB_FORMAT("capacity")
4669                  G1PPRL_SUM_MB_PERC_FORMAT("used")
4670                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
4671                  G1PPRL_SUM_MB_PERC_FORMAT("next-live"),
4672                  bytes_to_mb(_total_capacity_bytes),
4673                  bytes_to_mb(_total_used_bytes),
4674                  perc(_total_used_bytes, _total_capacity_bytes),
4675                  bytes_to_mb(_total_prev_live_bytes),
4676                  perc(_total_prev_live_bytes, _total_capacity_bytes),
4677                  bytes_to_mb(_total_next_live_bytes),
4678                  perc(_total_next_live_bytes, _total_capacity_bytes));
4679   _out->cr();
4680 }
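
// An illustrative use of the closure above (a sketch only; the actual
// call sites live elsewhere in G1, and the phase tag here is assumed):
//
//   G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//
// The constructor prints the header, doHeapRegion() prints one line
// per region, and the destructor prints the summary footer.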