/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/growableArray.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
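  // Each bit in the bitmap covers HeapWordSize << _shifter bytes of heap,
  // so only addresses with that alignment can correspond to a set bit.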
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clear_range(mr);
}

void G1CMBitMap::clear_range(MemRegion mr) {
  // MemRegion::intersection() returns the result instead of modifying the
  // receiver, so the result must be assigned back to clip mr to the bitmap.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0),
  _out_of_memory(false),
  _should_expand(false) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk));
    return false;
  }
  // Release old mapping.
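  // The stack is required to be empty at this point (asserted above), so
  // no chunk contents need to be carried over to the new mapping.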
  if (_base != NULL) {
    MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();
  _should_expand = false;

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(OopChunk)) / sizeof(void*);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const OopChunkSizeInVoidStar = sizeof(OopChunk) / sizeof(void*);

  _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
  size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  // Clear expansion flag
  _should_expand = false;

  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Cannot expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  elem->next = *list;
  *list = elem;
}

G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);

  OopChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read is okay because we only ever increase the _hwm in parallel code.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
  result->next = NULL;
  return result;
}

void G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
  // Get a new chunk.
  OopChunk* new_chunk = remove_chunk_from_list(&_free_list);

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
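    // allocate_new_chunk() returns NULL once the backing array has been
    // exhausted; that case is handled below by raising _out_of_memory.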
    new_chunk = allocate_new_chunk();
  }

  if (new_chunk == NULL) {
    _out_of_memory = true;
    return;
  }

  for (size_t i = 0; i < OopsPerChunk; i++) {
    new_chunk->data[i] = ptr_arr[i];
  }

  add_chunk_to_list(&_chunk_list, new_chunk);
  Atomic::inc(&_chunks_in_chunk_list);
}

bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
  OopChunk* cur = remove_chunk_from_list(&_chunk_list);

  if (cur == NULL) {
    return false;
  }

  Atomic::dec(&_chunks_in_chunk_list);

  for (size_t i = 0; i < OopsPerChunk; i++) {
    ptr_arr[i] = (oop)cur->data[i];
  }

  add_chunk_to_list(&_free_list, cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  clear_out_of_memory();
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
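  // Unless the scan was aborted, all survivor regions must have been
  // claimed by now; the asserts below check exactly that.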
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _global_mark_stack(),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / os::initial_active_processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
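  // Each task is pointed at the "next" bitmap, which is the one the
  // upcoming cycle will mark into.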
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _global_mark_stack.set_should_expand(has_overflown());
  _global_mark_stack.set_empty(); // Also clears the overflow stack's overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_parallel_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that it gets suspended for a Full GC or that an
 * evacuation pause occurs. This is actually safe, since entering the
 * sync barrier is one of the last things do_marking_step() does, and
 * it doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      G1CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial */);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check();

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
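      // The loop above can only exit with the task still aborted if the
      // marking as a whole has been aborted, which is what we verify here.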
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
         "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
         max_parallel_marking_threads(), n_conc_workers);
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _parallel_marking_threads = MIN2(calc_parallel_marking_threads(),
                                     // We distribute work on a per-region basis, so starting
                                     // more threads than that is useless.
                                     root_regions()->num_root_regions());
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _parallel_marking_threads, root_regions()->num_root_regions());
    _parallel_workers->run_task(&task, _parallel_marking_threads);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _parallel_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
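  // (A full collection calls abort(), which is what sets has_aborted() here.)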
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm; // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm; // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm; // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
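  // The expansion flag was set by reset_marking_state() if the global mark
  // stack overflowed; the actual expansion is deferred to this STW pause.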
  if (_global_mark_stack.should_expand()) {
    _global_mark_stack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm; // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc)("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc)("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrubbing of the remembered sets needs to happen before the
  // record_concurrent_mark_cleanup_end() call below, since it affects
  // the metric by which we sort the heap regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm; // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the G1CMTask associated with a worker thread (for serial
// reference processing the G1CMTask for worker 0 is used) to preserve (mark)
// and trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also, using the tasks' local queues removes the potential
// for the workers to interfere with each other, which could occur if
// they operated on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  int _ref_counter_limit;
  int _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task),
    _ref_counter_limit(G1RefProcDrainInterval), _is_serial(is_serial) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMtask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.
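// The do_void() loop below keeps invoking do_marking_step() until it
// completes without aborting, or until the global mark stack overflows.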

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
1567 virtual void execute(ProcessTask& task); 1568 virtual void execute(EnqueueTask& task); 1569 }; 1570 1571 class G1CMRefProcTaskProxy: public AbstractGangTask { 1572 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 1573 ProcessTask& _proc_task; 1574 G1CollectedHeap* _g1h; 1575 G1ConcurrentMark* _cm; 1576 1577 public: 1578 G1CMRefProcTaskProxy(ProcessTask& proc_task, 1579 G1CollectedHeap* g1h, 1580 G1ConcurrentMark* cm) : 1581 AbstractGangTask("Process reference objects in parallel"), 1582 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 1583 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1584 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 1585 } 1586 1587 virtual void work(uint worker_id) { 1588 ResourceMark rm; 1589 HandleMark hm; 1590 G1CMTask* task = _cm->task(worker_id); 1591 G1CMIsAliveClosure g1_is_alive(_g1h); 1592 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 1593 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 1594 1595 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 1596 } 1597 }; 1598 1599 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 1600 assert(_workers != NULL, "Need parallel worker threads."); 1601 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 1602 1603 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 1604 1605 // We need to reset the concurrency level before each 1606 // proxy task execution, so that the termination protocol 1607 // and overflow handling in G1CMTask::do_marking_step() know 1608 // how many workers to wait for. 1609 _cm->set_concurrency(_active_workers); 1610 _workers->run_task(&proc_task_proxy); 1611 } 1612 1613 class G1CMRefEnqueueTaskProxy: public AbstractGangTask { 1614 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 1615 EnqueueTask& _enq_task; 1616 1617 public: 1618 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) : 1619 AbstractGangTask("Enqueue reference objects in parallel"), 1620 _enq_task(enq_task) { } 1621 1622 virtual void work(uint worker_id) { 1623 _enq_task.work(worker_id); 1624 } 1625 }; 1626 1627 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 1628 assert(_workers != NULL, "Need parallel worker threads."); 1629 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 1630 1631 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 1632 1633 // Not strictly necessary but... 1634 // 1635 // We need to reset the concurrency level before each 1636 // proxy task execution, so that the termination protocol 1637 // and overflow handling in G1CMTask::do_marking_step() know 1638 // how many workers to wait for. 1639 _cm->set_concurrency(_active_workers); 1640 _workers->run_task(&enq_task_proxy); 1641 } 1642 1643 void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { 1644 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes); 1645 } 1646 1647 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { 1648 if (has_overflown()) { 1649 // Skip processing the discovered references if we have 1650 // overflown the global marking stack. Reference objects 1651 // only get discovered once so it is OK not to 1652 // de-populate the discovered reference lists. We could have done so, 1653 // but the only benefit would be that, when marking restarts, 1654 // fewer reference objects are discovered.
1655 return; 1656 } 1657 1658 ResourceMark rm; 1659 HandleMark hm; 1660 1661 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1662 1663 // Is alive closure. 1664 G1CMIsAliveClosure g1_is_alive(g1h); 1665 1666 // Inner scope to exclude the cleaning of the string and symbol 1667 // tables from the displayed time. 1668 { 1669 GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm); 1670 1671 ReferenceProcessor* rp = g1h->ref_processor_cm(); 1672 1673 // See the comment in G1CollectedHeap::ref_processing_init() 1674 // about how reference processing currently works in G1. 1675 1676 // Set the soft reference policy 1677 rp->setup_policy(clear_all_soft_refs); 1678 assert(_global_mark_stack.is_empty(), "mark stack should be empty"); 1679 1680 // Instances of the 'Keep Alive' and 'Complete GC' closures used 1681 // in serial reference processing. Note these closures are also 1682 // used for serially processing (by the current thread) the 1683 // JNI references during parallel reference processing. 1684 // 1685 // These closures do not need to synchronize with the worker 1686 // threads involved in parallel reference processing as these 1687 // instances are executed serially by the current thread (i.e. 1688 // reference processing is not multi-threaded and is thus 1689 // performed by the current thread instead of a gang worker). 1690 // 1691 // The gang tasks involved in parallel reference processing create 1692 // their own instances of these closures, which do their own 1693 // synchronization among themselves. 1694 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 1695 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 1696 1697 // We need at least one active thread. If reference processing 1698 // is not multi-threaded, we use the current (VMThread) thread; 1699 // otherwise we use the work gang from the G1CollectedHeap and 1700 // we utilize all the worker threads we can. 1701 bool processing_is_mt = rp->processing_is_mt(); 1702 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U); 1703 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); 1704 1705 // Parallel processing task executor. 1706 G1CMRefProcTaskExecutor par_task_executor(g1h, this, 1707 g1h->workers(), active_workers); 1708 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 1709 1710 // Set the concurrency level. The phase was already set prior to 1711 // executing the remark task. 1712 set_concurrency(active_workers); 1713 1714 // Set the degree of MT processing here. If the discovery was done MT, 1715 // the number of threads involved during discovery could differ from 1716 // the number of active workers. This is OK as long as the discovered 1717 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 1718 rp->set_active_mt_degree(active_workers); 1719 1720 // Process the weak references. 1721 const ReferenceProcessorStats& stats = 1722 rp->process_discovered_references(&g1_is_alive, 1723 &g1_keep_alive, 1724 &g1_drain_mark_stack, 1725 executor, 1726 _gc_timer_cm); 1727 _gc_tracer_cm->report_gc_reference_stats(stats); 1728 1729 // The do_oop work routines of the keep_alive and drain_marking_stack 1730 // oop closures will set the has_overflown flag if we overflow the 1731 // global marking stack.
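// A hedged sketch of how that flag comes about, using names from this
// file: G1CMTask::move_entries_to_global_stack() calls
// _cm->mark_stack_push(buffer); if the push fails because the chunk
// list is exhausted, the mark stack records out-of-memory and the task
// aborts, and set_has_overflown() below makes the overflow global.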
1732 1733 assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(), 1734 "mark stack should be empty (unless it overflowed)"); 1735 1736 if (_global_mark_stack.is_out_of_memory()) { 1737 // This should have been done already when we tried to push an 1738 // entry on to the global mark stack. But let's do it again. 1739 set_has_overflown(); 1740 } 1741 1742 assert(rp->num_q() == active_workers, "why not"); 1743 1744 rp->enqueue_discovered_references(executor); 1745 1746 rp->verify_no_references_recorded(); 1747 assert(!rp->discovery_enabled(), "Post condition"); 1748 } 1749 1750 if (has_overflown()) { 1751 // We can not trust g1_is_alive if the marking stack overflowed 1752 return; 1753 } 1754 1755 assert(_global_mark_stack.is_empty(), "Marking should have completed"); 1756 1757 // Unload Klasses, String, Symbols, Code Cache, etc. 1758 if (ClassUnloadingWithConcurrentMark) { 1759 bool purged_classes; 1760 1761 { 1762 GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm); 1763 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); 1764 } 1765 1766 { 1767 GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm); 1768 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 1769 } 1770 } 1771 1772 if (G1StringDedup::is_enabled()) { 1773 GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm); 1774 G1StringDedup::unlink(&g1_is_alive); 1775 } 1776 } 1777 1778 void G1ConcurrentMark::swapMarkBitMaps() { 1779 G1CMBitMapRO* temp = _prevMarkBitMap; 1780 _prevMarkBitMap = (G1CMBitMapRO*)_nextMarkBitMap; 1781 _nextMarkBitMap = (G1CMBitMap*) temp; 1782 } 1783 1784 // Closure for marking entries in SATB buffers. 1785 class G1CMSATBBufferClosure : public SATBBufferClosure { 1786 private: 1787 G1CMTask* _task; 1788 G1CollectedHeap* _g1h; 1789 1790 // This is very similar to G1CMTask::deal_with_reference, but with 1791 // more relaxed requirements for the argument, so this must be more 1792 // circumspect about treating the argument as an object. 1793 void do_entry(void* entry) const { 1794 _task->increment_refs_reached(); 1795 HeapRegion* hr = _g1h->heap_region_containing(entry); 1796 if (entry < hr->next_top_at_mark_start()) { 1797 // Until we get here, we don't know whether entry refers to a valid 1798 // object; it could instead have been a stale reference. 
1799 oop obj = static_cast<oop>(entry); 1800 assert(obj->is_oop(true /* ignore mark word */), 1801 "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)); 1802 _task->make_reference_grey(obj); 1803 } 1804 } 1805 1806 public: 1807 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h) 1808 : _task(task), _g1h(g1h) { } 1809 1810 virtual void do_buffer(void** buffer, size_t size) { 1811 for (size_t i = 0; i < size; ++i) { 1812 do_entry(buffer[i]); 1813 } 1814 } 1815 }; 1816 1817 class G1RemarkThreadsClosure : public ThreadClosure { 1818 G1CMSATBBufferClosure _cm_satb_cl; 1819 G1CMOopClosure _cm_cl; 1820 MarkingCodeBlobClosure _code_cl; 1821 int _thread_parity; 1822 1823 public: 1824 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) : 1825 _cm_satb_cl(task, g1h), 1826 _cm_cl(g1h, g1h->concurrent_mark(), task), 1827 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), 1828 _thread_parity(Threads::thread_claim_parity()) {} 1829 1830 void do_thread(Thread* thread) { 1831 if (thread->is_Java_thread()) { 1832 if (thread->claim_oops_do(true, _thread_parity)) { 1833 JavaThread* jt = (JavaThread*)thread; 1834 1835 // In theory, it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking; 1836 // however, oops reachable from nmethods have very complex lifecycles: 1837 // * Alive if on the stack of an executing method 1838 // * Weakly reachable otherwise 1839 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be 1840 // live by the SATB invariant, but other oops recorded in nmethods may behave differently. 1841 jt->nmethods_do(&_code_cl); 1842 1843 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl); 1844 } 1845 } else if (thread->is_VM_thread()) { 1846 if (thread->claim_oops_do(true, _thread_parity)) { 1847 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl); 1848 } 1849 } 1850 } 1851 }; 1852 1853 class G1CMRemarkTask: public AbstractGangTask { 1854 private: 1855 G1ConcurrentMark* _cm; 1856 public: 1857 void work(uint worker_id) { 1858 // Since all available tasks are actually started, we should 1859 // only proceed if we're supposed to be active. 1860 if (worker_id < _cm->active_tasks()) { 1861 G1CMTask* task = _cm->task(worker_id); 1862 task->record_start_time(); 1863 { 1864 ResourceMark rm; 1865 HandleMark hm; 1866 1867 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task); 1868 Threads::threads_do(&threads_f); 1869 } 1870 1871 do { 1872 task->do_marking_step(1000000000.0 /* something very large */, 1873 true /* do_termination */, 1874 false /* is_serial */); 1875 } while (task->has_aborted() && !_cm->has_overflown()); 1876 // If we overflow, then we do not want to restart. We instead 1877 // want to abort remark and do concurrent marking again.
1878 task->record_end_time(); 1879 } 1880 } 1881 1882 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) : 1883 AbstractGangTask("Par Remark"), _cm(cm) { 1884 _cm->terminator()->reset_for_reuse(active_workers); 1885 } 1886 }; 1887 1888 void G1ConcurrentMark::checkpointRootsFinalWork() { 1889 ResourceMark rm; 1890 HandleMark hm; 1891 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1892 1893 GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm); 1894 1895 g1h->ensure_parsability(false); 1896 1897 // This is remark, so we'll use up all active threads. 1898 uint active_workers = g1h->workers()->active_workers(); 1899 set_concurrency_and_phase(active_workers, false /* concurrent */); 1900 // Leave _parallel_marking_threads at its 1901 // value originally calculated in the G1ConcurrentMark 1902 // constructor and pass values of the active workers 1903 // through the gang in the task. 1904 1905 { 1906 StrongRootsScope srs(active_workers); 1907 1908 G1CMRemarkTask remarkTask(this, active_workers); 1909 // We will start all available threads, even if we decide that the 1910 // active_workers will be fewer. The extra ones will just bail out 1911 // immediately. 1912 g1h->workers()->run_task(&remarkTask); 1913 } 1914 1915 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 1916 guarantee(has_overflown() || 1917 satb_mq_set.completed_buffers_num() == 0, 1918 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT, 1919 BOOL_TO_STR(has_overflown()), 1920 satb_mq_set.completed_buffers_num()); 1921 1922 print_stats(); 1923 } 1924 1925 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) { 1926 // Note we are overriding the read-only view of the prev map here, via 1927 // the cast. 1928 ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr); 1929 } 1930 1931 HeapRegion* 1932 G1ConcurrentMark::claim_region(uint worker_id) { 1933 // "checkpoint" the finger 1934 HeapWord* finger = _finger; 1935 1936 // _heap_end will not change underneath our feet; it only changes at 1937 // yield points. 1938 while (finger < _heap_end) { 1939 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 1940 1941 HeapRegion* curr_region = _g1h->heap_region_containing(finger); 1942 // Make sure that the reads below do not float before loading curr_region. 1943 OrderAccess::loadload(); 1944 // The above heap_region_containing() may return NULL as we always scan 1945 // until the end of the heap. In this case, just jump to the next region. 1946 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; 1947 1948 // Is the gap between reading the finger and doing the CAS too long? 1949 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); 1950 if (res == finger && curr_region != NULL) { 1951 // we succeeded 1952 HeapWord* bottom = curr_region->bottom(); 1953 HeapWord* limit = curr_region->next_top_at_mark_start(); 1954 1955 // Notice that _finger == end cannot be guaranteed here since 1956 // someone else might have moved the finger even further. 1957 assert(_finger >= end, "the finger should have moved forward"); 1958 1959 if (limit > bottom) { 1960 return curr_region; 1961 } else { 1962 assert(limit == bottom, 1963 "the region limit should be at bottom"); 1964 // we return NULL and the caller should try calling 1965 // claim_region() again.
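// For reference, the caller's retry shape looks roughly like the
// claiming loop in G1CMTask::do_marking_step() further down:
//
//   while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
//     HeapRegion* r = _cm->claim_region(_worker_id);  // may be NULL; retry
//     if (r != NULL) setup_for_region(r);
//     regular_clock_call();
//   }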
1966 return NULL; 1967 } 1968 } else { 1969 assert(_finger > finger, "the finger should have moved forward"); 1970 // read it again 1971 finger = _finger; 1972 } 1973 } 1974 1975 return NULL; 1976 } 1977 1978 #ifndef PRODUCT 1979 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 1980 private: 1981 G1CollectedHeap* _g1h; 1982 const char* _phase; 1983 int _info; 1984 1985 public: 1986 VerifyNoCSetOops(const char* phase, int info = -1) : 1987 _g1h(G1CollectedHeap::heap()), 1988 _phase(phase), 1989 _info(info) 1990 { } 1991 1992 void operator()(oop obj) const { 1993 guarantee(obj->is_oop(), 1994 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1995 p2i(obj), _phase, _info); 1996 guarantee(!_g1h->obj_in_cs(obj), 1997 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1998 p2i(obj), _phase, _info); 1999 } 2000 }; 2001 2002 void G1ConcurrentMark::verify_no_cset_oops() { 2003 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2004 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 2005 return; 2006 } 2007 2008 // Verify entries on the global mark stack 2009 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 2010 2011 // Verify entries on the task queues 2012 for (uint i = 0; i < _max_worker_id; ++i) { 2013 G1CMTaskQueue* queue = _task_queues->queue(i); 2014 queue->iterate(VerifyNoCSetOops("Queue", i)); 2015 } 2016 2017 // Verify the global finger 2018 HeapWord* global_finger = finger(); 2019 if (global_finger != NULL && global_finger < _heap_end) { 2020 // Since we always iterate over all regions, we might get a NULL HeapRegion 2021 // here. 2022 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 2023 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 2024 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 2025 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 2026 } 2027 2028 // Verify the task fingers 2029 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2030 for (uint i = 0; i < parallel_marking_threads(); ++i) { 2031 G1CMTask* task = _tasks[i]; 2032 HeapWord* task_finger = task->finger(); 2033 if (task_finger != NULL && task_finger < _heap_end) { 2034 // See above note on the global finger verification. 
2035 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 2036 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 2037 !task_hr->in_collection_set(), 2038 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 2039 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 2040 } 2041 } 2042 } 2043 #endif // PRODUCT 2044 void G1ConcurrentMark::create_live_data() { 2045 _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap); 2046 } 2047 2048 void G1ConcurrentMark::finalize_live_data() { 2049 _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap); 2050 } 2051 2052 void G1ConcurrentMark::verify_live_data() { 2053 _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap); 2054 } 2055 2056 void G1ConcurrentMark::clear_live_data(WorkGang* workers) { 2057 _g1h->g1_rem_set()->clear_card_live_data(workers); 2058 } 2059 2060 #ifdef ASSERT 2061 void G1ConcurrentMark::verify_live_data_clear() { 2062 _g1h->g1_rem_set()->verify_card_live_data_is_clear(); 2063 } 2064 #endif 2065 2066 void G1ConcurrentMark::print_stats() { 2067 if (!log_is_enabled(Debug, gc, stats)) { 2068 return; 2069 } 2070 log_debug(gc, stats)("---------------------------------------------------------------------"); 2071 for (size_t i = 0; i < _active_tasks; ++i) { 2072 _tasks[i]->print_stats(); 2073 log_debug(gc, stats)("---------------------------------------------------------------------"); 2074 } 2075 } 2076 2077 void G1ConcurrentMark::abort() { 2078 if (!cmThread()->during_cycle() || _has_aborted) { 2079 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2080 return; 2081 } 2082 2083 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2084 // concurrent bitmap clearing. 2085 { 2086 GCTraceTime(Debug, gc)("Clear Next Bitmap"); 2087 clear_bitmap(_nextMarkBitMap, _g1h->workers(), false); 2088 } 2089 // Note we cannot clear the previous marking bitmap here 2090 // since VerifyDuringGC verifies the objects marked during 2091 // a full GC against the previous bitmap. 2092 2093 { 2094 GCTraceTime(Debug, gc)("Clear Live Data"); 2095 clear_live_data(_g1h->workers()); 2096 } 2097 DEBUG_ONLY({ 2098 GCTraceTime(Debug, gc)("Verify Live Data Clear"); 2099 verify_live_data_clear(); 2100 }) 2101 // Empty mark stack 2102 reset_marking_state(); 2103 for (uint i = 0; i < _max_worker_id; ++i) { 2104 _tasks[i]->clear_region_fields(); 2105 } 2106 _first_overflow_barrier_sync.abort(); 2107 _second_overflow_barrier_sync.abort(); 2108 _has_aborted = true; 2109 2110 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2111 satb_mq_set.abandon_partial_marking(); 2112 // This can be called either during or outside marking, we'll read 2113 // the expected_active value from the SATB queue set. 2114 satb_mq_set.set_active_all_threads( 2115 false, /* new active value */ 2116 satb_mq_set.is_active() /* expected_active */); 2117 } 2118 2119 static void print_ms_time_info(const char* prefix, const char* name, 2120 NumberSeq& ns) { 2121 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2122 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2123 if (ns.num() > 0) { 2124 log_trace(gc, marking)("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 2125 prefix, ns.sd(), ns.maximum()); 2126 } 2127 } 2128 2129 void G1ConcurrentMark::print_summary_info() { 2130 Log(gc, marking) log; 2131 if (!log.is_trace()) { 2132 return; 2133 } 2134 2135 log.trace(" Concurrent marking:"); 2136 print_ms_time_info(" ", "init marks", _init_times); 2137 print_ms_time_info(" ", "remarks", _remark_times); 2138 { 2139 print_ms_time_info(" ", "final marks", _remark_mark_times); 2140 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2141 2142 } 2143 print_ms_time_info(" ", "cleanups", _cleanup_times); 2144 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2145 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2146 if (G1ScrubRemSets) { 2147 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 2148 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2149 } 2150 log.trace(" Total stop_world time = %8.2f s.", 2151 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2152 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2153 cmThread()->vtime_accum(), cmThread()->vtime_mark_accum()); 2154 } 2155 2156 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2157 _parallel_workers->print_worker_threads_on(st); 2158 } 2159 2160 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2161 _parallel_workers->threads_do(tc); 2162 } 2163 2164 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2165 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2166 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 2167 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 2168 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 2169 } 2170 2171 // Closure for iteration over bitmaps 2172 class G1CMBitMapClosure : public BitMapClosure { 2173 private: 2174 // the bitmap that is being iterated over 2175 G1CMBitMap* _nextMarkBitMap; 2176 G1ConcurrentMark* _cm; 2177 G1CMTask* _task; 2178 2179 public: 2180 G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) : 2181 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 2182 2183 bool do_bit(size_t offset) { 2184 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 2185 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 2186 assert( addr < _cm->finger(), "invariant"); 2187 assert(addr >= _task->finger(), "invariant"); 2188 2189 // We move that task's local finger along. 
2190 _task->move_finger_to(addr); 2191 2192 _task->scan_object(oop(addr)); 2193 // we only partially drain the local queue and global stack 2194 _task->drain_local_queue(true); 2195 _task->drain_global_stack(true); 2196 2197 // if the has_aborted flag has been raised, we need to bail out of 2198 // the iteration 2199 return !_task->has_aborted(); 2200 } 2201 }; 2202 2203 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2204 ReferenceProcessor* result = g1h->ref_processor_cm(); 2205 assert(result != NULL, "CM reference processor should not be NULL"); 2206 return result; 2207 } 2208 2209 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2210 G1ConcurrentMark* cm, 2211 G1CMTask* task) 2212 : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)), 2213 _g1h(g1h), _cm(cm), _task(task) 2214 { } 2215 2216 void G1CMTask::setup_for_region(HeapRegion* hr) { 2217 assert(hr != NULL, 2218 "claim_region() should have filtered out NULL regions"); 2219 _curr_region = hr; 2220 _finger = hr->bottom(); 2221 update_region_limit(); 2222 } 2223 2224 void G1CMTask::update_region_limit() { 2225 HeapRegion* hr = _curr_region; 2226 HeapWord* bottom = hr->bottom(); 2227 HeapWord* limit = hr->next_top_at_mark_start(); 2228 2229 if (limit == bottom) { 2230 // The region was collected underneath our feet. 2231 // We set the finger to bottom to ensure that the bitmap 2232 // iteration that will follow this will not do anything. 2233 // (this is not a condition that holds when we set the region up, 2234 // as the region is not supposed to be empty in the first place) 2235 _finger = bottom; 2236 } else if (limit >= _region_limit) { 2237 assert(limit >= _finger, "peace of mind"); 2238 } else { 2239 assert(limit < _region_limit, "only way to get here"); 2240 // This can happen under some pretty unusual circumstances. An 2241 // evacuation pause empties the region underneath our feet (NTAMS 2242 // at bottom). We then do some allocation in the region (NTAMS 2243 // stays at bottom), followed by the region being used as a GC 2244 // alloc region (NTAMS will move to top() and the objects 2245 // originally below it will be grayed). All objects now marked in 2246 // the region are explicitly grayed, if below the global finger, 2247 // and we do not need in fact to scan anything else. So, we simply 2248 // set _finger to be limit to ensure that the bitmap iteration 2249 // doesn't do anything. 2250 _finger = limit; 2251 } 2252 2253 _region_limit = limit; 2254 } 2255 2256 void G1CMTask::giveup_current_region() { 2257 assert(_curr_region != NULL, "invariant"); 2258 clear_region_fields(); 2259 } 2260 2261 void G1CMTask::clear_region_fields() { 2262 // Values for these three fields that indicate that we're not 2263 // holding on to a region. 
2264 _curr_region = NULL; 2265 _finger = NULL; 2266 _region_limit = NULL; 2267 } 2268 2269 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 2270 if (cm_oop_closure == NULL) { 2271 assert(_cm_oop_closure != NULL, "invariant"); 2272 } else { 2273 assert(_cm_oop_closure == NULL, "invariant"); 2274 } 2275 _cm_oop_closure = cm_oop_closure; 2276 } 2277 2278 void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) { 2279 guarantee(nextMarkBitMap != NULL, "invariant"); 2280 _nextMarkBitMap = nextMarkBitMap; 2281 clear_region_fields(); 2282 2283 _calls = 0; 2284 _elapsed_time_ms = 0.0; 2285 _termination_time_ms = 0.0; 2286 _termination_start_time_ms = 0.0; 2287 } 2288 2289 bool G1CMTask::should_exit_termination() { 2290 regular_clock_call(); 2291 // This is called when we are in the termination protocol. We should 2292 // quit if, for some reason, this task wants to abort or the global 2293 // stack is not empty (this means that we can get work from it). 2294 return !_cm->mark_stack_empty() || has_aborted(); 2295 } 2296 2297 void G1CMTask::reached_limit() { 2298 assert(_words_scanned >= _words_scanned_limit || 2299 _refs_reached >= _refs_reached_limit, 2300 "shouldn't have been called otherwise"); 2301 regular_clock_call(); 2302 } 2303 2304 void G1CMTask::regular_clock_call() { 2305 if (has_aborted()) return; 2306 2307 // First, we need to recalculate the words scanned and refs reached 2308 // limits for the next clock call. 2309 recalculate_limits(); 2310 2311 // During the regular clock call we do the following: 2312 2313 // (1) If an overflow has been flagged, then we abort. 2314 if (_cm->has_overflown()) { 2315 set_has_aborted(); 2316 return; 2317 } 2318 2319 // If we are not concurrent (i.e. we're doing remark) we don't need 2320 // to check anything else. The other steps are only needed during 2321 // the concurrent marking phase. 2322 if (!concurrent()) return; 2323 2324 // (2) If marking has been aborted for Full GC, then we also abort. 2325 if (_cm->has_aborted()) { 2326 set_has_aborted(); 2327 return; 2328 } 2329 2330 double curr_time_ms = os::elapsedVTime() * 1000.0; 2331 2332 // (3) We check whether we should yield. If we have to, then we abort. 2333 if (SuspendibleThreadSet::should_yield()) { 2334 // We should yield. To do this we abort the task. The caller is 2335 // responsible for yielding. 2336 set_has_aborted(); 2337 return; 2338 } 2339 2340 // (4) We check whether we've reached our time quota. If we have, 2341 // then we abort. 2342 double elapsed_time_ms = curr_time_ms - _start_time_ms; 2343 if (elapsed_time_ms > _time_target_ms) { 2344 set_has_aborted(); 2345 _has_timed_out = true; 2346 return; 2347 } 2348 2349 // (5) Finally, we check whether there are enough completed SATB 2350 // buffers available for processing. If there are, we abort.
2351 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2352 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 2353 // we do need to process SATB buffers, so we'll abort and restart 2354 // the marking task to do so 2355 set_has_aborted(); 2356 return; 2357 } 2358 } 2359 2360 void G1CMTask::recalculate_limits() { 2361 _real_words_scanned_limit = _words_scanned + words_scanned_period; 2362 _words_scanned_limit = _real_words_scanned_limit; 2363 2364 _real_refs_reached_limit = _refs_reached + refs_reached_period; 2365 _refs_reached_limit = _real_refs_reached_limit; 2366 } 2367 2368 void G1CMTask::decrease_limits() { 2369 // This is called when we believe that we're going to do an infrequent 2370 // operation which will increase the per-byte scanned cost (i.e. move 2371 // entries to/from the global stack). It basically tries to decrease the 2372 // scanning limit so that the clock is called earlier. 2373 2374 _words_scanned_limit = _real_words_scanned_limit - 2375 3 * words_scanned_period / 4; 2376 _refs_reached_limit = _real_refs_reached_limit - 2377 3 * refs_reached_period / 4; 2378 } 2379 2380 void G1CMTask::move_entries_to_global_stack() { 2381 // Local array where we'll store the entries that will be popped 2382 // from the local queue. 2383 oop buffer[G1CMMarkStack::OopsPerChunk]; 2384 2385 size_t n = 0; 2386 oop obj; 2387 while (n < G1CMMarkStack::OopsPerChunk && _task_queue->pop_local(obj)) { 2388 buffer[n] = obj; 2389 ++n; 2390 } 2391 if (n < G1CMMarkStack::OopsPerChunk) { 2392 buffer[n] = NULL; 2393 } 2394 2395 if (n > 0) { 2396 if (!_cm->mark_stack_push(buffer)) { 2397 set_has_aborted(); 2398 } 2399 } 2400 2401 // This operation was quite expensive, so decrease the limits. 2402 decrease_limits(); 2403 } 2404 2405 bool G1CMTask::get_entries_from_global_stack() { 2406 // Local array where we'll store the entries that will be popped 2407 // from the global stack. 2408 oop buffer[G1CMMarkStack::OopsPerChunk]; 2409 2410 if (!_cm->mark_stack_pop(buffer)) { 2411 return false; 2412 } 2413 2414 // We did actually pop at least one entry. 2415 for (size_t i = 0; i < G1CMMarkStack::OopsPerChunk; ++i) { 2416 oop elem = buffer[i]; 2417 if (elem == NULL) { 2418 break; 2419 } 2420 bool success = _task_queue->push(elem); 2421 // We only call this when the local queue is empty or under a 2422 // given target limit. So, we do not expect this push to fail. 2423 assert(success, "invariant"); 2424 } 2425 2426 // This operation was quite expensive, so decrease the limits. 2427 decrease_limits(); 2428 return true; 2429 } 2430 2431 void G1CMTask::drain_local_queue(bool partially) { 2432 if (has_aborted()) return; 2433 2434 // Decide what the target size is, depending on whether we're going to 2435 // drain it partially (so that other tasks can steal if they run out 2436 // of things to do) or totally (at the very end). 2437 size_t target_size; 2438 if (partially) { 2439 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize); 2440 } else { 2441 target_size = 0; 2442 } 2443 2444 if (_task_queue->size() > target_size) { 2445 oop obj; 2446 bool ret = _task_queue->pop_local(obj); 2447 while (ret) { 2448 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 2449 assert(!_g1h->is_on_master_free_list( 2450 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 2451 2452 scan_object(obj); 2453 2454 if (_task_queue->size() <= target_size || has_aborted()) { 2455 ret = false; 2456 } else { 2457 ret = _task_queue->pop_local(obj); 2458 } 2459 } 2460 } 2461 } 2462 2463 void G1CMTask::drain_global_stack(bool partially) { 2464 if (has_aborted()) return; 2465 2466 // We have a policy to drain the local queue before we attempt to 2467 // drain the global stack. 2468 assert(partially || _task_queue->size() == 0, "invariant"); 2469 2470 // Decide what the target size is, depending on whether we're going to 2471 // drain it partially (so that other tasks can steal if they run out 2472 // of things to do) or totally (at the very end). 2473 // Notice that when draining the global mark stack partially, due to the raciness 2474 // of the mark stack size update we might in fact drop below the target. But, 2475 // this is not a problem. 2476 // In case of total draining, we simply process until the global mark stack is 2477 // totally empty, disregarding the size counter. 2478 if (partially) { 2479 size_t const target_size = _cm->partial_mark_stack_size_target(); 2480 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 2481 if (get_entries_from_global_stack()) { 2482 drain_local_queue(partially); 2483 } 2484 } 2485 } else { 2486 while (!has_aborted() && get_entries_from_global_stack()) { 2487 drain_local_queue(partially); 2488 } 2489 } 2490 } 2491 2492 // The SATB queue has several assumptions about whether to call the par or 2493 // non-par versions of the methods. This is why some of the code is 2494 // replicated. We should really get rid of the single-threaded version 2495 // of the code to simplify things. 2496 void G1CMTask::drain_satb_buffers() { 2497 if (has_aborted()) return; 2498 2499 // We set this so that the regular clock knows that we're in the 2500 // middle of draining buffers and doesn't set the abort flag when it 2501 // notices that SATB buffers are available for draining. It'd be 2502 // very counterproductive if it did that. :-) 2503 _draining_satb_buffers = true; 2504 2505 G1CMSATBBufferClosure satb_cl(this, _g1h); 2506 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2507 2508 // This keeps claiming and applying the closure to completed buffers 2509 // until we run out of buffers or we need to abort.
2510 while (!has_aborted() && 2511 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 2512 regular_clock_call(); 2513 } 2514 2515 _draining_satb_buffers = false; 2516 2517 assert(has_aborted() || 2518 concurrent() || 2519 satb_mq_set.completed_buffers_num() == 0, "invariant"); 2520 2521 // again, this was a potentially expensive operation, decrease the 2522 // limits to get the regular clock call early 2523 decrease_limits(); 2524 } 2525 2526 void G1CMTask::print_stats() { 2527 log_debug(gc, stats)("Marking Stats, task = %u, calls = %d", 2528 _worker_id, _calls); 2529 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 2530 _elapsed_time_ms, _termination_time_ms); 2531 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 2532 _step_times_ms.num(), _step_times_ms.avg(), 2533 _step_times_ms.sd()); 2534 log_debug(gc, stats)(" max = %1.2lfms, total = %1.2lfms", 2535 _step_times_ms.maximum(), _step_times_ms.sum()); 2536 } 2537 2538 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) { 2539 return _task_queues->steal(worker_id, hash_seed, obj); 2540 } 2541 2542 /***************************************************************************** 2543 2544 The do_marking_step(time_target_ms, ...) method is the building 2545 block of the parallel marking framework. It can be called in parallel 2546 with other invocations of do_marking_step() on different tasks 2547 (but only one per task, obviously) and concurrently with the 2548 mutator threads, or during remark, hence it eliminates the need 2549 for two versions of the code. When called during remark, it will 2550 pick up from where the task left off during the concurrent marking 2551 phase. Interestingly, tasks are also claimable during evacuation 2552 pauses too, since do_marking_step() ensures that it aborts before 2553 it needs to yield. 2554 2555 The data structures that it uses to do marking work are the 2556 following: 2557 2558 (1) Marking Bitmap. If there are gray objects that appear only 2559 on the bitmap (this happens either when dealing with an overflow 2560 or when the initial marking phase has simply marked the roots 2561 and didn't push them on the stack), then tasks claim heap 2562 regions whose bitmap they then scan to find gray objects. A 2563 global finger indicates where the end of the last claimed region 2564 is. A local finger indicates how far into the region a task has 2565 scanned. The two fingers are used to determine how to gray an 2566 object (i.e. whether simply marking it is OK, as it will be 2567 visited by a task in the future, or whether it needs to be also 2568 pushed on a stack). 2569 2570 (2) Local Queue. The local queue of the task which is accessed 2571 reasonably efficiently by the task. Other tasks can steal from 2572 it when they run out of work. Throughout the marking phase, a 2573 task attempts to keep its local queue short but not totally 2574 empty, so that entries are available for stealing by other 2575 tasks. Only when there is no more work, a task will totally 2576 drain its local queue. 2577 2578 (3) Global Mark Stack. This handles local queue overflow. During 2579 marking only sets of entries are moved between it and the local 2580 queues, as access to it requires a mutex and more fine-grain 2581 interaction with it which might cause contention. If it 2582 overflows, then the marking phase should restart and iterate 2583 over the bitmap to identify gray objects. 
Throughout the marking 2584 phase, tasks attempt to keep the global mark stack at a small 2585 length but not totally empty, so that entries are available for 2586 popping by other tasks. Only when there is no more work, tasks 2587 will totally drain the global mark stack. 2588 2589 (4) SATB Buffer Queue. This is where completed SATB buffers are 2590 made available. Buffers are regularly removed from this queue 2591 and scanned for roots, so that the queue doesn't get too 2592 long. During remark, all completed buffers are processed, as 2593 well as the filled in parts of any uncompleted buffers. 2594 2595 The do_marking_step() method tries to abort when the time target 2596 has been reached. There are a few other cases when the 2597 do_marking_step() method also aborts: 2598 2599 (1) When the marking phase has been aborted (after a Full GC). 2600 2601 (2) When a global overflow (on the global stack) has been 2602 triggered. Before the task aborts, it will actually sync up with 2603 the other tasks to ensure that all the marking data structures 2604 (local queues, stacks, fingers etc.) are re-initialized so that 2605 when do_marking_step() completes, the marking phase can 2606 immediately restart. 2607 2608 (3) When enough completed SATB buffers are available. The 2609 do_marking_step() method only tries to drain SATB buffers right 2610 at the beginning. So, if enough buffers are available, the 2611 marking step aborts and the SATB buffers are processed at 2612 the beginning of the next invocation. 2613 2614 (4) To yield. When we have to yield, we abort and do the yield 2615 right at the end of do_marking_step(). This saves us from a lot 2616 of hassle as, by yielding, we might allow a Full GC. If this 2617 happens, then objects will be compacted underneath our feet, the 2618 heap might shrink, etc. We save checking for this by just 2619 aborting and doing the yield right at the end. 2620 2621 From the above it follows that the do_marking_step() method should 2622 be called in a loop (or, otherwise, regularly) until it completes. 2623 2624 If a marking step completes without its has_aborted() flag being 2625 true, it means it has completed the current marking phase (and 2626 also all other marking tasks have done so and have all synced up). 2627 2628 A method called regular_clock_call() is invoked "regularly" (in 2629 sub ms intervals) throughout marking. It is this clock method that 2630 checks all the abort conditions which were mentioned above and 2631 decides when the task should abort. A work-based scheme is used to 2632 trigger this clock method: when the number of object words the 2633 marking phase has scanned or the number of references the marking 2634 phase has visited reaches a given limit. Additional invocations of 2635 the clock method have been planted in a few other strategic places 2636 too. The initial reason for the clock method was to avoid calling 2637 vtime too regularly, as it is quite expensive. So, once it was in 2638 place, it was natural to piggy-back all the other conditions on it 2639 too and not constantly check them throughout the code. 2640 2641 If do_termination is true then do_marking_step will enter its 2642 termination protocol. 2643 2644 The value of is_serial must be true when do_marking_step is being 2645 called serially (i.e. by the VMThread) and do_marking_step should 2646 skip any synchronization in the termination and overflow code. 2647 Examples include the serial remark code and the serial reference 2648 processing closures.
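  For instance, the serial reference processing drain closure above
  (G1CMDrainMarkingStackClosure::do_void()) drives it as:

    do {
      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             true /* is_serial */);
    } while (_task->has_aborted() && !_cm->has_overflown());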
2649 2650 The value of is_serial must be false when do_marking_step is 2651 being called by any of the worker threads in a work gang. 2652 Examples include the concurrent marking code (CMMarkingTask), 2653 the MT remark code, and the MT reference processing closures. 2654 2655 *****************************************************************************/ 2656 2657 void G1CMTask::do_marking_step(double time_target_ms, 2658 bool do_termination, 2659 bool is_serial) { 2660 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2661 assert(concurrent() == _cm->concurrent(), "they should be the same"); 2662 2663 G1Policy* g1_policy = _g1h->g1_policy(); 2664 assert(_task_queues != NULL, "invariant"); 2665 assert(_task_queue != NULL, "invariant"); 2666 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant"); 2667 2668 assert(!_claimed, 2669 "only one thread should claim this task at any one time"); 2670 2671 // OK, this doesn't safeguard against all possible scenarios, as it is 2672 // possible for two threads to set the _claimed flag at the same 2673 // time. But it is only for debugging purposes anyway and it will 2674 // catch most problems. 2675 _claimed = true; 2676 2677 _start_time_ms = os::elapsedVTime() * 1000.0; 2678 2679 // If do_stealing is true then do_marking_step will attempt to 2680 // steal work from the other G1CMTasks. It only makes sense to 2681 // enable stealing when the termination protocol is enabled 2682 // and do_marking_step() is not being called serially. 2683 bool do_stealing = do_termination && !is_serial; 2684 2685 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 2686 _time_target_ms = time_target_ms - diff_prediction_ms; 2687 2688 // set up the variables that are used in the work-based scheme to 2689 // call the regular clock method 2690 _words_scanned = 0; 2691 _refs_reached = 0; 2692 recalculate_limits(); 2693 2694 // clear all flags 2695 clear_has_aborted(); 2696 _has_timed_out = false; 2697 _draining_satb_buffers = false; 2698 2699 ++_calls; 2700 2701 // Set up the bitmap and oop closures. Anything that uses them is 2702 // eventually called from this method, so it is OK to allocate these 2703 // statically. 2704 G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap); 2705 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 2706 set_cm_oop_closure(&cm_oop_closure); 2707 2708 if (_cm->has_overflown()) { 2709 // This can happen if the mark stack overflows during a GC pause 2710 // and this task, after a yield point, restarts. We have to abort 2711 // as we need to get into the overflow protocol which happens 2712 // right at the end of this task. 2713 set_has_aborted(); 2714 } 2715 2716 // First drain any available SATB buffers. After this, we will not 2717 // look at SATB buffers before the next invocation of this method. 2718 // If enough completed SATB buffers are queued up, the regular clock 2719 // will abort this task so that it restarts. 2720 drain_satb_buffers(); 2721 // ...then partially drain the local queue and the global stack 2722 drain_local_queue(true); 2723 drain_global_stack(true); 2724 2725 do { 2726 if (!has_aborted() && _curr_region != NULL) { 2727 // This means that we're already holding on to a region. 2728 assert(_finger != NULL, "if region is not NULL, then the finger " 2729 "should not be NULL either"); 2730 2731 // We might have restarted this task after an evacuation pause 2732 // which might have evacuated the region we're holding on to 2733 // underneath our feet.
Let's read its limit again to make sure 2734 // that we do not iterate over a region of the heap that 2735 // contains garbage (update_region_limit() will also move 2736 // _finger to the start of the region if it is found empty). 2737 update_region_limit(); 2738 // We will start from _finger not from the start of the region, 2739 // as we might be restarting this task after aborting half-way 2740 // through scanning this region. In this case, _finger points to 2741 // the address where we last found a marked object. If this is a 2742 // fresh region, _finger points to start(). 2743 MemRegion mr = MemRegion(_finger, _region_limit); 2744 2745 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2746 "humongous regions should go around loop once only"); 2747 2748 // Some special cases: 2749 // If the memory region is empty, we can just give up the region. 2750 // If the current region is humongous then we only need to check 2751 // the bitmap for the bit associated with the start of the object, 2752 // scan the object if it's live, and give up the region. 2753 // Otherwise, let's iterate over the bitmap of the part of the region 2754 // that is left. 2755 // If the iteration is successful, give up the region. 2756 if (mr.is_empty()) { 2757 giveup_current_region(); 2758 regular_clock_call(); 2759 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2760 if (_nextMarkBitMap->isMarked(mr.start())) { 2761 // The object is marked - apply the closure 2762 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start()); 2763 bitmap_closure.do_bit(offset); 2764 } 2765 // Even if this task aborted while scanning the humongous object 2766 // we can (and should) give up the current region. 2767 giveup_current_region(); 2768 regular_clock_call(); 2769 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) { 2770 giveup_current_region(); 2771 regular_clock_call(); 2772 } else { 2773 assert(has_aborted(), "currently the only way to do so"); 2774 // The only way to abort the bitmap iteration is to return 2775 // false from the do_bit() method. However, inside the 2776 // do_bit() method we move the _finger to point to the 2777 // object currently being looked at. So, if we bail out, we 2778 // have definitely set _finger to something non-null. 2779 assert(_finger != NULL, "invariant"); 2780 2781 // Region iteration was actually aborted. So now _finger 2782 // points to the address of the object we last scanned. If we 2783 // leave it there, when we restart this task, we will rescan 2784 // the object. It is easy to avoid this. We move the finger by 2785 // enough to point to the next possible object header (the 2786 // bitmap knows by how much we need to move it as it knows its 2787 // granularity). 2788 assert(_finger < _region_limit, "invariant"); 2789 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger); 2790 // Check if bitmap iteration was aborted while scanning the last object 2791 if (new_finger >= _region_limit) { 2792 giveup_current_region(); 2793 } else { 2794 move_finger_to(new_finger); 2795 } 2796 } 2797 } 2798 // At this point we have either completed iterating over the 2799 // region we were holding on to, or we have aborted. 2800 2801 // We then partially drain the local queue and the global stack. 2802 // (Do we really need this?) 
2803 drain_local_queue(true); 2804 drain_global_stack(true); 2805 2806 // Read the note on the claim_region() method about why it might 2807 // return NULL with potentially more regions available for 2808 // claiming and why we have to check out_of_regions() to determine 2809 // whether we're done or not. 2810 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 2811 // We are going to try to claim a new region. We should have 2812 // given up on the previous one. 2813 // Separated the asserts so that we know which one fires. 2814 assert(_curr_region == NULL, "invariant"); 2815 assert(_finger == NULL, "invariant"); 2816 assert(_region_limit == NULL, "invariant"); 2817 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 2818 if (claimed_region != NULL) { 2819 // Yes, we managed to claim one 2820 setup_for_region(claimed_region); 2821 assert(_curr_region == claimed_region, "invariant"); 2822 } 2823 // It is important to call the regular clock here. It might take 2824 // a while to claim a region if, for example, we hit a large 2825 // block of empty regions. So we need to call the regular clock 2826 // method once round the loop to make sure it's called 2827 // frequently enough. 2828 regular_clock_call(); 2829 } 2830 2831 if (!has_aborted() && _curr_region == NULL) { 2832 assert(_cm->out_of_regions(), 2833 "at this point we should be out of regions"); 2834 } 2835 } while ( _curr_region != NULL && !has_aborted()); 2836 2837 if (!has_aborted()) { 2838 // We cannot check whether the global stack is empty, since other 2839 // tasks might be pushing objects to it concurrently. 2840 assert(_cm->out_of_regions(), 2841 "at this point we should be out of regions"); 2842 // Try to reduce the number of available SATB buffers so that 2843 // remark has less work to do. 2844 drain_satb_buffers(); 2845 } 2846 2847 // Since we've done everything else, we can now totally drain the 2848 // local queue and global stack. 2849 drain_local_queue(false); 2850 drain_global_stack(false); 2851 2852 // Attempt at work stealing from other tasks' queues. 2853 if (do_stealing && !has_aborted()) { 2854 // We have not aborted. This means that we have finished all that 2855 // we could. Let's try to do some stealing... 2856 2857 // We cannot check whether the global stack is empty, since other 2858 // tasks might be pushing objects to it concurrently. 2859 assert(_cm->out_of_regions() && _task_queue->size() == 0, 2860 "only way to reach here"); 2861 while (!has_aborted()) { 2862 oop obj; 2863 if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) { 2864 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), 2865 "any stolen object should be marked"); 2866 scan_object(obj); 2867 2868 // And since we're towards the end, let's totally drain the 2869 // local queue and global stack. 2870 drain_local_queue(false); 2871 drain_global_stack(false); 2872 } else { 2873 break; 2874 } 2875 } 2876 } 2877 2878 // We still haven't aborted. Now, let's try to get into the 2879 // termination protocol. 2880 if (do_termination && !has_aborted()) { 2881 // We cannot check whether the global stack is empty, since other 2882 // tasks might be concurrently pushing objects on it. 2883 // Separated the asserts so that we know which one fires.
2884 assert(_cm->out_of_regions(), "only way to reach here"); 2885 assert(_task_queue->size() == 0, "only way to reach here"); 2886 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 2887 2888 // The G1CMTask class also extends the TerminatorTerminator class, 2889 // hence its should_exit_termination() method will also decide 2890 // whether to exit the termination protocol or not. 2891 bool finished = (is_serial || 2892 _cm->terminator()->offer_termination(this)); 2893 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 2894 _termination_time_ms += 2895 termination_end_time_ms - _termination_start_time_ms; 2896 2897 if (finished) { 2898 // We're all done. 2899 2900 if (_worker_id == 0) { 2901 // let's allow task 0 to do this 2902 if (concurrent()) { 2903 assert(_cm->concurrent_marking_in_progress(), "invariant"); 2904 // we need to set this to false before the next 2905 // safepoint. This way we ensure that the marking phase 2906 // doesn't observe any more heap expansions. 2907 _cm->clear_concurrent_marking_in_progress(); 2908 } 2909 } 2910 2911 // We can now guarantee that the global stack is empty, since 2912 // all other tasks have finished. We separated the guarantees so 2913 // that, if a condition is false, we can immediately find out 2914 // which one. 2915 guarantee(_cm->out_of_regions(), "only way to reach here"); 2916 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 2917 guarantee(_task_queue->size() == 0, "only way to reach here"); 2918 guarantee(!_cm->has_overflown(), "only way to reach here"); 2919 guarantee(!_cm->mark_stack_overflow(), "only way to reach here"); 2920 } else { 2921 // Apparently there's more work to do. Let's abort this task. It 2922 // will restart it and we can hopefully find more things to do. 2923 set_has_aborted(); 2924 } 2925 } 2926 2927 // Mainly for debugging purposes to make sure that a pointer to the 2928 // closure which was statically allocated in this frame doesn't 2929 // escape it by accident. 2930 set_cm_oop_closure(NULL); 2931 double end_time_ms = os::elapsedVTime() * 1000.0; 2932 double elapsed_time_ms = end_time_ms - _start_time_ms; 2933 // Update the step history. 2934 _step_times_ms.add(elapsed_time_ms); 2935 2936 if (has_aborted()) { 2937 // The task was aborted for some reason. 2938 if (_has_timed_out) { 2939 double diff_ms = elapsed_time_ms - _time_target_ms; 2940 // Keep statistics of how well we did with respect to hitting 2941 // our target only if we actually timed out (if we aborted for 2942 // other reasons, then the results might get skewed). 2943 _marking_step_diffs_ms.add(diff_ms); 2944 } 2945 2946 if (_cm->has_overflown()) { 2947 // This is the interesting one. We aborted because a global 2948 // overflow was raised. This means we have to restart the 2949 // marking phase and start iterating over regions. However, in 2950 // order to do this we have to make sure that all tasks stop 2951 // what they are doing and re-initialize in a safe manner. We 2952 // will achieve this with the use of two barrier sync points. 2953 2954 if (!is_serial) { 2955 // We only need to enter the sync barrier if being called 2956 // from a parallel context 2957 _cm->enter_first_sync_barrier(_worker_id); 2958 2959 // When we exit this sync barrier we know that all tasks have 2960 // stopped doing marking work. So, it's now safe to 2961 // re-initialize our data structures. At the end of this method, 2962 // task 0 will clear the global data structures. 2963 } 2964 2965 // We clear the local state of this task... 
2966 clear_region_fields(); 2967 2968 if (!is_serial) { 2969 // ...and enter the second barrier. 2970 _cm->enter_second_sync_barrier(_worker_id); 2971 } 2972 // At this point, if we're during the concurrent phase of 2973 // marking, everything has been re-initialized and we're 2974 // ready to restart. 2975 } 2976 } 2977 2978 _claimed = false; 2979 } 2980 2981 G1CMTask::G1CMTask(uint worker_id, 2982 G1ConcurrentMark* cm, 2983 G1CMTaskQueue* task_queue, 2984 G1CMTaskQueueSet* task_queues) 2985 : _g1h(G1CollectedHeap::heap()), 2986 _worker_id(worker_id), _cm(cm), 2987 _claimed(false), 2988 _nextMarkBitMap(NULL), _hash_seed(17), 2989 _task_queue(task_queue), 2990 _task_queues(task_queues), 2991 _cm_oop_closure(NULL) { 2992 guarantee(task_queue != NULL, "invariant"); 2993 guarantee(task_queues != NULL, "invariant"); 2994 2995 _marking_step_diffs_ms.add(0.5); 2996 } 2997 2998 // These are formatting macros that are used below to ensure 2999 // consistent formatting. The *_H_* versions are used to format the 3000 // header for a particular value and they should be kept consistent 3001 // with the corresponding macro. Also note that most of the macros add 3002 // the necessary white space (as a prefix) which makes them a bit 3003 // easier to compose. 3004 3005 // All the output lines are prefixed with this string to be able to 3006 // identify them easily in a large log file. 3007 #define G1PPRL_LINE_PREFIX "###" 3008 3009 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 3010 #ifdef _LP64 3011 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 3012 #else // _LP64 3013 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 3014 #endif // _LP64 3015 3016 // For per-region info 3017 #define G1PPRL_TYPE_FORMAT " %-4s" 3018 #define G1PPRL_TYPE_H_FORMAT " %4s" 3019 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 3020 #define G1PPRL_BYTE_H_FORMAT " %9s" 3021 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 3022 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 3023 3024 // For summary info 3025 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 3026 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 3027 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 3028 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 3029 3030 G1PrintRegionLivenessInfoClosure:: 3031 G1PrintRegionLivenessInfoClosure(const char* phase_name) 3032 : _total_used_bytes(0), _total_capacity_bytes(0), 3033 _total_prev_live_bytes(0), _total_next_live_bytes(0), 3034 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 3035 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 3036 MemRegion g1_reserved = g1h->g1_reserved(); 3037 double now = os::elapsedTime(); 3038 3039 // Print the header of the output. 
3040 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 3041 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 3042 G1PPRL_SUM_ADDR_FORMAT("reserved") 3043 G1PPRL_SUM_BYTE_FORMAT("region-size"), 3044 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 3045 HeapRegion::GrainBytes); 3046 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3047 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3048 G1PPRL_TYPE_H_FORMAT 3049 G1PPRL_ADDR_BASE_H_FORMAT 3050 G1PPRL_BYTE_H_FORMAT 3051 G1PPRL_BYTE_H_FORMAT 3052 G1PPRL_BYTE_H_FORMAT 3053 G1PPRL_DOUBLE_H_FORMAT 3054 G1PPRL_BYTE_H_FORMAT 3055 G1PPRL_BYTE_H_FORMAT, 3056 "type", "address-range", 3057 "used", "prev-live", "next-live", "gc-eff", 3058 "remset", "code-roots"); 3059 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3060 G1PPRL_TYPE_H_FORMAT 3061 G1PPRL_ADDR_BASE_H_FORMAT 3062 G1PPRL_BYTE_H_FORMAT 3063 G1PPRL_BYTE_H_FORMAT 3064 G1PPRL_BYTE_H_FORMAT 3065 G1PPRL_DOUBLE_H_FORMAT 3066 G1PPRL_BYTE_H_FORMAT 3067 G1PPRL_BYTE_H_FORMAT, 3068 "", "", 3069 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 3070 "(bytes)", "(bytes)"); 3071 } 3072 3073 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 3074 const char* type = r->get_type_str(); 3075 HeapWord* bottom = r->bottom(); 3076 HeapWord* end = r->end(); 3077 size_t capacity_bytes = r->capacity(); 3078 size_t used_bytes = r->used(); 3079 size_t prev_live_bytes = r->live_bytes(); 3080 size_t next_live_bytes = r->next_live_bytes(); 3081 double gc_eff = r->gc_efficiency(); 3082 size_t remset_bytes = r->rem_set()->mem_size(); 3083 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3084 3085 _total_used_bytes += used_bytes; 3086 _total_capacity_bytes += capacity_bytes; 3087 _total_prev_live_bytes += prev_live_bytes; 3088 _total_next_live_bytes += next_live_bytes; 3089 _total_remset_bytes += remset_bytes; 3090 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3091 3092 // Print a line for this particular region. 3093 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3094 G1PPRL_TYPE_FORMAT 3095 G1PPRL_ADDR_BASE_FORMAT 3096 G1PPRL_BYTE_FORMAT 3097 G1PPRL_BYTE_FORMAT 3098 G1PPRL_BYTE_FORMAT 3099 G1PPRL_DOUBLE_FORMAT 3100 G1PPRL_BYTE_FORMAT 3101 G1PPRL_BYTE_FORMAT, 3102 type, p2i(bottom), p2i(end), 3103 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3104 remset_bytes, strong_code_roots_bytes); 3105 3106 return false; 3107 } 3108 3109 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3110 // add static memory usages to remembered set sizes 3111 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3112 // Print the footer of the output. 3113 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3114 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3115 " SUMMARY" 3116 G1PPRL_SUM_MB_FORMAT("capacity") 3117 G1PPRL_SUM_MB_PERC_FORMAT("used") 3118 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3119 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3120 G1PPRL_SUM_MB_FORMAT("remset") 3121 G1PPRL_SUM_MB_FORMAT("code-roots"), 3122 bytes_to_mb(_total_capacity_bytes), 3123 bytes_to_mb(_total_used_bytes), 3124 perc(_total_used_bytes, _total_capacity_bytes), 3125 bytes_to_mb(_total_prev_live_bytes), 3126 perc(_total_prev_live_bytes, _total_capacity_bytes), 3127 bytes_to_mb(_total_next_live_bytes), 3128 perc(_total_next_live_bytes, _total_capacity_bytes), 3129 bytes_to_mb(_total_remset_bytes), 3130 bytes_to_mb(_total_strong_code_roots_bytes)); 3131 }
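// For reference, a hedged usage sketch of this closure (it is driven
// elsewhere in this file by iterating over all heap regions; the exact
// call site is not shown here):
//
//   G1PrintRegionLivenessInfoClosure cl("Post-Marking");
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//
// doHeapRegion() prints one "###" line per region and returns false to
// continue the iteration; the destructor then prints the "### SUMMARY"
// footer.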