/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/growableArray.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize == heap_rs.word_size();
}
#endif

void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clear_range(mr);
}

void G1CMBitMap::clear_range(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0),
  _should_expand(false) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::allocate_or_null(new_capacity);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();
  _should_expand = false;

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  // Clear expansion flag
  _should_expand = false;

  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _global_mark_stack(),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / os::initial_active_processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void G1ConcurrentMark::reset_marking_state() {
  _global_mark_stack.set_should_expand(has_overflown());
  _global_mark_stack.set_empty();
  clear_has_overflown();
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_parallel_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible to be suspended for a Full GC or an evacuation pause
 * could occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state();

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      G1CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->do_yield_check();

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
         "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
         max_parallel_marking_threads(), n_conc_workers);
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _parallel_marking_threads = MIN2(calc_parallel_marking_threads(),
                                     // We distribute work on a per-region basis, so starting
                                     // more threads than that is useless.
                                     root_regions()->num_root_regions());
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _parallel_marking_threads, root_regions()->num_root_regions());
    _parallel_workers->run_task(&task, _parallel_marking_threads);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _parallel_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_global_mark_stack.should_expand()) {
    _global_mark_stack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc)("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc)("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// state. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  int _ref_counter_limit;
  int _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists. We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
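  // (The reference processor uses this closure to decide which discovered
  // References still have live referents; "alive" here means the object is
  // considered reachable by the current concurrent marking information -
  // roughly, it is marked in the next bitmap or lies above its region's NTAMS.)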
  G1CMIsAliveClosure g1_is_alive(g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);

    ReferenceProcessor* rp = g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_global_mark_stack.is_empty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          _gc_timer_cm);
    _gc_tracer_cm->report_gc_reference_stats(stats);

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.

    assert(has_overflown() || _global_mark_stack.is_empty(),
           "Mark stack should be empty (unless it has overflown)");

    assert(rp->num_q() == active_workers, "why not");

    rp->enqueue_discovered_references(executor);

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed
    return;
  }

  assert(_global_mark_stack.is_empty(), "Marking should have completed");

  // Unload Klasses, Strings, Symbols, Code Cache, etc.
  if (ClassUnloadingWithConcurrentMark) {
    GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
    bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
    g1h->complete_cleaning(&g1_is_alive, purged_classes);
  } else {
    GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
    // No need to clean string table and symbol table as they are treated as strong roots when
    // class unloading is disabled.
    g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
  }
}

// Swap the roles of the prev and next marking bitmaps: the next bitmap
// becomes the new prev bitmap and vice versa.
void G1ConcurrentMark::swapMarkBitMaps() {
  G1CMBitMapRO* temp = _prevMarkBitMap;
  _prevMarkBitMap = (G1CMBitMapRO*)_nextMarkBitMap;
  _nextMarkBitMap = (G1CMBitMap*) temp;
}

// Closure for marking entries in SATB buffers.
class G1CMSATBBufferClosure : public SATBBufferClosure {
private:
  G1CMTask* _task;
  G1CollectedHeap* _g1h;

  // This is very similar to G1CMTask::deal_with_reference, but with
  // more relaxed requirements for the argument, so this must be more
  // circumspect about treating the argument as an object.
  void do_entry(void* entry) const {
    _task->increment_refs_reached();
    HeapRegion* hr = _g1h->heap_region_containing(entry);
    if (entry < hr->next_top_at_mark_start()) {
      // Until we get here, we don't know whether entry refers to a valid
      // object; it could instead have been a stale reference.
      oop obj = static_cast<oop>(entry);
      assert(obj->is_oop(true /* ignore mark word */),
             "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
      _task->make_reference_grey(obj);
    }
  }

public:
  G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  G1CMSATBBufferClosure _cm_satb_cl;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  int _thread_parity;

public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
    _cm_satb_cl(task, g1h),
    _cm_cl(g1h, g1h->concurrent_mark(), task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;

        // In theory it should not be necessary to explicitly walk the nmethods to find roots
        // for concurrent marking; however, oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the
        // receiver, should be live by the SATB invariant, but other oops recorded in nmethods may
        // behave differently.
        jt->nmethods_do(&_code_cl);

        jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
      }
    }
  }
};

class G1CMRemarkTask: public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;
public:
  void work(uint worker_id) {
    // Since all available tasks are actually started, we should
    // only proceed if we're supposed to be active.
    if (worker_id < _cm->active_tasks()) {
      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      {
        ResourceMark rm;
        HandleMark hm;

        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
        Threads::threads_do(&threads_f);
      }

      do {
        task->do_marking_step(1000000000.0 /* something very large */,
                              true /* do_termination */,
                              false /* is_serial */);
      } while (task->has_aborted() && !_cm->has_overflown());
      // If we overflow, then we do not want to restart. We instead
      // want to abort remark and do concurrent marking again.
      task->record_end_time();
    }
  }

  G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};

void G1ConcurrentMark::checkpointRootsFinalWork() {
  ResourceMark rm;
  HandleMark hm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);

  g1h->ensure_parsability(false);

  // this is remark, so we'll use up all active threads
  uint active_workers = g1h->workers()->active_workers();
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the G1ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  {
    StrongRootsScope srs(active_workers);

    G1CMRemarkTask remarkTask(this, active_workers);
    // We will start all available threads, even if we decide that the
    // active_workers will be fewer. The extra ones will just bail out
    // immediately.
    g1h->workers()->run_task(&remarkTask);
  }

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
            BOOL_TO_STR(has_overflown()),
            satb_mq_set.completed_buffers_num());

  print_stats();
}

void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
}

HeapRegion*
G1ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  // _heap_end will not change underneath our feet; it only changes at
  // yield points.
  while (finger < _heap_end) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    HeapRegion* curr_region = _g1h->heap_region_containing(finger);
    // Make sure that the reads below do not float before loading curr_region.
    OrderAccess::loadload();
    // heap_region_containing() above may return NULL as we always claim
    // regions up to the end of the heap. In this case, just jump to the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // we succeeded
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit = curr_region->next_top_at_mark_start();

      // notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further
      assert(_finger >= end, "the finger should have moved forward");

      if (limit > bottom) {
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        // we return NULL and the caller should try calling
        // claim_region() again.
        return NULL;
      }
    } else {
      assert(_finger > finger, "the finger should have moved forward");
      // read it again
      finger = _finger;
    }
  }

  return NULL;
}

#ifndef PRODUCT
class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
private:
  G1CollectedHeap* _g1h;
  const char* _phase;
  int _info;

public:
  VerifyNoCSetOops(const char* phase, int info = -1) :
    _g1h(G1CollectedHeap::heap()),
    _phase(phase),
    _info(info)
  { }

  void operator()(G1TaskQueueEntry task_entry) const {
    if (task_entry.is_array_slice()) {
      guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
      return;
    }
    guarantee(task_entry.obj()->is_oop(),
              "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
              p2i(task_entry.obj()), _phase, _info);
    guarantee(!_g1h->is_in_cset(task_entry.obj()),
              "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
              p2i(task_entry.obj()), _phase, _info);
  }
};

void G1ConcurrentMark::verify_no_cset_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
    return;
  }

  // Verify entries on the global mark stack
  _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));

  // Verify entries on the task queues
  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->iterate(VerifyNoCSetOops("Queue", i));
  }

  // Verify the global finger
  HeapWord* global_finger = finger();
  if (global_finger != NULL && global_finger < _heap_end) {
    // Since we always iterate over all regions, we might get a NULL HeapRegion
    // here.
    HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              "global finger: " PTR_FORMAT " region: " HR_FORMAT,
              p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
  }

  // Verify the task fingers
  assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  for (uint i = 0; i < parallel_marking_threads(); ++i) {
    G1CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap_end) {
      // See above note on the global finger verification.
      HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
      guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                !task_hr->in_collection_set(),
                "task finger: " PTR_FORMAT " region: " HR_FORMAT,
                p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
    }
  }
}
#endif // PRODUCT

void G1ConcurrentMark::create_live_data() {
  _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap);
}

void G1ConcurrentMark::finalize_live_data() {
  _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap);
}

void G1ConcurrentMark::verify_live_data() {
  _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap);
}

void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
  _g1h->g1_rem_set()->clear_card_live_data(workers);
}

#ifdef ASSERT
void G1ConcurrentMark::verify_live_data_clear() {
  _g1h->g1_rem_set()->verify_card_live_data_is_clear();
}
#endif

void G1ConcurrentMark::print_stats() {
  if (!log_is_enabled(Debug, gc, stats)) {
    return;
  }
  log_debug(gc, stats)("---------------------------------------------------------------------");
  for (size_t i = 0; i < _active_tasks; ++i) {
    _tasks[i]->print_stats();
    log_debug(gc, stats)("---------------------------------------------------------------------");
  }
}

void G1ConcurrentMark::abort() {
  if (!cmThread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  {
    GCTraceTime(Debug, gc)("Clear Next Bitmap");
    clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
  }
  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  {
    GCTraceTime(Debug, gc)("Clear Live Data");
    clear_live_data(_g1h->workers());
  }
  DEBUG_ONLY({
    GCTraceTime(Debug, gc)("Verify Live Data Clear");
    verify_live_data_clear();
  })
  // Empty mark stack
  reset_marking_state();
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking; we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
                                 false, /* new active value */
                                 satb_mq_set.is_active() /* expected_active */);
}

static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

void G1ConcurrentMark::print_summary_info() {
  Log(gc, marking) log;
  if (!log.is_trace()) {
    return;
  }

  log.trace(" Concurrent marking:");
  print_ms_time_info(" ", "init marks", _init_times);
  print_ms_time_info(" ", "remarks", _remark_times);
  {
    print_ms_time_info(" ", "final marks", _remark_mark_times);
    print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
  }
  print_ms_time_info(" ", "cleanups", _cleanup_times);
  log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).",
            _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  if (G1ScrubRemSets) {
    log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
              _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  }
  log.trace(" Total stop_world time = %8.2f s.",
            (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
  log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).",
            cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
}

void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  _parallel_workers->print_worker_threads_on(st);
}

void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
  _parallel_workers->threads_do(tc);
}

void G1ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
}

// Closure for iteration over bitmaps
class G1CMBitMapClosure : public BitMapClosure {
private:
  // the bitmap that is being iterated over
  G1CMBitMap* _nextMarkBitMap;
  G1ConcurrentMark* _cm;
  G1CMTask* _task;

public:
  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) :
    _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }

  bool do_bit(size_t offset) {
    HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
    assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert(addr < _cm->finger(), "invariant");
    assert(addr >= _task->finger(), "invariant");

    // We move that task's local finger along.
    _task->move_finger_to(addr);

    _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
    // we only partially drain the local queue and global stack
    _task->drain_local_queue(true);
    _task->drain_global_stack(true);

    // if the has_aborted flag has been raised, we need to bail out of
    // the iteration
    return !_task->has_aborted();
  }
};

static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
  ReferenceProcessor* result = g1h->ref_processor_cm();
  assert(result != NULL, "CM reference processor should not be NULL");
  return result;
}

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               G1ConcurrentMark* cm,
                               G1CMTask* task)
  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
    _g1h(g1h), _cm(cm), _task(task)
{ }

void G1CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  _curr_region = hr;
  _finger = hr->bottom();
  update_region_limit();
}

void G1CMTask::update_region_limit() {
  HeapRegion* hr = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit = hr->next_top_at_mark_start();

  if (limit == bottom) {
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}

void G1CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  clear_region_fields();
}

void G1CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
  _curr_region = NULL;
  _finger = NULL;
  _region_limit = NULL;
}

void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  if (cm_oop_closure == NULL) {
    assert(_cm_oop_closure != NULL, "invariant");
  } else {
    assert(_cm_oop_closure == NULL, "invariant");
  }
  _cm_oop_closure = cm_oop_closure;
}

void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
  guarantee(nextMarkBitMap != NULL, "invariant");
  _nextMarkBitMap = nextMarkBitMap;
  clear_region_fields();

  _calls = 0;
  _elapsed_time_ms = 0.0;
  _termination_time_ms = 0.0;
  _termination_start_time_ms = 0.0;
}

bool G1CMTask::should_exit_termination() {
  regular_clock_call();
  // This is called when we are in the termination protocol. We should
  // quit if, for some reason, this task wants to abort or the global
  // stack is not empty (this means that we can get work from it).
  return !_cm->mark_stack_empty() || has_aborted();
}

void G1CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
         "shouldn't have been called otherwise");
  regular_clock_call();
}

void G1CMTask::regular_clock_call() {
  if (has_aborted()) return;

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following:

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    set_has_aborted();
    return;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!concurrent()) return;

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    set_has_aborted();
    return;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (3) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    set_has_aborted();
    return;
  }

  // (4) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    set_has_aborted();
    _has_timed_out = true;
    return;
  }

  // (5) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    // We do need to process SATB buffers, so we'll abort and restart
    // the marking task to do so.
    set_has_aborted();
    return;
  }
}

void G1CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit = _real_words_scanned_limit;

  _real_refs_reached_limit = _refs_reached + refs_reached_period;
  _refs_reached_limit = _real_refs_reached_limit;
}

void G1CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per-byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.

  _words_scanned_limit = _real_words_scanned_limit -
    3 * words_scanned_period / 4;
  _refs_reached_limit = _real_refs_reached_limit -
    3 * refs_reached_period / 4;
}

void G1CMTask::move_entries_to_global_stack() {
  // Local array where we'll store the entries that will be popped
  // from the local queue.
  G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];

  size_t n = 0;
  G1TaskQueueEntry task_entry;
  while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
    buffer[n] = task_entry;
    ++n;
  }
  if (n < G1CMMarkStack::EntriesPerChunk) {
    // Null-terminate a partially filled chunk so that consumers know
    // where the valid entries end.
    buffer[n] = G1TaskQueueEntry();
  }

  if (n > 0) {
    if (!_cm->mark_stack_push(buffer)) {
      set_has_aborted();
    }
  }

  // This operation was quite expensive, so decrease the limits.
  decrease_limits();
}

bool G1CMTask::get_entries_from_global_stack() {
  // Local array where we'll store the entries that will be popped
  // from the global stack.
  G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];

  if (!_cm->mark_stack_pop(buffer)) {
    return false;
  }

  // We did actually pop at least one entry.
  for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
    G1TaskQueueEntry task_entry = buffer[i];
    if (task_entry.is_null()) {
      break;
    }
    assert(task_entry.is_array_slice() || task_entry.obj()->is_oop(), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
    bool success = _task_queue->push(task_entry);
    // We only call this when the local queue is empty or under a
    // given target limit. So, we do not expect this push to fail.
    assert(success, "invariant");
  }

  // This operation was quite expensive, so decrease the limits.
  decrease_limits();
  return true;
}

void G1CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) {
    return;
  }

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    G1TaskQueueEntry entry;
    bool ret = _task_queue->pop_local(entry);
    while (ret) {
      scan_task_entry(entry);
      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(entry);
      }
    }
  }
}

void G1CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) return;

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  // Notice that when draining the global mark stack partially, due to the raciness
  // of the mark stack size update we might in fact drop below the target. But,
  // this is not a problem.
  // In case of total draining, we simply process until the global mark stack is
  // totally empty, disregarding the size counter.
  if (partially) {
    size_t const target_size = _cm->partial_mark_stack_size_target();
    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      if (get_entries_from_global_stack()) {
        drain_local_queue(partially);
      }
    }
  } else {
    while (!has_aborted() && get_entries_from_global_stack()) {
      drain_local_queue(partially);
    }
  }
}

// The SATB queue code has several assumptions about whether to call the par
// or non-par versions of the methods. This is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) return;

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    regular_clock_call();
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // again, this was a potentially expensive operation, decrease the
  // limits to get the regular clock call early
  decrease_limits();
}

void G1CMTask::print_stats() {
  log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
                       _worker_id, _calls);
  log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                       _elapsed_time_ms, _termination_time_ms);
  log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
                       _step_times_ms.num(), _step_times_ms.avg(),
                       _step_times_ms.sd());
  log_debug(gc, stats)(" max = %1.2lfms, total = %1.2lfms",
                       _step_times_ms.maximum(), _step_times_ms.sum());
}

bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) {
  return _task_queues->steal(worker_id, hash_seed, task_entry);
}

/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework. It can be called in parallel
    with other invocations of do_marking_step() on different tasks
    (but only one per task, obviously) and concurrently with the
    mutator threads, or during remark, hence it eliminates the need
    for two versions of the code. When called during remark, it will
    pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
    it needs to yield.

    The data structures that it uses to do marking work are the
    following:

      (1) Marking Bitmap. If there are gray objects that appear only
      on the bitmap (this happens either when dealing with an overflow
      or when the initial marking phase has simply marked the roots
      and didn't push them on the stack), then tasks claim heap
      regions whose bitmap they then scan to find gray objects. A
      global finger indicates where the end of the last claimed region
      is. A local finger indicates how far into the region a task has
      scanned. The two fingers are used to determine how to gray an
      object (i.e. whether simply marking it is OK, as it will be
      visited by a task in the future, or whether it needs to be also
      pushed on a stack).

      (2) Local Queue. The local queue of the task which is accessed
      reasonably efficiently by the task. Other tasks can steal from
      it when they run out of work. Throughout the marking phase, a
      task attempts to keep its local queue short but not totally
      empty, so that entries are available for stealing by other
      tasks. Only when there is no more work will a task totally
      drain its local queue.

      (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and more fine-grained
      interaction with it might cause contention. If it overflows,
      then the marking phase should restart and iterate over the
      bitmap to identify gray objects.
      Throughout the marking phase, tasks attempt to keep the global
      mark stack at a small length but not totally empty, so that
      entries are available for popping by other tasks. Only when
      there is no more work will tasks totally drain the global mark
      stack.

      (4) SATB Buffer Queue. This is where completed SATB buffers are
      made available. Buffers are regularly removed from this queue
      and scanned for roots, so that the queue doesn't get too
      long. During remark, all completed buffers are processed, as
      well as the filled-in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

      (1) When the marking phase has been aborted (after a Full GC).

      (2) When a global overflow (on the global stack) has been
      triggered. Before the task aborts, it will actually sync up with
      the other tasks to ensure that all the marking data structures
      (local queues, stacks, fingers etc.) are re-initialized so that
      when do_marking_step() completes, the marking phase can
      immediately restart.

      (3) When enough completed SATB buffers are available. The
      do_marking_step() method only tries to drain SATB buffers right
      at the beginning. So, if enough buffers are available, the
      marking step aborts and the SATB buffers are processed at
      the beginning of the next invocation.

      (4) To yield. When we have to yield, we abort and yield
      right at the end of do_marking_step(). This saves us from a lot
      of hassle as, by yielding, we might allow a Full GC. If this
      happens then objects will be compacted underneath our feet, the
      heap might shrink, etc. We save checking for this by just
      aborting and doing the yield right at the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (at
    sub-millisecond intervals) throughout marking. It is this clock
    method that checks all the abort conditions which were mentioned
    above and decides when the task should abort. A work-based scheme
    is used to trigger this clock method: it is called when the number
    of object words the marking phase has scanned or the number of
    references the marking phase has visited reaches a given limit.
    Additional invocations of the clock method have been planted in a
    few other strategic places too. The initial reason for the clock
    method was to avoid calling os::elapsedVTime() too frequently, as
    it is quite expensive. So, once it was in place, it was natural to
    piggy-back all the other conditions on it too and not constantly
    check them throughout the code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.

 *****************************************************************************/

void G1CMTask::do_marking_step(double time_target_ms,
                               bool do_termination,
                               bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1Policy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it is
  // possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other G1CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached = 0;
  recalculate_limits();

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet.
      // Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region == NULL, "invariant");
      assert(_finger == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");
    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");
    while (!has_aborted()) {
      G1TaskQueueEntry entry;
      if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) {
        scan_task_entry(entry);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The G1CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
    } else {
      // Apparently there's more work to do. Let's abort this task. Our
      // caller will then restart it and we can hopefully find more
      // things to do.
      set_has_aborted();
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.
    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're during the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
  }

  _claimed = false;
}

G1CMTask::G1CMTask(uint worker_id,
                   G1ConcurrentMark* cm,
                   G1CMTaskQueue* task_queue,
                   G1CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _objArray_processor(this),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX "###"

#define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT " %-4s"
#define G1PPRL_TYPE_H_FORMAT " %4s"
#define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT " %9s"
#define G1PPRL_DOUBLE_FORMAT " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(const char* phase_name)
  : _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "(bytes)");
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type = r->get_type_str();
  HeapWord* bottom = r->bottom();
  HeapWord* end = r->end();
  size_t capacity_bytes = r->capacity();
  size_t used_bytes = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff = r->gc_efficiency();
  size_t remset_bytes = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  _total_used_bytes += used_bytes;
  _total_capacity_bytes += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, strong_code_roots_bytes);

  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // add static memory usages to remembered set sizes
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          " SUMMARY"
                          G1PPRL_SUM_MB_FORMAT("capacity")
                          G1PPRL_SUM_MB_PERC_FORMAT("used")
                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                          G1PPRL_SUM_MB_FORMAT("remset")
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
                          perc(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
                          perc(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
                          perc(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
}
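
// Note: the per-region liveness table produced by G1PrintRegionLivenessInfoClosure
// above is emitted at trace level under the (gc, liveness) log tags, e.g. via
// -Xlog:gc+liveness=trace.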