/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

void G1CMBitMap::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _covered = heap;

  _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _covered.word_size() >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
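  // Re-committed memory may be backing storage that still holds mark bits
  // from an earlier mapping, so clear exactly the bitmap range covering the
  // newly committed regions.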
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clear_range(mr);
}

void G1CMBitMap::clear_range(MemRegion mr) {
  MemRegion intersection = mr.intersection(_covered);
  assert(!intersection.is_empty(),
         "Given range from " PTR_FORMAT " to " PTR_FORMAT " is completely outside the heap",
         p2i(mr.start()), p2i(mr.end()));
  // convert address range into offset range
  _bm.at_put_range(addr_to_offset(intersection.start()),
                   addr_to_offset(intersection.end()), false);
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}
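// The chunk list heads (_chunk_list, _free_list) are volatile, but every
// mutation goes through add_chunk_to_list() / remove_chunk_from_list() while
// the caller holds MarkStackChunkList_lock or MarkStackFreeList_lock, so the
// plain pointer updates below are safe.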
void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further, this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
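  // Reset the claim index so that claim_next() hands out survivor regions
  // from the start of the survivor list again.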
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _global_mark_stack(),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
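  // Both bitmaps cover the entire reserved region; their "prev" and "next"
  // roles are exchanged by swapMarkBitMaps() at the end of each cycle.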
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / os::initial_active_processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
    double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
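    // Only values explicitly specified on the command line are checked here;
    // the FLAG_IS_DEFAULT / FLAG_IS_CMDLINE tests below distinguish
    // user-supplied values from ergonomically chosen defaults.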
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use
  // different numbers of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}

void G1ConcurrentMark::reset_marking_state() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();
  }

  clear_has_overflown();
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if, after yielding, the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_parallel_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap(_prevMarkBitMap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the get_next_marked_addr() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
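    // Reading r->end() once and using it both as the search limit and in the
    // comparison below is therefore race-free.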
    HeapWord* end = r->end();
    return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}

void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that it is suspended for a Full GC or that an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state();

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark*     _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      G1CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->do_yield_check();

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
         "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
         max_parallel_marking_threads(), n_conc_workers);
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _parallel_marking_threads = MIN2(calc_parallel_marking_threads(),
                                     // We distribute work on a per-region basis, so starting
                                     // more threads than that is useless.
                                     root_regions()->num_root_regions());
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _parallel_marking_threads, root_regions()->num_root_regions());
    _parallel_workers->run_task(&task, _parallel_marking_threads);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
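    // scan_finished() verifies (unless the scan was aborted) that all
    // survivors were claimed and then notifies waiters on RootRegionScan_lock.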
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _parallel_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc)("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc)("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // RSet scrubbing must happen before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
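    // G1SecondaryFreeListAppendLength controls the batch size: accumulated
    // regions are published under SecondaryFreeList_lock below.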
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  int               _ref_counter_limit;
  int               _ref_counter;
  bool              _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false      /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  bool              _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;
  WorkGang*         _workers;
  uint              _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
1534 virtual void execute(ProcessTask& task);
1535 virtual void execute(EnqueueTask& task);
1536 };
1537
1538 class G1CMRefProcTaskProxy: public AbstractGangTask {
1539 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1540 ProcessTask& _proc_task;
1541 G1CollectedHeap* _g1h;
1542 G1ConcurrentMark* _cm;
1543
1544 public:
1545 G1CMRefProcTaskProxy(ProcessTask& proc_task,
1546 G1CollectedHeap* g1h,
1547 G1ConcurrentMark* cm) :
1548 AbstractGangTask("Process reference objects in parallel"),
1549 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1550 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1551 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1552 }
1553
1554 virtual void work(uint worker_id) {
1555 ResourceMark rm;
1556 HandleMark hm;
1557 G1CMTask* task = _cm->task(worker_id);
1558 G1CMIsAliveClosure g1_is_alive(_g1h);
1559 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1560 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1561
1562 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1563 }
1564 };
1565
1566 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
1567 assert(_workers != NULL, "Need parallel worker threads.");
1568 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1569
1570 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1571
1572 // We need to reset the concurrency level before each
1573 // proxy task execution, so that the termination protocol
1574 // and overflow handling in G1CMTask::do_marking_step() know
1575 // how many workers to wait for.
1576 _cm->set_concurrency(_active_workers);
1577 _workers->run_task(&proc_task_proxy);
1578 }
1579
1580 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
1581 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
1582 EnqueueTask& _enq_task;
1583
1584 public:
1585 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
1586 AbstractGangTask("Enqueue reference objects in parallel"),
1587 _enq_task(enq_task) { }
1588
1589 virtual void work(uint worker_id) {
1590 _enq_task.work(worker_id);
1591 }
1592 };
1593
1594 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1595 assert(_workers != NULL, "Need parallel worker threads.");
1596 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1597
1598 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1599
1600 // Not strictly necessary but...
1601 //
1602 // We need to reset the concurrency level before each
1603 // proxy task execution, so that the termination protocol
1604 // and overflow handling in G1CMTask::do_marking_step() know
1605 // how many workers to wait for.
1606 _cm->set_concurrency(_active_workers);
1607 _workers->run_task(&enq_task_proxy);
1608 }
1609
1610 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
1611 if (has_overflown()) {
1612 // Skip processing the discovered references if we have
1613 // overflown the global marking stack. Reference objects
1614 // only get discovered once so it is OK to not
1615 // de-populate the discovered reference lists. We could have
1616 // done so, but the only benefit would be that, when marking restarts,
1617 // fewer reference objects are discovered.
1618 return;
1619 }
1620
1621 ResourceMark rm;
1622 HandleMark hm;
1623
1624 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1625
1626 // Is alive closure.
1627 G1CMIsAliveClosure g1_is_alive(g1h);
1628
1629 // Inner scope to exclude the cleaning of the string and symbol
1630 // tables from the displayed time.
1631 {
1632 GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1633
1634 ReferenceProcessor* rp = g1h->ref_processor_cm();
1635
1636 // See the comment in G1CollectedHeap::ref_processing_init()
1637 // about how reference processing currently works in G1.
1638
1639 // Set the soft reference policy
1640 rp->setup_policy(clear_all_soft_refs);
1641 assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1642
1643 // Instances of the 'Keep Alive' and 'Complete GC' closures used
1644 // in serial reference processing. Note these closures are also
1645 // used for serially processing (by the current thread) the
1646 // JNI references during parallel reference processing.
1647 //
1648 // These closures do not need to synchronize with the worker
1649 // threads involved in parallel reference processing as these
1650 // instances are executed serially by the current thread (e.g.
1651 // reference processing is not multi-threaded and is thus
1652 // performed by the current thread instead of a gang worker).
1653 //
1654 // The gang tasks involved in parallel reference processing create
1655 // their own instances of these closures, which do their own
1656 // synchronization among themselves.
1657 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1658 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1659
1660 // We need at least one active thread. If reference processing
1661 // is not multi-threaded we use the current (VMThread) thread,
1662 // otherwise we use the work gang from the G1CollectedHeap and
1663 // we utilize all the worker threads we can.
1664 bool processing_is_mt = rp->processing_is_mt();
1665 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1666 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1667
1668 // Parallel processing task executor.
1669 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1670 g1h->workers(), active_workers);
1671 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1672
1673 // Set the concurrency level. The phase was already set prior to
1674 // executing the remark task.
1675 set_concurrency(active_workers);
1676
1677 // Set the degree of MT processing here. If the discovery was done MT,
1678 // the number of threads involved during discovery could differ from
1679 // the number of active workers. This is OK as long as the discovered
1680 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1681 rp->set_active_mt_degree(active_workers);
1682
1683 // Process the weak references.
1684 const ReferenceProcessorStats& stats =
1685 rp->process_discovered_references(&g1_is_alive,
1686 &g1_keep_alive,
1687 &g1_drain_mark_stack,
1688 executor,
1689 _gc_timer_cm);
1690 _gc_tracer_cm->report_gc_reference_stats(stats);
1691
1692 // The do_oop work routines of the keep_alive and drain_marking_stack
1693 // oop closures will set the has_overflown flag if we overflow the
1694 // global marking stack.
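//
// For illustration only (the numbers here are made up): with a work
// gang of 8 active workers but _max_worker_id == 4, the clamp
// MAX2(MIN2(active_workers, _max_worker_id), 1U) above yields
// active_workers == 4, i.e. reference processing never uses more
// workers than there are G1CMTasks; with processing_is_mt == false it
// yields 1 and the serial closures above run on the current thread.
//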
1695
1696 assert(has_overflown() || _global_mark_stack.is_empty(),
1697 "Mark stack should be empty (unless it has overflown)");
1698
1699 assert(rp->num_q() == active_workers, "why not");
1700
1701 rp->enqueue_discovered_references(executor);
1702
1703 rp->verify_no_references_recorded();
1704 assert(!rp->discovery_enabled(), "Post condition");
1705 }
1706
1707 if (has_overflown()) {
1708 // We cannot trust g1_is_alive if the marking stack overflowed
1709 return;
1710 }
1711
1712 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1713
1714 // Unload Klasses, Strings, Symbols, Code Cache, etc.
1715 if (ClassUnloadingWithConcurrentMark) {
1716 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1717 bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1718 g1h->complete_cleaning(&g1_is_alive, purged_classes);
1719 } else {
1720 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1721 // No need to clean string table and symbol table as they are treated as strong roots when
1722 // class unloading is disabled.
1723 g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1724
1725 }
1726 }
1727
1728 void G1ConcurrentMark::swapMarkBitMaps() {
1729 G1CMBitMap* temp = _prevMarkBitMap;
1730 _prevMarkBitMap = _nextMarkBitMap;
1731 _nextMarkBitMap = temp;
1732 }
1733
1734 // Closure for marking entries in SATB buffers.
1735 class G1CMSATBBufferClosure : public SATBBufferClosure {
1736 private:
1737 G1CMTask* _task;
1738 G1CollectedHeap* _g1h;
1739
1740 // This is very similar to G1CMTask::deal_with_reference, but with
1741 // more relaxed requirements for the argument, so this must be more
1742 // circumspect about treating the argument as an object.
1743 void do_entry(void* entry) const {
1744 _task->increment_refs_reached();
1745 HeapRegion* hr = _g1h->heap_region_containing(entry);
1746 if (entry < hr->next_top_at_mark_start()) {
1747 // Until we get here, we don't know whether entry refers to a valid
1748 // object; it could instead have been a stale reference.
oop obj = static_cast<oop>(entry);
1750 assert(obj->is_oop(true /* ignore mark word */),
1751 "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
1752 _task->make_reference_grey(obj);
1753 }
1754 }
1755
1756 public:
1757 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1758 : _task(task), _g1h(g1h) { }
1759
1760 virtual void do_buffer(void** buffer, size_t size) {
1761 for (size_t i = 0; i < size; ++i) {
1762 do_entry(buffer[i]);
1763 }
1764 }
1765 };
1766
1767 class G1RemarkThreadsClosure : public ThreadClosure {
1768 G1CMSATBBufferClosure _cm_satb_cl;
1769 G1CMOopClosure _cm_cl;
1770 MarkingCodeBlobClosure _code_cl;
1771 int _thread_parity;
1772
1773 public:
1774 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1775 _cm_satb_cl(task, g1h),
1776 _cm_cl(g1h, g1h->concurrent_mark(), task),
1777 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1778 _thread_parity(Threads::thread_claim_parity()) {}
1779
1780 void do_thread(Thread* thread) {
1781 if (thread->is_Java_thread()) {
1782 if (thread->claim_oops_do(true, _thread_parity)) {
1783 JavaThread* jt = (JavaThread*)thread;
1784
1785 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1786 // however, oops reachable from nmethods have very complex lifecycles:
1787 // * Alive if on the stack of an executing method
1788 // * Weakly reachable otherwise
1789 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1790 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1791 jt->nmethods_do(&_code_cl);
1792
1793 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1794 }
1795 } else if (thread->is_VM_thread()) {
1796 if (thread->claim_oops_do(true, _thread_parity)) {
1797 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1798 }
1799 }
1800 }
1801 };
1802
1803 class G1CMRemarkTask: public AbstractGangTask {
1804 private:
1805 G1ConcurrentMark* _cm;
1806 public:
1807 void work(uint worker_id) {
1808 // Since all available tasks are actually started, we should
1809 // only proceed if we're supposed to be active.
1810 if (worker_id < _cm->active_tasks()) {
1811 G1CMTask* task = _cm->task(worker_id);
1812 task->record_start_time();
1813 {
1814 ResourceMark rm;
1815 HandleMark hm;
1816
1817 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1818 Threads::threads_do(&threads_f);
1819 }
1820
1821 do {
1822 task->do_marking_step(1000000000.0 /* something very large */,
1823 true /* do_termination */,
1824 false /* is_serial */);
1825 } while (task->has_aborted() && !_cm->has_overflown());
1826 // If we overflow, then we do not want to restart. We instead
1827 // want to abort remark and do concurrent marking again.
1828 task->record_end_time();
1829 }
1830 }
1831
1832 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1833 AbstractGangTask("Par Remark"), _cm(cm) {
1834 _cm->terminator()->reset_for_reuse(active_workers);
1835 }
1836 };
1837
1838 void G1ConcurrentMark::checkpointRootsFinalWork() {
1839 ResourceMark rm;
1840 HandleMark hm;
1841 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1842
1843 GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1844
1845 g1h->ensure_parsability(false);
1846
1847 // this is remark, so we'll use up all active threads
1848 uint active_workers = g1h->workers()->active_workers();
1849 set_concurrency_and_phase(active_workers, false /* concurrent */);
1850 // Leave _parallel_marking_threads at its
1851 // value originally calculated in the G1ConcurrentMark
1852 // constructor and pass values of the active workers
1853 // through the gang in the task.
1854
1855 {
1856 StrongRootsScope srs(active_workers);
1857
1858 G1CMRemarkTask remarkTask(this, active_workers);
1859 // We will start all available threads, even if we decide that the
1860 // active_workers will be fewer. The extra ones will just bail out
1861 // immediately.
1862 g1h->workers()->run_task(&remarkTask);
1863 }
1864
1865 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1866 guarantee(has_overflown() ||
1867 satb_mq_set.completed_buffers_num() == 0,
1868 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1869 BOOL_TO_STR(has_overflown()),
1870 satb_mq_set.completed_buffers_num());
1871
1872 print_stats();
1873 }
1874
1875 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
1876 _prevMarkBitMap->clear_range(mr);
1877 }
1878
1879 HeapRegion*
1880 G1ConcurrentMark::claim_region(uint worker_id) {
1881 // "checkpoint" the finger
1882 HeapWord* finger = _finger;
1883
1884 // _heap_end will not change underneath our feet; it only changes at
1885 // yield points.
1886 while (finger < _heap_end) {
1887 assert(_g1h->is_in_g1_reserved(finger), "invariant");
1888
1889 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1890 // Make sure that the reads below do not float before loading curr_region.
1891 OrderAccess::loadload();
1892 // Above heap_region_containing may return NULL as we always scan and claim
1893 // until the end of the heap. In this case, just jump to the next region.
1894 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1895
1896 // Is the gap between reading the finger and doing the CAS too long?
1897 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
1898 if (res == finger && curr_region != NULL) {
1899 // we succeeded
1900 HeapWord* bottom = curr_region->bottom();
1901 HeapWord* limit = curr_region->next_top_at_mark_start();
1902
1903 // notice that _finger == end cannot be guaranteed here since
1904 // someone else might have moved the finger even further
1905 assert(_finger >= end, "the finger should have moved forward");
1906
1907 if (limit > bottom) {
1908 return curr_region;
1909 } else {
1910 assert(limit == bottom,
1911 "the region limit should be at bottom");
1912 // we return NULL and the caller should try calling
1913 // claim_region() again.
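//
// A minimal sketch of such a retry loop (illustrative only; the
// real loop lives in G1CMTask::do_marking_step()):
//
//   HeapRegion* hr = NULL;
//   while (!has_aborted() && hr == NULL && !_cm->out_of_regions()) {
//     hr = _cm->claim_region(_worker_id);
//   }
//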
1914 return NULL; 1915 } 1916 } else { 1917 assert(_finger > finger, "the finger should have moved forward"); 1918 // read it again 1919 finger = _finger; 1920 } 1921 } 1922 1923 return NULL; 1924 } 1925 1926 #ifndef PRODUCT 1927 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 1928 private: 1929 G1CollectedHeap* _g1h; 1930 const char* _phase; 1931 int _info; 1932 1933 public: 1934 VerifyNoCSetOops(const char* phase, int info = -1) : 1935 _g1h(G1CollectedHeap::heap()), 1936 _phase(phase), 1937 _info(info) 1938 { } 1939 1940 void operator()(G1TaskQueueEntry task_entry) const { 1941 if (task_entry.is_array_slice()) { 1942 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1943 return; 1944 } 1945 guarantee(task_entry.obj()->is_oop(), 1946 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1947 p2i(task_entry.obj()), _phase, _info); 1948 guarantee(!_g1h->is_in_cset(task_entry.obj()), 1949 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1950 p2i(task_entry.obj()), _phase, _info); 1951 } 1952 }; 1953 1954 void G1ConcurrentMark::verify_no_cset_oops() { 1955 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1956 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 1957 return; 1958 } 1959 1960 // Verify entries on the global mark stack 1961 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1962 1963 // Verify entries on the task queues 1964 for (uint i = 0; i < _max_worker_id; ++i) { 1965 G1CMTaskQueue* queue = _task_queues->queue(i); 1966 queue->iterate(VerifyNoCSetOops("Queue", i)); 1967 } 1968 1969 // Verify the global finger 1970 HeapWord* global_finger = finger(); 1971 if (global_finger != NULL && global_finger < _heap_end) { 1972 // Since we always iterate over all regions, we might get a NULL HeapRegion 1973 // here. 1974 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1975 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1976 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1977 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1978 } 1979 1980 // Verify the task fingers 1981 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 1982 for (uint i = 0; i < parallel_marking_threads(); ++i) { 1983 G1CMTask* task = _tasks[i]; 1984 HeapWord* task_finger = task->finger(); 1985 if (task_finger != NULL && task_finger < _heap_end) { 1986 // See above note on the global finger verification. 
1987 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 1988 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 1989 !task_hr->in_collection_set(), 1990 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1991 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 1992 } 1993 } 1994 } 1995 #endif // PRODUCT 1996 void G1ConcurrentMark::create_live_data() { 1997 _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap); 1998 } 1999 2000 void G1ConcurrentMark::finalize_live_data() { 2001 _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap); 2002 } 2003 2004 void G1ConcurrentMark::verify_live_data() { 2005 _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap); 2006 } 2007 2008 void G1ConcurrentMark::clear_live_data(WorkGang* workers) { 2009 _g1h->g1_rem_set()->clear_card_live_data(workers); 2010 } 2011 2012 #ifdef ASSERT 2013 void G1ConcurrentMark::verify_live_data_clear() { 2014 _g1h->g1_rem_set()->verify_card_live_data_is_clear(); 2015 } 2016 #endif 2017 2018 void G1ConcurrentMark::print_stats() { 2019 if (!log_is_enabled(Debug, gc, stats)) { 2020 return; 2021 } 2022 log_debug(gc, stats)("---------------------------------------------------------------------"); 2023 for (size_t i = 0; i < _active_tasks; ++i) { 2024 _tasks[i]->print_stats(); 2025 log_debug(gc, stats)("---------------------------------------------------------------------"); 2026 } 2027 } 2028 2029 void G1ConcurrentMark::abort() { 2030 if (!cmThread()->during_cycle() || _has_aborted) { 2031 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2032 return; 2033 } 2034 2035 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2036 // concurrent bitmap clearing. 2037 { 2038 GCTraceTime(Debug, gc)("Clear Next Bitmap"); 2039 clear_bitmap(_nextMarkBitMap, _g1h->workers(), false); 2040 } 2041 // Note we cannot clear the previous marking bitmap here 2042 // since VerifyDuringGC verifies the objects marked during 2043 // a full GC against the previous bitmap. 2044 2045 { 2046 GCTraceTime(Debug, gc)("Clear Live Data"); 2047 clear_live_data(_g1h->workers()); 2048 } 2049 DEBUG_ONLY({ 2050 GCTraceTime(Debug, gc)("Verify Live Data Clear"); 2051 verify_live_data_clear(); 2052 }) 2053 // Empty mark stack 2054 reset_marking_state(); 2055 for (uint i = 0; i < _max_worker_id; ++i) { 2056 _tasks[i]->clear_region_fields(); 2057 } 2058 _first_overflow_barrier_sync.abort(); 2059 _second_overflow_barrier_sync.abort(); 2060 _has_aborted = true; 2061 2062 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2063 satb_mq_set.abandon_partial_marking(); 2064 // This can be called either during or outside marking, we'll read 2065 // the expected_active value from the SATB queue set. 2066 satb_mq_set.set_active_all_threads( 2067 false, /* new active value */ 2068 satb_mq_set.is_active() /* expected_active */); 2069 } 2070 2071 static void print_ms_time_info(const char* prefix, const char* name, 2072 NumberSeq& ns) { 2073 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2074 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2075 if (ns.num() > 0) { 2076 log_trace(gc, marking)("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 2077 prefix, ns.sd(), ns.maximum()); 2078 } 2079 } 2080 2081 void G1ConcurrentMark::print_summary_info() { 2082 Log(gc, marking) log; 2083 if (!log.is_trace()) { 2084 return; 2085 } 2086 2087 log.trace(" Concurrent marking:"); 2088 print_ms_time_info(" ", "init marks", _init_times); 2089 print_ms_time_info(" ", "remarks", _remark_times); 2090 { 2091 print_ms_time_info(" ", "final marks", _remark_mark_times); 2092 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2093 2094 } 2095 print_ms_time_info(" ", "cleanups", _cleanup_times); 2096 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2097 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2098 if (G1ScrubRemSets) { 2099 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 2100 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2101 } 2102 log.trace(" Total stop_world time = %8.2f s.", 2103 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2104 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2105 cmThread()->vtime_accum(), cmThread()->vtime_mark_accum()); 2106 } 2107 2108 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2109 _parallel_workers->print_worker_threads_on(st); 2110 } 2111 2112 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2113 _parallel_workers->threads_do(tc); 2114 } 2115 2116 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2117 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2118 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 2119 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 2120 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 2121 } 2122 2123 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) { 2124 assert(addr < _cm->finger(), "invariant"); 2125 assert(addr >= _task->finger(), "invariant"); 2126 2127 // We move that task's local finger along. 2128 _task->move_finger_to(addr); 2129 2130 _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr))); 2131 // we only partially drain the local queue and global stack 2132 _task->drain_local_queue(true); 2133 _task->drain_global_stack(true); 2134 2135 // if the has_aborted flag has been raised, we need to bail out of 2136 // the iteration 2137 return !_task->has_aborted(); 2138 } 2139 2140 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2141 ReferenceProcessor* result = g1h->ref_processor_cm(); 2142 assert(result != NULL, "CM reference processor should not be NULL"); 2143 return result; 2144 } 2145 2146 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2147 G1ConcurrentMark* cm, 2148 G1CMTask* task) 2149 : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)), 2150 _g1h(g1h), _cm(cm), _task(task) 2151 { } 2152 2153 void G1CMTask::setup_for_region(HeapRegion* hr) { 2154 assert(hr != NULL, 2155 "claim_region() should have filtered out NULL regions"); 2156 _curr_region = hr; 2157 _finger = hr->bottom(); 2158 update_region_limit(); 2159 } 2160 2161 void G1CMTask::update_region_limit() { 2162 HeapRegion* hr = _curr_region; 2163 HeapWord* bottom = hr->bottom(); 2164 HeapWord* limit = hr->next_top_at_mark_start(); 2165 2166 if (limit == bottom) { 2167 // The region was collected underneath our feet. 
2168 // We set the finger to bottom to ensure that the bitmap
2169 // iteration that will follow this will not do anything.
2170 // (this is not a condition that holds when we set the region up,
2171 // as the region is not supposed to be empty in the first place)
2172 _finger = bottom;
2173 } else if (limit >= _region_limit) {
2174 assert(limit >= _finger, "peace of mind");
2175 } else {
2176 assert(limit < _region_limit, "only way to get here");
2177 // This can happen under some pretty unusual circumstances. An
2178 // evacuation pause empties the region underneath our feet (NTAMS
2179 // at bottom). We then do some allocation in the region (NTAMS
2180 // stays at bottom), followed by the region being used as a GC
2181 // alloc region (NTAMS will move to top() and the objects
2182 // originally below it will be grayed). All objects now marked in
2183 // the region are explicitly grayed, if below the global finger,
2184 // and in fact we do not need to scan anything else. So, we simply
2185 // set _finger to be limit to ensure that the bitmap iteration
2186 // doesn't do anything.
2187 _finger = limit;
2188 }
2189
2190 _region_limit = limit;
2191 }
2192
2193 void G1CMTask::giveup_current_region() {
2194 assert(_curr_region != NULL, "invariant");
2195 clear_region_fields();
2196 }
2197
2198 void G1CMTask::clear_region_fields() {
2199 // Values for these three fields that indicate that we're not
2200 // holding on to a region.
2201 _curr_region = NULL;
2202 _finger = NULL;
2203 _region_limit = NULL;
2204 }
2205
2206 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2207 if (cm_oop_closure == NULL) {
2208 assert(_cm_oop_closure != NULL, "invariant");
2209 } else {
2210 assert(_cm_oop_closure == NULL, "invariant");
2211 }
2212 _cm_oop_closure = cm_oop_closure;
2213 }
2214
2215 void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
2216 guarantee(nextMarkBitMap != NULL, "invariant");
2217 _nextMarkBitMap = nextMarkBitMap;
2218 clear_region_fields();
2219
2220 _calls = 0;
2221 _elapsed_time_ms = 0.0;
2222 _termination_time_ms = 0.0;
2223 _termination_start_time_ms = 0.0;
2224 }
2225
2226 bool G1CMTask::should_exit_termination() {
2227 regular_clock_call();
2228 // This is called when we are in the termination protocol. We should
2229 // quit if, for some reason, this task wants to abort or the global
2230 // stack is not empty (this means that we can get work from it).
2231 return !_cm->mark_stack_empty() || has_aborted();
2232 }
2233
2234 void G1CMTask::reached_limit() {
2235 assert(_words_scanned >= _words_scanned_limit ||
2236 _refs_reached >= _refs_reached_limit,
2237 "shouldn't have been called otherwise");
2238 regular_clock_call();
2239 }
2240
2241 void G1CMTask::regular_clock_call() {
2242 if (has_aborted()) return;
2243
2244 // First, we need to recalculate the words scanned and refs reached
2245 // limits for the next clock call.
2246 recalculate_limits();
2247
2248 // During the regular clock call we do the following:
2249
2250 // (1) If an overflow has been flagged, then we abort.
2251 if (_cm->has_overflown()) {
2252 set_has_aborted();
2253 return;
2254 }
2255
2256 // If we are not concurrent (i.e. we're doing remark) we don't need
2257 // to check anything else. The other steps are only needed during
2258 // the concurrent marking phase.
2259 if (!concurrent()) return;
2260
2261 // (2) If marking has been aborted for Full GC, then we also abort.
2262 if (_cm->has_aborted()) {
2263 set_has_aborted();
2264 return;
2265 }
2266
2267 double curr_time_ms = os::elapsedVTime() * 1000.0;
2268
2269 // (3) We check whether we should yield. If we have to, then we abort.
2270 if (SuspendibleThreadSet::should_yield()) {
2271 // We should yield. To do this we abort the task. The caller is
2272 // responsible for yielding.
2273 set_has_aborted();
2274 return;
2275 }
2276
2277 // (4) We check whether we've reached our time quota. If we have,
2278 // then we abort.
2279 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2280 if (elapsed_time_ms > _time_target_ms) {
2281 set_has_aborted();
2282 _has_timed_out = true;
2283 return;
2284 }
2285
2286 // (5) Finally, we check whether there are enough completed SATB
2287 // buffers available for processing. If there are, we abort.
2288 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2289 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2290 // we do need to process SATB buffers, so we'll abort and restart
2291 // the marking task to do so
2292 set_has_aborted();
2293 return;
2294 }
2295 }
2296
2297 void G1CMTask::recalculate_limits() {
2298 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2299 _words_scanned_limit = _real_words_scanned_limit;
2300
2301 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2302 _refs_reached_limit = _real_refs_reached_limit;
2303 }
2304
2305 void G1CMTask::decrease_limits() {
2306 // This is called when we believe that we're going to do an infrequent
2307 // operation which will increase the per byte scanned cost (i.e. move
2308 // entries to/from the global stack). It basically tries to decrease the
2309 // scanning limit so that the clock is called earlier (after the 3/4 reduction below, at most a quarter of the normal period remains before the next clock call).
2310
2311 _words_scanned_limit = _real_words_scanned_limit -
2312 3 * words_scanned_period / 4;
2313 _refs_reached_limit = _real_refs_reached_limit -
2314 3 * refs_reached_period / 4;
2315 }
2316
2317 void G1CMTask::move_entries_to_global_stack() {
2318 // Local array where we'll store the entries that will be popped
2319 // from the local queue.
2320 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2321
2322 size_t n = 0;
2323 G1TaskQueueEntry task_entry;
2324 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2325 buffer[n] = task_entry;
2326 ++n;
2327 }
2328 if (n < G1CMMarkStack::EntriesPerChunk) {
2329 buffer[n] = G1TaskQueueEntry();
2330 }
2331
2332 if (n > 0) {
2333 if (!_cm->mark_stack_push(buffer)) {
2334 set_has_aborted();
2335 }
2336 }
2337
2338 // This operation was quite expensive, so decrease the limits.
2339 decrease_limits();
2340 }
2341
2342 bool G1CMTask::get_entries_from_global_stack() {
2343 // Local array where we'll store the entries that will be popped
2344 // from the global stack.
2345 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2346
2347 if (!_cm->mark_stack_pop(buffer)) {
2348 return false;
2349 }
2350
2351 // We did actually pop at least one entry.
2352 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2353 G1TaskQueueEntry task_entry = buffer[i];
2354 if (task_entry.is_null()) {
2355 break;
2356 }
2357 assert(task_entry.is_array_slice() || task_entry.obj()->is_oop(), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2358 bool success = _task_queue->push(task_entry);
2359 // We only call this when the local queue is empty or under a
2360 // given target limit. So, we do not expect this push to fail.
2361 assert(success, "invariant");
2362 }
2363
2364 // This operation was quite expensive, so decrease the limits
2365 decrease_limits();
2366 return true;
2367 }
2368
2369 void G1CMTask::drain_local_queue(bool partially) {
2370 if (has_aborted()) {
2371 return;
2372 }
2373
2374 // Decide what the target size is, depending on whether we're going to
2375 // drain it partially (so that other tasks can steal if they run out
2376 // of things to do) or totally (at the very end).
2377 size_t target_size;
2378 if (partially) {
2379 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2380 } else {
2381 target_size = 0;
2382 }
2383
2384 if (_task_queue->size() > target_size) {
2385 G1TaskQueueEntry entry;
2386 bool ret = _task_queue->pop_local(entry);
2387 while (ret) {
2388 scan_task_entry(entry);
2389 if (_task_queue->size() <= target_size || has_aborted()) {
2390 ret = false;
2391 } else {
2392 ret = _task_queue->pop_local(entry);
2393 }
2394 }
2395 }
2396 }
2397
2398 void G1CMTask::drain_global_stack(bool partially) {
2399 if (has_aborted()) return;
2400
2401 // We have a policy to drain the local queue before we attempt to
2402 // drain the global stack.
2403 assert(partially || _task_queue->size() == 0, "invariant");
2404
2405 // Decide what the target size is, depending on whether we're going to
2406 // drain it partially (so that other tasks can steal if they run out
2407 // of things to do) or totally (at the very end).
2408 // Notice that when draining the global mark stack partially, due to the raciness
2409 // of the mark stack size update we might in fact drop below the target. But,
2410 // this is not a problem.
2411 // In case of total draining, we simply process until the global mark stack is
2412 // totally empty, disregarding the size counter.
2413 if (partially) {
2414 size_t const target_size = _cm->partial_mark_stack_size_target();
2415 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2416 if (get_entries_from_global_stack()) {
2417 drain_local_queue(partially);
2418 }
2419 }
2420 } else {
2421 while (!has_aborted() && get_entries_from_global_stack()) {
2422 drain_local_queue(partially);
2423 }
2424 }
2425 }
2426
2427 // SATB Queue has several assumptions on whether to call the par or
2428 // non-par versions of the methods. This is why some of the code is
2429 // replicated. We should really get rid of the single-threaded version
2430 // of the code to simplify things.
2431 void G1CMTask::drain_satb_buffers() {
2432 if (has_aborted()) return;
2433
2434 // We set this so that the regular clock knows that we're in the
2435 // middle of draining buffers and doesn't set the abort flag when it
2436 // notices that SATB buffers are available for draining. It'd be
2437 // very counterproductive if it did that. :-)
2438 _draining_satb_buffers = true;
2439
2440 G1CMSATBBufferClosure satb_cl(this, _g1h);
2441 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2442
2443 // This keeps claiming and applying the closure to completed buffers
2444 // until we run out of buffers or we need to abort.
2445 while (!has_aborted() &&
2446 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2447 regular_clock_call();
2448 }
2449
2450 _draining_satb_buffers = false;
2451
2452 assert(has_aborted() ||
2453 concurrent() ||
2454 satb_mq_set.completed_buffers_num() == 0, "invariant");
2455
2456 // again, this was a potentially expensive operation, decrease the
2457 // limits to get the regular clock call earlier
2458 decrease_limits();
2459 }
2460
2461 void G1CMTask::print_stats() {
2462 log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
2463 _worker_id, _calls);
2464 log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2465 _elapsed_time_ms, _termination_time_ms);
2466 log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
2467 _step_times_ms.num(), _step_times_ms.avg(),
2468 _step_times_ms.sd());
2469 log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
2470 _step_times_ms.maximum(), _step_times_ms.sum());
2471 }
2472
2473 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) {
2474 return _task_queues->steal(worker_id, hash_seed, task_entry);
2475 }
2476
2477 /*****************************************************************************
2478
2479 The do_marking_step(time_target_ms, ...) method is the building
2480 block of the parallel marking framework. It can be called in parallel
2481 with other invocations of do_marking_step() on different tasks
2482 (but only one per task, obviously) and concurrently with the
2483 mutator threads, or during remark, hence it eliminates the need
2484 for two versions of the code. When called during remark, it will
2485 pick up from where the task left off during the concurrent marking
2486 phase. Interestingly, tasks are also claimable during evacuation
2487 pauses, since do_marking_step() ensures that it aborts before
2488 it needs to yield.
2489
2490 The data structures that it uses to do marking work are the
2491 following:
2492
2493 (1) Marking Bitmap. If there are gray objects that appear only
2494 on the bitmap (this happens either when dealing with an overflow
2495 or when the initial marking phase has simply marked the roots
2496 and didn't push them on the stack), then tasks claim heap
2497 regions whose bitmap they then scan to find gray objects. A
2498 global finger indicates where the end of the last claimed region
2499 is. A local finger indicates how far into the region a task has
2500 scanned. The two fingers are used to determine how to gray an
2501 object (i.e. whether simply marking it is OK, as it will be
2502 visited by a task in the future, or whether it also needs to be
2503 pushed on a stack).
2504
2505 (2) Local Queue. The local queue of the task, which it can access
2506 reasonably efficiently. Other tasks can steal from
2507 it when they run out of work. Throughout the marking phase, a
2508 task attempts to keep its local queue short but not totally
2509 empty, so that entries are available for stealing by other
2510 tasks. Only when there is no more work will a task totally
2511 drain its local queue.
2512
2513 (3) Global Mark Stack. This handles local queue overflow. During
2514 marking only sets of entries are moved between it and the local
2515 queues, as access to it requires a mutex and more fine-grained
2516 interaction with it might cause contention. If it
2517 overflows, then the marking phase should restart and iterate
2518 over the bitmap to identify gray objects.
Throughout the marking
2519 phase, tasks attempt to keep the global mark stack at a small
2520 length but not totally empty, so that entries are available for
2521 popping by other tasks. Only when there is no more work will tasks
2522 totally drain the global mark stack.
2523
2524 (4) SATB Buffer Queue. This is where completed SATB buffers are
2525 made available. Buffers are regularly removed from this queue
2526 and scanned for roots, so that the queue doesn't get too
2527 long. During remark, all completed buffers are processed, as
2528 well as the filled in parts of any uncompleted buffers.
2529
2530 The do_marking_step() method tries to abort when the time target
2531 has been reached. There are a few other cases when the
2532 do_marking_step() method also aborts:
2533
2534 (1) When the marking phase has been aborted (after a Full GC).
2535
2536 (2) When a global overflow (on the global stack) has been
2537 triggered. Before the task aborts, it will actually sync up with
2538 the other tasks to ensure that all the marking data structures
2539 (local queues, stacks, fingers etc.) are re-initialized so that
2540 when do_marking_step() completes, the marking phase can
2541 immediately restart.
2542
2543 (3) When enough completed SATB buffers are available. The
2544 do_marking_step() method only tries to drain SATB buffers right
2545 at the beginning. So, if enough buffers are available, the
2546 marking step aborts and the SATB buffers are processed at
2547 the beginning of the next invocation.
2548
2549 (4) To yield. When we have to yield, we abort and yield
2550 right at the end of do_marking_step(). This saves us from a lot
2551 of hassle as, by yielding, we might allow a Full GC. If this
2552 happens then objects will be compacted underneath our feet, the
2553 heap might shrink, etc. We save checking for this by just
2554 aborting and doing the yield right at the end.
2555
2556 From the above it follows that the do_marking_step() method should
2557 be called in a loop (or, otherwise, regularly) until it completes.
2558
2559 If a marking step completes without its has_aborted() flag being
2560 true, it means it has completed the current marking phase (and
2561 also all other marking tasks have done so and have all synced up).
2562
2563 A method called regular_clock_call() is invoked "regularly" (in
2564 sub-millisecond intervals) throughout marking. It is this clock method that
2565 checks all the abort conditions which were mentioned above and
2566 decides when the task should abort. A work-based scheme is used to
2567 trigger this clock method: when the number of object words the
2568 marking phase has scanned or the number of references the marking
2569 phase has visited reaches a given limit. Additional invocations of
2570 the clock method have been planted in a few other strategic places
2571 too. The initial reason for the clock method was to avoid calling
2572 vtime too regularly, as it is quite expensive. So, once it was in
2573 place, it was natural to piggy-back all the other conditions on it
2574 too and not constantly check them throughout the code.
2575
2576 If do_termination is true then do_marking_step will enter its
2577 termination protocol.
2578
2579 The value of is_serial must be true when do_marking_step is being
2580 called serially (i.e. by the VMThread) and do_marking_step should
2581 skip any synchronization in the termination and overflow code.
2582 Examples include the serial remark code and the serial reference
2583 processing closures.
2584
2585 The value of is_serial must be false when do_marking_step is
2586 being called by any of the worker threads in a work gang.
2587 Examples include the concurrent marking code (CMMarkingTask),
2588 the MT remark code, and the MT reference processing closures.
2589
2590 *****************************************************************************/
2591
2592 void G1CMTask::do_marking_step(double time_target_ms,
2593 bool do_termination,
2594 bool is_serial) {
2595 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2596 assert(concurrent() == _cm->concurrent(), "they should be the same");
2597
2598 G1Policy* g1_policy = _g1h->g1_policy();
2599 assert(_task_queues != NULL, "invariant");
2600 assert(_task_queue != NULL, "invariant");
2601 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
2602
2603 assert(!_claimed,
2604 "only one thread should claim this task at any one time");
2605
2606 // OK, this doesn't safeguard against all possible scenarios, as it is
2607 // possible for two threads to set the _claimed flag at the same
2608 // time. But it is only for debugging purposes anyway and it will
2609 // catch most problems.
2610 _claimed = true;
2611
2612 _start_time_ms = os::elapsedVTime() * 1000.0;
2613
2614 // If do_stealing is true then do_marking_step will attempt to
2615 // steal work from the other G1CMTasks. It only makes sense to
2616 // enable stealing when the termination protocol is enabled
2617 // and do_marking_step() is not being called serially.
2618 bool do_stealing = do_termination && !is_serial;
2619
2620 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2621 _time_target_ms = time_target_ms - diff_prediction_ms;
2622
2623 // set up the variables that are used in the work-based scheme to
2624 // call the regular clock method
2625 _words_scanned = 0;
2626 _refs_reached = 0;
2627 recalculate_limits();
2628
2629 // clear all flags
2630 clear_has_aborted();
2631 _has_timed_out = false;
2632 _draining_satb_buffers = false;
2633
2634 ++_calls;
2635
2636 // Set up the bitmap and oop closures. Anything that uses them is
2637 // eventually called from this method, so it is OK to allocate these
2638 // statically.
2639 G1CMBitMapClosure bitmap_closure(this, _cm);
2640 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
2641 set_cm_oop_closure(&cm_oop_closure);
2642
2643 if (_cm->has_overflown()) {
2644 // This can happen if the mark stack overflows during a GC pause
2645 // and this task, after a yield point, restarts. We have to abort
2646 // as we need to get into the overflow protocol which happens
2647 // right at the end of this task.
2648 set_has_aborted();
2649 }
2650
2651 // First drain any available SATB buffers. After this, we will not
2652 // look at SATB buffers before the next invocation of this method.
2653 // If enough completed SATB buffers are queued up, the regular clock
2654 // will abort this task so that it restarts.
2655 drain_satb_buffers();
2656 // ...then partially drain the local queue and the global stack
2657 drain_local_queue(true);
2658 drain_global_stack(true);
2659
2660 do {
2661 if (!has_aborted() && _curr_region != NULL) {
2662 // This means that we're already holding on to a region.
2663 assert(_finger != NULL, "if region is not NULL, then the finger "
2664 "should not be NULL either");
2665
2666 // We might have restarted this task after an evacuation pause
2667 // which might have evacuated the region we're holding on to
2668 // underneath our feet.
Let's read its limit again to make sure
2669 // that we do not iterate over a region of the heap that
2670 // contains garbage (update_region_limit() will also move
2671 // _finger to the start of the region if it is found empty).
2672 update_region_limit();
2673 // We will start from _finger not from the start of the region,
2674 // as we might be restarting this task after aborting half-way
2675 // through scanning this region. In this case, _finger points to
2676 // the address where we last found a marked object. If this is a
2677 // fresh region, _finger points to start().
2678 MemRegion mr = MemRegion(_finger, _region_limit);
2679
2680 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2681 "humongous regions should go around loop once only");
2682
2683 // Some special cases:
2684 // If the memory region is empty, we can just give up the region.
2685 // If the current region is humongous then we only need to check
2686 // the bitmap for the bit associated with the start of the object,
2687 // scan the object if it's live, and give up the region.
2688 // Otherwise, let's iterate over the bitmap of the part of the region
2689 // that is left.
2690 // If the iteration is successful, give up the region.
2691 if (mr.is_empty()) {
2692 giveup_current_region();
2693 regular_clock_call();
2694 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2695 if (_nextMarkBitMap->is_marked(mr.start())) {
2696 // The object is marked - apply the closure
2697 bitmap_closure.do_addr(mr.start());
2698 }
2699 // Even if this task aborted while scanning the humongous object
2700 // we can (and should) give up the current region.
2701 giveup_current_region();
2702 regular_clock_call();
2703 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
2704 giveup_current_region();
2705 regular_clock_call();
2706 } else {
2707 assert(has_aborted(), "currently the only way to do so");
2708 // The only way to abort the bitmap iteration is to return
2709 // false from the do_addr() method. However, inside the
2710 // do_addr() method we move the _finger to point to the
2711 // object currently being looked at. So, if we bail out, we
2712 // have definitely set _finger to something non-null.
2713 assert(_finger != NULL, "invariant");
2714
2715 // Region iteration was actually aborted. So now _finger
2716 // points to the address of the object we last scanned. If we
2717 // leave it there, when we restart this task, we will rescan
2718 // the object. It is easy to avoid this. We move the finger by
2719 // enough to point to the next possible object header (the
2720 // bitmap knows by how much we need to move it as it knows its
2721 // granularity).
2722 assert(_finger < _region_limit, "invariant");
2723 HeapWord* new_finger = _nextMarkBitMap->addr_after_obj(_finger);
2724 // Check if bitmap iteration was aborted while scanning the last object
2725 if (new_finger >= _region_limit) {
2726 giveup_current_region();
2727 } else {
2728 move_finger_to(new_finger);
2729 }
2730 }
2731 }
2732 // At this point we have either completed iterating over the
2733 // region we were holding on to, or we have aborted.
2734
2735 // We then partially drain the local queue and the global stack.
2736 // (Do we really need this?)
2737 drain_local_queue(true);
2738 drain_global_stack(true);
2739
2740 // Read the note on the claim_region() method about why it might
2741 // return NULL with potentially more regions available for
2742 // claiming and why we have to check out_of_regions() to determine
2743 // whether we're done or not.
2744 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2745 // We are going to try to claim a new region. We should have
2746 // given up on the previous one.
2747 // Separated the asserts so that we know which one fires.
2748 assert(_curr_region == NULL, "invariant");
2749 assert(_finger == NULL, "invariant");
2750 assert(_region_limit == NULL, "invariant");
2751 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2752 if (claimed_region != NULL) {
2753 // Yes, we managed to claim one
2754 setup_for_region(claimed_region);
2755 assert(_curr_region == claimed_region, "invariant");
2756 }
2757 // It is important to call the regular clock here. It might take
2758 // a while to claim a region if, for example, we hit a large
2759 // block of empty regions. So we need to call the regular clock
2760 // method once round the loop to make sure it's called
2761 // frequently enough.
2762 regular_clock_call();
2763 }
2764
2765 if (!has_aborted() && _curr_region == NULL) {
2766 assert(_cm->out_of_regions(),
2767 "at this point we should be out of regions");
2768 }
2769 } while (_curr_region != NULL && !has_aborted());
2770
2771 if (!has_aborted()) {
2772 // We cannot check whether the global stack is empty, since other
2773 // tasks might be pushing objects to it concurrently.
2774 assert(_cm->out_of_regions(),
2775 "at this point we should be out of regions");
2776 // Try to reduce the number of available SATB buffers so that
2777 // remark has less work to do.
2778 drain_satb_buffers();
2779 }
2780
2781 // Since we've done everything else, we can now totally drain the
2782 // local queue and global stack.
2783 drain_local_queue(false);
2784 drain_global_stack(false);
2785
2786 // Attempt at work stealing from other tasks' queues.
2787 if (do_stealing && !has_aborted()) {
2788 // We have not aborted. This means that we have finished all that
2789 // we could. Let's try to do some stealing...
2790
2791 // We cannot check whether the global stack is empty, since other
2792 // tasks might be pushing objects to it concurrently.
2793 assert(_cm->out_of_regions() && _task_queue->size() == 0,
2794 "only way to reach here");
2795 while (!has_aborted()) {
2796 G1TaskQueueEntry entry;
2797 if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) {
2798 scan_task_entry(entry);
2799
2800 // And since we're towards the end, let's totally drain the
2801 // local queue and global stack.
2802 drain_local_queue(false);
2803 drain_global_stack(false);
2804 } else {
2805 break;
2806 }
2807 }
2808 }
2809
2810 // We still haven't aborted. Now, let's try to get into the
2811 // termination protocol.
2812 if (do_termination && !has_aborted()) {
2813 // We cannot check whether the global stack is empty, since other
2814 // tasks might be concurrently pushing objects on it.
2815 // Separated the asserts so that we know which one fires.
2816 assert(_cm->out_of_regions(), "only way to reach here");
2817 assert(_task_queue->size() == 0, "only way to reach here");
2818 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2819
2820 // The G1CMTask class also extends the TerminatorTerminator class,
2821 // hence its should_exit_termination() method will also decide
2822 // whether to exit the termination protocol or not.
2823 bool finished = (is_serial ||
2824 _cm->terminator()->offer_termination(this));
2825 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2826 _termination_time_ms +=
2827 termination_end_time_ms - _termination_start_time_ms;
2828
2829 if (finished) {
2830 // We're all done.
2831
2832 if (_worker_id == 0) {
2833 // let's allow task 0 to do this
2834 if (concurrent()) {
2835 assert(_cm->concurrent_marking_in_progress(), "invariant");
2836 // we need to set this to false before the next
2837 // safepoint. This way we ensure that the marking phase
2838 // doesn't observe any more heap expansions.
2839 _cm->clear_concurrent_marking_in_progress();
2840 }
2841 }
2842
2843 // We can now guarantee that the global stack is empty, since
2844 // all other tasks have finished. We separated the guarantees so
2845 // that, if a condition is false, we can immediately find out
2846 // which one.
2847 guarantee(_cm->out_of_regions(), "only way to reach here");
2848 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2849 guarantee(_task_queue->size() == 0, "only way to reach here");
2850 guarantee(!_cm->has_overflown(), "only way to reach here");
2851 } else {
2852 // Apparently there's more work to do. Let's abort this task. The
2853 // caller will restart it and we can hopefully find more things to do.
2854 set_has_aborted();
2855 }
2856 }
2857
2858 // Mainly for debugging purposes to make sure that a pointer to the
2859 // closure which was statically allocated in this frame doesn't
2860 // escape it by accident.
2861 set_cm_oop_closure(NULL);
2862 double end_time_ms = os::elapsedVTime() * 1000.0;
2863 double elapsed_time_ms = end_time_ms - _start_time_ms;
2864 // Update the step history.
2865 _step_times_ms.add(elapsed_time_ms);
2866
2867 if (has_aborted()) {
2868 // The task was aborted for some reason.
2869 if (_has_timed_out) {
2870 double diff_ms = elapsed_time_ms - _time_target_ms;
2871 // Keep statistics of how well we did with respect to hitting
2872 // our target only if we actually timed out (if we aborted for
2873 // other reasons, then the results might get skewed).
2874 _marking_step_diffs_ms.add(diff_ms);
2875 }
2876
2877 if (_cm->has_overflown()) {
2878 // This is the interesting one. We aborted because a global
2879 // overflow was raised. This means we have to restart the
2880 // marking phase and start iterating over regions. However, in
2881 // order to do this we have to make sure that all tasks stop
2882 // what they are doing and re-initialize in a safe manner. We
2883 // will achieve this with the use of two barrier sync points.
2884
2885 if (!is_serial) {
2886 // We only need to enter the sync barrier if being called
2887 // from a parallel context
2888 _cm->enter_first_sync_barrier(_worker_id);
2889
2890 // When we exit this sync barrier we know that all tasks have
2891 // stopped doing marking work. So, it's now safe to
2892 // re-initialize our data structures. At the end of this method,
2893 // task 0 will clear the global data structures.
2894 }
2895
2896 // We clear the local state of this task...
2897 clear_region_fields(); 2898 2899 if (!is_serial) { 2900 // ...and enter the second barrier. 2901 _cm->enter_second_sync_barrier(_worker_id); 2902 } 2903 // At this point, if we're during the concurrent phase of 2904 // marking, everything has been re-initialized and we're 2905 // ready to restart. 2906 } 2907 } 2908 2909 _claimed = false; 2910 } 2911 2912 G1CMTask::G1CMTask(uint worker_id, 2913 G1ConcurrentMark* cm, 2914 G1CMTaskQueue* task_queue, 2915 G1CMTaskQueueSet* task_queues) 2916 : _g1h(G1CollectedHeap::heap()), 2917 _worker_id(worker_id), _cm(cm), 2918 _objArray_processor(this), 2919 _claimed(false), 2920 _nextMarkBitMap(NULL), _hash_seed(17), 2921 _task_queue(task_queue), 2922 _task_queues(task_queues), 2923 _cm_oop_closure(NULL) { 2924 guarantee(task_queue != NULL, "invariant"); 2925 guarantee(task_queues != NULL, "invariant"); 2926 2927 _marking_step_diffs_ms.add(0.5); 2928 } 2929 2930 // These are formatting macros that are used below to ensure 2931 // consistent formatting. The *_H_* versions are used to format the 2932 // header for a particular value and they should be kept consistent 2933 // with the corresponding macro. Also note that most of the macros add 2934 // the necessary white space (as a prefix) which makes them a bit 2935 // easier to compose. 2936 2937 // All the output lines are prefixed with this string to be able to 2938 // identify them easily in a large log file. 2939 #define G1PPRL_LINE_PREFIX "###" 2940 2941 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2942 #ifdef _LP64 2943 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2944 #else // _LP64 2945 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2946 #endif // _LP64 2947 2948 // For per-region info 2949 #define G1PPRL_TYPE_FORMAT " %-4s" 2950 #define G1PPRL_TYPE_H_FORMAT " %4s" 2951 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2952 #define G1PPRL_BYTE_H_FORMAT " %9s" 2953 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2954 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2955 2956 // For summary info 2957 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2958 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2959 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2960 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2961 2962 G1PrintRegionLivenessInfoClosure:: 2963 G1PrintRegionLivenessInfoClosure(const char* phase_name) 2964 : _total_used_bytes(0), _total_capacity_bytes(0), 2965 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2966 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { 2967 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2968 MemRegion g1_reserved = g1h->g1_reserved(); 2969 double now = os::elapsedTime(); 2970 2971 // Print the header of the output. 
2972 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2973 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2974 G1PPRL_SUM_ADDR_FORMAT("reserved") 2975 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2976 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2977 HeapRegion::GrainBytes); 2978 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2979 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2980 G1PPRL_TYPE_H_FORMAT 2981 G1PPRL_ADDR_BASE_H_FORMAT 2982 G1PPRL_BYTE_H_FORMAT 2983 G1PPRL_BYTE_H_FORMAT 2984 G1PPRL_BYTE_H_FORMAT 2985 G1PPRL_DOUBLE_H_FORMAT 2986 G1PPRL_BYTE_H_FORMAT 2987 G1PPRL_BYTE_H_FORMAT, 2988 "type", "address-range", 2989 "used", "prev-live", "next-live", "gc-eff", 2990 "remset", "code-roots"); 2991 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2992 G1PPRL_TYPE_H_FORMAT 2993 G1PPRL_ADDR_BASE_H_FORMAT 2994 G1PPRL_BYTE_H_FORMAT 2995 G1PPRL_BYTE_H_FORMAT 2996 G1PPRL_BYTE_H_FORMAT 2997 G1PPRL_DOUBLE_H_FORMAT 2998 G1PPRL_BYTE_H_FORMAT 2999 G1PPRL_BYTE_H_FORMAT, 3000 "", "", 3001 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 3002 "(bytes)", "(bytes)"); 3003 } 3004 3005 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 3006 const char* type = r->get_type_str(); 3007 HeapWord* bottom = r->bottom(); 3008 HeapWord* end = r->end(); 3009 size_t capacity_bytes = r->capacity(); 3010 size_t used_bytes = r->used(); 3011 size_t prev_live_bytes = r->live_bytes(); 3012 size_t next_live_bytes = r->next_live_bytes(); 3013 double gc_eff = r->gc_efficiency(); 3014 size_t remset_bytes = r->rem_set()->mem_size(); 3015 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3016 3017 _total_used_bytes += used_bytes; 3018 _total_capacity_bytes += capacity_bytes; 3019 _total_prev_live_bytes += prev_live_bytes; 3020 _total_next_live_bytes += next_live_bytes; 3021 _total_remset_bytes += remset_bytes; 3022 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3023 3024 // Print a line for this particular region. 3025 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3026 G1PPRL_TYPE_FORMAT 3027 G1PPRL_ADDR_BASE_FORMAT 3028 G1PPRL_BYTE_FORMAT 3029 G1PPRL_BYTE_FORMAT 3030 G1PPRL_BYTE_FORMAT 3031 G1PPRL_DOUBLE_FORMAT 3032 G1PPRL_BYTE_FORMAT 3033 G1PPRL_BYTE_FORMAT, 3034 type, p2i(bottom), p2i(end), 3035 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3036 remset_bytes, strong_code_roots_bytes); 3037 3038 return false; 3039 } 3040 3041 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3042 // add static memory usages to remembered set sizes 3043 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3044 // Print the footer of the output. 3045 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3046 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3047 " SUMMARY" 3048 G1PPRL_SUM_MB_FORMAT("capacity") 3049 G1PPRL_SUM_MB_PERC_FORMAT("used") 3050 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3051 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3052 G1PPRL_SUM_MB_FORMAT("remset") 3053 G1PPRL_SUM_MB_FORMAT("code-roots"), 3054 bytes_to_mb(_total_capacity_bytes), 3055 bytes_to_mb(_total_used_bytes), 3056 perc(_total_used_bytes, _total_capacity_bytes), 3057 bytes_to_mb(_total_prev_live_bytes), 3058 perc(_total_prev_live_bytes, _total_capacity_bytes), 3059 bytes_to_mb(_total_next_live_bytes), 3060 perc(_total_next_live_bytes, _total_capacity_bytes), 3061 bytes_to_mb(_total_remset_bytes), 3062 bytes_to_mb(_total_strong_code_roots_bytes)); 3063 }
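
// For illustration, a G1PrintRegionLivenessInfoClosure pass emits trace
// output shaped roughly like the following (enable it with
// -Xlog:gc+liveness=trace; the phase name, addresses and sizes below are
// made up):
//
//   ### PHASE <phase-name> @ 2.345
//   ###   HEAP  reserved: 0x00000000c0000000-0x0000000100000000  region-size: 1048576
//   ###
//   ###   type      address-range       used  prev-live  next-live   gc-eff   remset  code-roots
//   ###                              (bytes)    (bytes)    (bytes) (bytes/ms) (bytes)     (bytes)
//   ###   OLD   0x...-0x...          1048576    1038216    1038216      120.3    4216          24
//   ###
//   ###   SUMMARY  capacity: 1024.00 MB  used: 512.00 MB / 50.00 %  ...
//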