/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _survivors(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use a different number of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got humongous continues region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_initial_mark() {
  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);
}

void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial*/);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                      1, /* Minimum workers */
                                                      _num_concurrent_workers,
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_workers() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}

class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }

    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous starts
      // region. We need to distribute this value across the entire set of regions a
      // humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which roughly one thread should be spawned for this work.
  static const uint RegionsPerThread = 384;
};

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    _g1h->resize_heap_if_necessary();

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;

  public:
    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                 FreeRegionList* local_cleanup_list) :
      _g1h(g1h),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");
    }
  }
};

void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}

void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
  }
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure : public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  uint _ref_counter_limit;
  uint _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
    _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (_cm->has_overflown()) {
      return;
    }
    if (!_task->deal_with_reference(p)) {
      // We did not add anything to the mark bitmap (or mark stack), so there is
      // no point trying to drain it.
      return;
    }
    _ref_counter--;

    if (_ref_counter == 0) {
      // We have dealt with _ref_counter_limit references, pushing them
      // and objects reachable from them on to the local stack (and
      // possibly the global stack). Call G1CMTask::do_marking_step() to
      // process these entries.
      //
      // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
      // there's nothing more to do (i.e. we're done with the entries that
      // were pushed as a result of the G1CMTask::deal_with_reference() calls
      // above) or we overflow.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.
      do {
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
        _task->do_marking_step(mark_step_duration_ms,
                               false /* do_termination */,
                               _is_serial);
      } while (_task->has_aborted() && !_cm->has_overflown());
      _ref_counter = _ref_counter_limit;
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure : public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  virtual void execute(ProcessTask& task, uint ergo_workers);
};

class G1CMRefProcTaskProxy : public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
  assert(_workers->active_workers() >= ergo_workers,
         "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
         ergo_workers, _workers->active_workers());

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(ergo_workers);
  _workers->run_task(&proc_task_proxy, ergo_workers);
}

void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
  ResourceMark rm;
  HandleMark hm;

  // Is alive closure.
1546 G1CMIsAliveClosure g1_is_alive(_g1h);
1547
1548 // Inner scope to exclude the cleaning of the string table
1549 // from the displayed time.
1550 {
1551 GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1552
1553 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1554
1555 // See the comment in G1CollectedHeap::ref_processing_init()
1556 // about how reference processing currently works in G1.
1557
1558 // Set the soft reference policy
1559 rp->setup_policy(clear_all_soft_refs);
1560 assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1561
1562 // Instances of the 'Keep Alive' and 'Complete GC' closures used
1563 // in serial reference processing. Note these closures are also
1564 // used for serially processing (by the current thread) the
1565 // JNI references during parallel reference processing.
1566 //
1567 // These closures do not need to synchronize with the worker
1568 // threads involved in parallel reference processing as these
1569 // instances are executed serially by the current thread (i.e.
1570 // reference processing is not multi-threaded and is thus
1571 // performed by the current thread instead of a gang worker).
1572 //
1573 // The gang tasks involved in parallel reference processing create
1574 // their own instances of these closures, which do their own
1575 // synchronization among themselves.
1576 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1577 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1578
1579 // We need at least one active thread. If reference processing
1580 // is not multi-threaded we use the current (VMThread) thread,
1581 // otherwise we use the work gang from the G1CollectedHeap and
1582 // we utilize all the worker threads we can.
1583 bool processing_is_mt = rp->processing_is_mt();
1584 uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1585 active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1586
1587 // Parallel processing task executor.
1588 G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1589 _g1h->workers(), active_workers);
1590 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1591
1592 // Set the concurrency level. The phase was already set prior to
1593 // executing the remark task.
1594 set_concurrency(active_workers);
1595
1596 // Set the degree of MT processing here. If the discovery was done MT,
1597 // the number of threads involved during discovery could differ from
1598 // the number of active workers. This is OK as long as the discovered
1599 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1600 rp->set_active_mt_degree(active_workers);
1601
1602 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1603
1604 // Process the weak references.
1605 const ReferenceProcessorStats& stats =
1606 rp->process_discovered_references(&g1_is_alive,
1607 &g1_keep_alive,
1608 &g1_drain_mark_stack,
1609 executor,
1610 &pt);
1611 _gc_tracer_cm->report_gc_reference_stats(stats);
1612 pt.print_all_references();
1613
1614 // The do_oop work routines of the keep_alive and drain_marking_stack
1615 // oop closures will set the has_overflown flag if we overflow the
1616 // global marking stack.
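// That flag is checked right below: the global mark stack may only be
// non-empty here if it has overflown, and an overflow during reference
// processing is treated as fatal further down because g1_is_alive can
// no longer be trusted.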
1617 1618 assert(has_overflown() || _global_mark_stack.is_empty(), 1619 "Mark stack should be empty (unless it has overflown)"); 1620 1621 assert(rp->num_queues() == active_workers, "why not"); 1622 1623 rp->verify_no_references_recorded(); 1624 assert(!rp->discovery_enabled(), "Post condition"); 1625 } 1626 1627 if (has_overflown()) { 1628 // We can not trust g1_is_alive and the contents of the heap if the marking stack 1629 // overflowed while processing references. Exit the VM. 1630 fatal("Overflow during reference processing, can not continue. Please " 1631 "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and " 1632 "restart.", MarkStackSizeMax); 1633 return; 1634 } 1635 1636 assert(_global_mark_stack.is_empty(), "Marking should have completed"); 1637 1638 { 1639 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm); 1640 WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1); 1641 } 1642 1643 // Unload Klasses, String, Code Cache, etc. 1644 if (ClassUnloadingWithConcurrentMark) { 1645 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); 1646 bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm); 1647 _g1h->complete_cleaning(&g1_is_alive, purged_classes); 1648 } else { 1649 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm); 1650 // No need to clean string table as it is treated as strong roots when 1651 // class unloading is disabled. 1652 _g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled()); 1653 } 1654 } 1655 1656 class G1PrecleanYieldClosure : public YieldClosure { 1657 G1ConcurrentMark* _cm; 1658 1659 public: 1660 G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { } 1661 1662 virtual bool should_return() { 1663 return _cm->has_aborted(); 1664 } 1665 1666 virtual bool should_return_fine_grain() { 1667 _cm->do_yield_check(); 1668 return _cm->has_aborted(); 1669 } 1670 }; 1671 1672 void G1ConcurrentMark::preclean() { 1673 assert(G1UseReferencePrecleaning, "Precleaning must be enabled."); 1674 1675 SuspendibleThreadSetJoiner joiner; 1676 1677 G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */); 1678 G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */); 1679 1680 set_concurrency_and_phase(1, true); 1681 1682 G1PrecleanYieldClosure yield_cl(this); 1683 1684 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1685 // Precleaning is single threaded. Temporarily disable MT discovery. 1686 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); 1687 rp->preclean_discovered_references(rp->is_alive_non_header(), 1688 &keep_alive, 1689 &drain_mark_stack, 1690 &yield_cl, 1691 _gc_timer_cm); 1692 } 1693 1694 // When sampling object counts, we already swapped the mark bitmaps, so we need to use 1695 // the prev bitmap determining liveness. 1696 class G1ObjectCountIsAliveClosure: public BoolObjectClosure { 1697 G1CollectedHeap* _g1h; 1698 public: 1699 G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { } 1700 1701 bool do_object_b(oop obj) { 1702 HeapWord* addr = (HeapWord*)obj; 1703 return addr != NULL && 1704 (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj)); 1705 } 1706 }; 1707 1708 void G1ConcurrentMark::report_object_count(bool mark_completed) { 1709 // Depending on the completion of the marking liveness needs to be determined 1710 // using either the next or prev bitmap. 
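// If marking completed, the bitmaps have already been swapped (see
// swap_mark_bitmaps() below), so G1ObjectCountIsAliveClosure reads liveness
// from the prev bitmap; otherwise the regular concurrent-marking is-alive
// closure, which reflects the still in-progress marking, is used.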
1711 if (mark_completed) {
1712 G1ObjectCountIsAliveClosure is_alive(_g1h);
1713 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1714 } else {
1715 G1CMIsAliveClosure is_alive(_g1h);
1716 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1717 }
1718 }
1719
1720
1721 void G1ConcurrentMark::swap_mark_bitmaps() {
1722 G1CMBitMap* temp = _prev_mark_bitmap;
1723 _prev_mark_bitmap = _next_mark_bitmap;
1724 _next_mark_bitmap = temp;
1725 _g1h->collector_state()->set_clearing_next_bitmap(true);
1726 }
1727
1728 // Closure for marking entries in SATB buffers.
1729 class G1CMSATBBufferClosure : public SATBBufferClosure {
1730 private:
1731 G1CMTask* _task;
1732 G1CollectedHeap* _g1h;
1733
1734 // This is very similar to G1CMTask::deal_with_reference, but with
1735 // more relaxed requirements for the argument, so this must be more
1736 // circumspect about treating the argument as an object.
1737 void do_entry(void* entry) const {
1738 _task->increment_refs_reached();
1739 oop const obj = static_cast<oop>(entry);
1740 _task->make_reference_grey(obj);
1741 }
1742
1743 public:
1744 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1745 : _task(task), _g1h(g1h) { }
1746
1747 virtual void do_buffer(void** buffer, size_t size) {
1748 for (size_t i = 0; i < size; ++i) {
1749 do_entry(buffer[i]);
1750 }
1751 }
1752 };
1753
1754 class G1RemarkThreadsClosure : public ThreadClosure {
1755 G1CMSATBBufferClosure _cm_satb_cl;
1756 G1CMOopClosure _cm_cl;
1757 MarkingCodeBlobClosure _code_cl;
1758 int _thread_parity;
1759
1760 public:
1761 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1762 _cm_satb_cl(task, g1h),
1763 _cm_cl(g1h, task),
1764 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1765 _thread_parity(Threads::thread_claim_parity()) {}
1766
1767 void do_thread(Thread* thread) {
1768 if (thread->is_Java_thread()) {
1769 if (thread->claim_oops_do(true, _thread_parity)) {
1770 JavaThread* jt = (JavaThread*)thread;
1771
1772 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1773 // however, oops reachable from nmethods have very complex liveness lifecycles:
1774 // * Alive if on the stack of an executing method
1775 // * Weakly reachable otherwise
1776 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1777 // live by the SATB invariant but other oops recorded in nmethods may behave differently.
1778 jt->nmethods_do(&_code_cl);
1779
1780 G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl);
1781 }
1782 } else if (thread->is_VM_thread()) {
1783 if (thread->claim_oops_do(true, _thread_parity)) {
1784 G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1785 }
1786 }
1787 }
1788 };
1789
1790 class G1CMRemarkTask : public AbstractGangTask {
1791 G1ConcurrentMark* _cm;
1792 public:
1793 void work(uint worker_id) {
1794 G1CMTask* task = _cm->task(worker_id);
1795 task->record_start_time();
1796 {
1797 ResourceMark rm;
1798 HandleMark hm;
1799
1800 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1801 Threads::threads_do(&threads_f);
1802 }
1803
1804 do {
1805 task->do_marking_step(1000000000.0 /* something very large */,
1806 true /* do_termination */,
1807 false /* is_serial */);
1808 } while (task->has_aborted() && !_cm->has_overflown());
1809 // If we overflow, then we do not want to restart.
We instead 1810 // want to abort remark and do concurrent marking again. 1811 task->record_end_time(); 1812 } 1813 1814 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) : 1815 AbstractGangTask("Par Remark"), _cm(cm) { 1816 _cm->terminator()->reset_for_reuse(active_workers); 1817 } 1818 }; 1819 1820 void G1ConcurrentMark::finalize_marking() { 1821 ResourceMark rm; 1822 HandleMark hm; 1823 1824 _g1h->ensure_parsability(false); 1825 1826 // this is remark, so we'll use up all active threads 1827 uint active_workers = _g1h->workers()->active_workers(); 1828 set_concurrency_and_phase(active_workers, false /* concurrent */); 1829 // Leave _parallel_marking_threads at it's 1830 // value originally calculated in the G1ConcurrentMark 1831 // constructor and pass values of the active workers 1832 // through the gang in the task. 1833 1834 { 1835 StrongRootsScope srs(active_workers); 1836 1837 G1CMRemarkTask remarkTask(this, active_workers); 1838 // We will start all available threads, even if we decide that the 1839 // active_workers will be fewer. The extra ones will just bail out 1840 // immediately. 1841 _g1h->workers()->run_task(&remarkTask); 1842 } 1843 1844 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 1845 guarantee(has_overflown() || 1846 satb_mq_set.completed_buffers_num() == 0, 1847 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT, 1848 BOOL_TO_STR(has_overflown()), 1849 satb_mq_set.completed_buffers_num()); 1850 1851 print_stats(); 1852 } 1853 1854 void G1ConcurrentMark::flush_all_task_caches() { 1855 size_t hits = 0; 1856 size_t misses = 0; 1857 for (uint i = 0; i < _max_num_tasks; i++) { 1858 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache(); 1859 hits += stats.first; 1860 misses += stats.second; 1861 } 1862 size_t sum = hits + misses; 1863 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf", 1864 hits, misses, percent_of(hits, sum)); 1865 } 1866 1867 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) { 1868 _prev_mark_bitmap->clear_range(mr); 1869 } 1870 1871 HeapRegion* 1872 G1ConcurrentMark::claim_region(uint worker_id) { 1873 // "checkpoint" the finger 1874 HeapWord* finger = _finger; 1875 1876 while (finger < _heap.end()) { 1877 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 1878 1879 HeapRegion* curr_region = _g1h->heap_region_containing(finger); 1880 // Make sure that the reads below do not float before loading curr_region. 1881 OrderAccess::loadload(); 1882 // Above heap_region_containing may return NULL as we always scan claim 1883 // until the end of the heap. In this case, just jump to the next region. 1884 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; 1885 1886 // Is the gap between reading the finger and doing the CAS too long? 1887 HeapWord* res = Atomic::cmpxchg(end, &_finger, finger); 1888 if (res == finger && curr_region != NULL) { 1889 // we succeeded 1890 HeapWord* bottom = curr_region->bottom(); 1891 HeapWord* limit = curr_region->next_top_at_mark_start(); 1892 1893 // notice that _finger == end cannot be guaranteed here since, 1894 // someone else might have moved the finger even further 1895 assert(_finger >= end, "the finger should have moved forward"); 1896 1897 if (limit > bottom) { 1898 return curr_region; 1899 } else { 1900 assert(limit == bottom, 1901 "the region limit should be at bottom"); 1902 // we return NULL and the caller should try calling 1903 // claim_region() again. 
1904 return NULL; 1905 } 1906 } else { 1907 assert(_finger > finger, "the finger should have moved forward"); 1908 // read it again 1909 finger = _finger; 1910 } 1911 } 1912 1913 return NULL; 1914 } 1915 1916 #ifndef PRODUCT 1917 class VerifyNoCSetOops { 1918 G1CollectedHeap* _g1h; 1919 const char* _phase; 1920 int _info; 1921 1922 public: 1923 VerifyNoCSetOops(const char* phase, int info = -1) : 1924 _g1h(G1CollectedHeap::heap()), 1925 _phase(phase), 1926 _info(info) 1927 { } 1928 1929 void operator()(G1TaskQueueEntry task_entry) const { 1930 if (task_entry.is_array_slice()) { 1931 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1932 return; 1933 } 1934 guarantee(oopDesc::is_oop(task_entry.obj()), 1935 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1936 p2i(task_entry.obj()), _phase, _info); 1937 guarantee(!_g1h->is_in_cset(task_entry.obj()), 1938 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1939 p2i(task_entry.obj()), _phase, _info); 1940 } 1941 }; 1942 1943 void G1ConcurrentMark::verify_no_cset_oops() { 1944 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1945 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { 1946 return; 1947 } 1948 1949 // Verify entries on the global mark stack 1950 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1951 1952 // Verify entries on the task queues 1953 for (uint i = 0; i < _max_num_tasks; ++i) { 1954 G1CMTaskQueue* queue = _task_queues->queue(i); 1955 queue->iterate(VerifyNoCSetOops("Queue", i)); 1956 } 1957 1958 // Verify the global finger 1959 HeapWord* global_finger = finger(); 1960 if (global_finger != NULL && global_finger < _heap.end()) { 1961 // Since we always iterate over all regions, we might get a NULL HeapRegion 1962 // here. 1963 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1964 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1965 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1966 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1967 } 1968 1969 // Verify the task fingers 1970 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 1971 for (uint i = 0; i < _num_concurrent_workers; ++i) { 1972 G1CMTask* task = _tasks[i]; 1973 HeapWord* task_finger = task->finger(); 1974 if (task_finger != NULL && task_finger < _heap.end()) { 1975 // See above note on the global finger verification. 1976 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 1977 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 1978 !task_hr->in_collection_set(), 1979 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1980 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 1981 } 1982 } 1983 } 1984 #endif // PRODUCT 1985 1986 void G1ConcurrentMark::rebuild_rem_set_concurrently() { 1987 _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset); 1988 } 1989 1990 void G1ConcurrentMark::print_stats() { 1991 if (!log_is_enabled(Debug, gc, stats)) { 1992 return; 1993 } 1994 log_debug(gc, stats)("---------------------------------------------------------------------"); 1995 for (size_t i = 0; i < _num_active_tasks; ++i) { 1996 _tasks[i]->print_stats(); 1997 log_debug(gc, stats)("---------------------------------------------------------------------"); 1998 } 1999 } 2000 2001 void G1ConcurrentMark::concurrent_cycle_abort() { 2002 if (!cm_thread()->during_cycle() || _has_aborted) { 2003 // We haven't started a concurrent cycle or we have already aborted it. 
No need to do anything. 2004 return; 2005 } 2006 2007 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2008 // concurrent bitmap clearing. 2009 { 2010 GCTraceTime(Debug, gc) debug("Clear Next Bitmap"); 2011 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false); 2012 } 2013 // Note we cannot clear the previous marking bitmap here 2014 // since VerifyDuringGC verifies the objects marked during 2015 // a full GC against the previous bitmap. 2016 2017 // Empty mark stack 2018 reset_marking_for_restart(); 2019 for (uint i = 0; i < _max_num_tasks; ++i) { 2020 _tasks[i]->clear_region_fields(); 2021 } 2022 _first_overflow_barrier_sync.abort(); 2023 _second_overflow_barrier_sync.abort(); 2024 _has_aborted = true; 2025 2026 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2027 satb_mq_set.abandon_partial_marking(); 2028 // This can be called either during or outside marking, we'll read 2029 // the expected_active value from the SATB queue set. 2030 satb_mq_set.set_active_all_threads( 2031 false, /* new active value */ 2032 satb_mq_set.is_active() /* expected_active */); 2033 } 2034 2035 static void print_ms_time_info(const char* prefix, const char* name, 2036 NumberSeq& ns) { 2037 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2038 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2039 if (ns.num() > 0) { 2040 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", 2041 prefix, ns.sd(), ns.maximum()); 2042 } 2043 } 2044 2045 void G1ConcurrentMark::print_summary_info() { 2046 Log(gc, marking) log; 2047 if (!log.is_trace()) { 2048 return; 2049 } 2050 2051 log.trace(" Concurrent marking:"); 2052 print_ms_time_info(" ", "init marks", _init_times); 2053 print_ms_time_info(" ", "remarks", _remark_times); 2054 { 2055 print_ms_time_info(" ", "final marks", _remark_mark_times); 2056 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2057 2058 } 2059 print_ms_time_info(" ", "cleanups", _cleanup_times); 2060 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2061 _total_cleanup_time, (_cleanup_times.num() > 0 ? 
_total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2062 log.trace(" Total stop_world time = %8.2f s.", 2063 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2064 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2065 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum()); 2066 } 2067 2068 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2069 _concurrent_workers->print_worker_threads_on(st); 2070 } 2071 2072 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2073 _concurrent_workers->threads_do(tc); 2074 } 2075 2076 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2077 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2078 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap)); 2079 _prev_mark_bitmap->print_on_error(st, " Prev Bits: "); 2080 _next_mark_bitmap->print_on_error(st, " Next Bits: "); 2081 } 2082 2083 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2084 ReferenceProcessor* result = g1h->ref_processor_cm(); 2085 assert(result != NULL, "CM reference processor should not be NULL"); 2086 return result; 2087 } 2088 2089 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2090 G1CMTask* task) 2091 : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)), 2092 _g1h(g1h), _task(task) 2093 { } 2094 2095 void G1CMTask::setup_for_region(HeapRegion* hr) { 2096 assert(hr != NULL, 2097 "claim_region() should have filtered out NULL regions"); 2098 _curr_region = hr; 2099 _finger = hr->bottom(); 2100 update_region_limit(); 2101 } 2102 2103 void G1CMTask::update_region_limit() { 2104 HeapRegion* hr = _curr_region; 2105 HeapWord* bottom = hr->bottom(); 2106 HeapWord* limit = hr->next_top_at_mark_start(); 2107 2108 if (limit == bottom) { 2109 // The region was collected underneath our feet. 2110 // We set the finger to bottom to ensure that the bitmap 2111 // iteration that will follow this will not do anything. 2112 // (this is not a condition that holds when we set the region up, 2113 // as the region is not supposed to be empty in the first place) 2114 _finger = bottom; 2115 } else if (limit >= _region_limit) { 2116 assert(limit >= _finger, "peace of mind"); 2117 } else { 2118 assert(limit < _region_limit, "only way to get here"); 2119 // This can happen under some pretty unusual circumstances. An 2120 // evacuation pause empties the region underneath our feet (NTAMS 2121 // at bottom). We then do some allocation in the region (NTAMS 2122 // stays at bottom), followed by the region being used as a GC 2123 // alloc region (NTAMS will move to top() and the objects 2124 // originally below it will be grayed). All objects now marked in 2125 // the region are explicitly grayed, if below the global finger, 2126 // and we do not need in fact to scan anything else. So, we simply 2127 // set _finger to be limit to ensure that the bitmap iteration 2128 // doesn't do anything. 2129 _finger = limit; 2130 } 2131 2132 _region_limit = limit; 2133 } 2134 2135 void G1CMTask::giveup_current_region() { 2136 assert(_curr_region != NULL, "invariant"); 2137 clear_region_fields(); 2138 } 2139 2140 void G1CMTask::clear_region_fields() { 2141 // Values for these three fields that indicate that we're not 2142 // holding on to a region. 
2143 _curr_region = NULL;
2144 _finger = NULL;
2145 _region_limit = NULL;
2146 }
2147
2148 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2149 if (cm_oop_closure == NULL) {
2150 assert(_cm_oop_closure != NULL, "invariant");
2151 } else {
2152 assert(_cm_oop_closure == NULL, "invariant");
2153 }
2154 _cm_oop_closure = cm_oop_closure;
2155 }
2156
2157 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2158 guarantee(next_mark_bitmap != NULL, "invariant");
2159 _next_mark_bitmap = next_mark_bitmap;
2160 clear_region_fields();
2161
2162 _calls = 0;
2163 _elapsed_time_ms = 0.0;
2164 _termination_time_ms = 0.0;
2165 _termination_start_time_ms = 0.0;
2166
2167 _mark_stats_cache.reset();
2168 }
2169
2170 bool G1CMTask::should_exit_termination() {
2171 regular_clock_call();
2172 // This is called when we are in the termination protocol. We should
2173 // quit if, for some reason, this task wants to abort or the global
2174 // stack is not empty (this means that we can get work from it).
2175 return !_cm->mark_stack_empty() || has_aborted();
2176 }
2177
2178 void G1CMTask::reached_limit() {
2179 assert(_words_scanned >= _words_scanned_limit ||
2180 _refs_reached >= _refs_reached_limit,
2181 "shouldn't have been called otherwise");
2182 regular_clock_call();
2183 }
2184
2185 void G1CMTask::regular_clock_call() {
2186 if (has_aborted()) {
2187 return;
2188 }
2189
2190 // First, we need to recalculate the words scanned and refs reached
2191 // limits for the next clock call.
2192 recalculate_limits();
2193
2194 // During the regular clock call we do the following:
2195
2196 // (1) If an overflow has been flagged, then we abort.
2197 if (_cm->has_overflown()) {
2198 set_has_aborted();
2199 return;
2200 }
2201
2202 // If we are not concurrent (i.e. we're doing remark) we don't need
2203 // to check anything else. The other steps are only needed during
2204 // the concurrent marking phase.
2205 if (!_cm->concurrent()) {
2206 return;
2207 }
2208
2209 // (2) If marking has been aborted for Full GC, then we also abort.
2210 if (_cm->has_aborted()) {
2211 set_has_aborted();
2212 return;
2213 }
2214
2215 double curr_time_ms = os::elapsedVTime() * 1000.0;
2216
2217 // (3) We check whether we should yield. If we have to, then we abort.
2218 if (SuspendibleThreadSet::should_yield()) {
2219 // We should yield. To do this we abort the task. The caller is
2220 // responsible for yielding.
2221 set_has_aborted();
2222 return;
2223 }
2224
2225 // (4) We check whether we've reached our time quota. If we have,
2226 // then we abort.
2227 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2228 if (elapsed_time_ms > _time_target_ms) {
2229 set_has_aborted();
2230 _has_timed_out = true;
2231 return;
2232 }
2233
2234 // (5) Finally, we check whether there are enough completed SATB
2235 // buffers available for processing. If there are, we abort.
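// Aborting here is cheap: do_marking_step() drains any available SATB
// buffers again right at the start of its next invocation.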
2236 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2237 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 2238 // we do need to process SATB buffers, we'll abort and restart 2239 // the marking task to do so 2240 set_has_aborted(); 2241 return; 2242 } 2243 } 2244 2245 void G1CMTask::recalculate_limits() { 2246 _real_words_scanned_limit = _words_scanned + words_scanned_period; 2247 _words_scanned_limit = _real_words_scanned_limit; 2248 2249 _real_refs_reached_limit = _refs_reached + refs_reached_period; 2250 _refs_reached_limit = _real_refs_reached_limit; 2251 } 2252 2253 void G1CMTask::decrease_limits() { 2254 // This is called when we believe that we're going to do an infrequent 2255 // operation which will increase the per byte scanned cost (i.e. move 2256 // entries to/from the global stack). It basically tries to decrease the 2257 // scanning limit so that the clock is called earlier. 2258 2259 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4; 2260 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4; 2261 } 2262 2263 void G1CMTask::move_entries_to_global_stack() { 2264 // Local array where we'll store the entries that will be popped 2265 // from the local queue. 2266 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2267 2268 size_t n = 0; 2269 G1TaskQueueEntry task_entry; 2270 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) { 2271 buffer[n] = task_entry; 2272 ++n; 2273 } 2274 if (n < G1CMMarkStack::EntriesPerChunk) { 2275 buffer[n] = G1TaskQueueEntry(); 2276 } 2277 2278 if (n > 0) { 2279 if (!_cm->mark_stack_push(buffer)) { 2280 set_has_aborted(); 2281 } 2282 } 2283 2284 // This operation was quite expensive, so decrease the limits. 2285 decrease_limits(); 2286 } 2287 2288 bool G1CMTask::get_entries_from_global_stack() { 2289 // Local array where we'll store the entries that will be popped 2290 // from the global stack. 2291 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2292 2293 if (!_cm->mark_stack_pop(buffer)) { 2294 return false; 2295 } 2296 2297 // We did actually pop at least one entry. 2298 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) { 2299 G1TaskQueueEntry task_entry = buffer[i]; 2300 if (task_entry.is_null()) { 2301 break; 2302 } 2303 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj())); 2304 bool success = _task_queue->push(task_entry); 2305 // We only call this when the local queue is empty or under a 2306 // given target limit. So, we do not expect this push to fail. 2307 assert(success, "invariant"); 2308 } 2309 2310 // This operation was quite expensive, so decrease the limits 2311 decrease_limits(); 2312 return true; 2313 } 2314 2315 void G1CMTask::drain_local_queue(bool partially) { 2316 if (has_aborted()) { 2317 return; 2318 } 2319 2320 // Decide what the target size is, depending whether we're going to 2321 // drain it partially (so that other tasks can steal if they run out 2322 // of things to do) or totally (at the very end). 
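// A partial drain leaves roughly a third of the queue (capped at
// GCDrainStackTargetSize) so that other tasks can still steal from it;
// a total drain uses a target of zero and empties the queue.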
2323 size_t target_size;
2324 if (partially) {
2325 target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2326 } else {
2327 target_size = 0;
2328 }
2329
2330 if (_task_queue->size() > target_size) {
2331 G1TaskQueueEntry entry;
2332 bool ret = _task_queue->pop_local(entry);
2333 while (ret) {
2334 scan_task_entry(entry);
2335 if (_task_queue->size() <= target_size || has_aborted()) {
2336 ret = false;
2337 } else {
2338 ret = _task_queue->pop_local(entry);
2339 }
2340 }
2341 }
2342 }
2343
2344 void G1CMTask::drain_global_stack(bool partially) {
2345 if (has_aborted()) {
2346 return;
2347 }
2348
2349 // We have a policy to drain the local queue before we attempt to
2350 // drain the global stack.
2351 assert(partially || _task_queue->size() == 0, "invariant");
2352
2353 // Decide what the target size is, depending on whether we're going to
2354 // drain it partially (so that other tasks can steal if they run out
2355 // of things to do) or totally (at the very end).
2356 // Notice that when draining the global mark stack partially, due to the raciness
2357 // of the mark stack size update we might in fact drop below the target. But,
2358 // this is not a problem.
2359 // In case of total draining, we simply process until the global mark stack is
2360 // totally empty, disregarding the size counter.
2361 if (partially) {
2362 size_t const target_size = _cm->partial_mark_stack_size_target();
2363 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2364 if (get_entries_from_global_stack()) {
2365 drain_local_queue(partially);
2366 }
2367 }
2368 } else {
2369 while (!has_aborted() && get_entries_from_global_stack()) {
2370 drain_local_queue(partially);
2371 }
2372 }
2373 }
2374
2375 // The SATB queue code has several assumptions on whether to call the par or
2376 // non-par versions of the methods. This is why some of the code is
2377 // replicated. We should really get rid of the single-threaded version
2378 // of the code to simplify things.
2379 void G1CMTask::drain_satb_buffers() {
2380 if (has_aborted()) {
2381 return;
2382 }
2383
2384 // We set this so that the regular clock knows that we're in the
2385 // middle of draining buffers and doesn't set the abort flag when it
2386 // notices that SATB buffers are available for draining. It'd be
2387 // very counterproductive if it did that. :-)
2388 _draining_satb_buffers = true;
2389
2390 G1CMSATBBufferClosure satb_cl(this, _g1h);
2391 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2392
2393 // This keeps claiming and applying the closure to completed buffers
2394 // until we run out of buffers or we need to abort.
2395 while (!has_aborted() && 2396 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 2397 regular_clock_call(); 2398 } 2399 2400 _draining_satb_buffers = false; 2401 2402 assert(has_aborted() || 2403 _cm->concurrent() || 2404 satb_mq_set.completed_buffers_num() == 0, "invariant"); 2405 2406 // again, this was a potentially expensive operation, decrease the 2407 // limits to get the regular clock call early 2408 decrease_limits(); 2409 } 2410 2411 void G1CMTask::clear_mark_stats_cache(uint region_idx) { 2412 _mark_stats_cache.reset(region_idx); 2413 } 2414 2415 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() { 2416 return _mark_stats_cache.evict_all(); 2417 } 2418 2419 void G1CMTask::print_stats() { 2420 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls); 2421 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 2422 _elapsed_time_ms, _termination_time_ms); 2423 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms", 2424 _step_times_ms.num(), 2425 _step_times_ms.avg(), 2426 _step_times_ms.sd(), 2427 _step_times_ms.maximum(), 2428 _step_times_ms.sum()); 2429 size_t const hits = _mark_stats_cache.hits(); 2430 size_t const misses = _mark_stats_cache.misses(); 2431 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f", 2432 hits, misses, percent_of(hits, hits + misses)); 2433 } 2434 2435 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) { 2436 return _task_queues->steal(worker_id, task_entry); 2437 } 2438 2439 /***************************************************************************** 2440 2441 The do_marking_step(time_target_ms, ...) method is the building 2442 block of the parallel marking framework. It can be called in parallel 2443 with other invocations of do_marking_step() on different tasks 2444 (but only one per task, obviously) and concurrently with the 2445 mutator threads, or during remark, hence it eliminates the need 2446 for two versions of the code. When called during remark, it will 2447 pick up from where the task left off during the concurrent marking 2448 phase. Interestingly, tasks are also claimable during evacuation 2449 pauses too, since do_marking_step() ensures that it aborts before 2450 it needs to yield. 2451 2452 The data structures that it uses to do marking work are the 2453 following: 2454 2455 (1) Marking Bitmap. If there are gray objects that appear only 2456 on the bitmap (this happens either when dealing with an overflow 2457 or when the initial marking phase has simply marked the roots 2458 and didn't push them on the stack), then tasks claim heap 2459 regions whose bitmap they then scan to find gray objects. A 2460 global finger indicates where the end of the last claimed region 2461 is. A local finger indicates how far into the region a task has 2462 scanned. The two fingers are used to determine how to gray an 2463 object (i.e. whether simply marking it is OK, as it will be 2464 visited by a task in the future, or whether it needs to be also 2465 pushed on a stack). 2466 2467 (2) Local Queue. The local queue of the task which is accessed 2468 reasonably efficiently by the task. Other tasks can steal from 2469 it when they run out of work. Throughout the marking phase, a 2470 task attempts to keep its local queue short but not totally 2471 empty, so that entries are available for stealing by other 2472 tasks. 
Only when there is no more work, a task will totally 2473 drain its local queue. 2474 2475 (3) Global Mark Stack. This handles local queue overflow. During 2476 marking only sets of entries are moved between it and the local 2477 queues, as access to it requires a mutex and more fine-grain 2478 interaction with it which might cause contention. If it 2479 overflows, then the marking phase should restart and iterate 2480 over the bitmap to identify gray objects. Throughout the marking 2481 phase, tasks attempt to keep the global mark stack at a small 2482 length but not totally empty, so that entries are available for 2483 popping by other tasks. Only when there is no more work, tasks 2484 will totally drain the global mark stack. 2485 2486 (4) SATB Buffer Queue. This is where completed SATB buffers are 2487 made available. Buffers are regularly removed from this queue 2488 and scanned for roots, so that the queue doesn't get too 2489 long. During remark, all completed buffers are processed, as 2490 well as the filled in parts of any uncompleted buffers. 2491 2492 The do_marking_step() method tries to abort when the time target 2493 has been reached. There are a few other cases when the 2494 do_marking_step() method also aborts: 2495 2496 (1) When the marking phase has been aborted (after a Full GC). 2497 2498 (2) When a global overflow (on the global stack) has been 2499 triggered. Before the task aborts, it will actually sync up with 2500 the other tasks to ensure that all the marking data structures 2501 (local queues, stacks, fingers etc.) are re-initialized so that 2502 when do_marking_step() completes, the marking phase can 2503 immediately restart. 2504 2505 (3) When enough completed SATB buffers are available. The 2506 do_marking_step() method only tries to drain SATB buffers right 2507 at the beginning. So, if enough buffers are available, the 2508 marking step aborts and the SATB buffers are processed at 2509 the beginning of the next invocation. 2510 2511 (4) To yield. when we have to yield then we abort and yield 2512 right at the end of do_marking_step(). This saves us from a lot 2513 of hassle as, by yielding we might allow a Full GC. If this 2514 happens then objects will be compacted underneath our feet, the 2515 heap might shrink, etc. We save checking for this by just 2516 aborting and doing the yield right at the end. 2517 2518 From the above it follows that the do_marking_step() method should 2519 be called in a loop (or, otherwise, regularly) until it completes. 2520 2521 If a marking step completes without its has_aborted() flag being 2522 true, it means it has completed the current marking phase (and 2523 also all other marking tasks have done so and have all synced up). 2524 2525 A method called regular_clock_call() is invoked "regularly" (in 2526 sub ms intervals) throughout marking. It is this clock method that 2527 checks all the abort conditions which were mentioned above and 2528 decides when the task should abort. A work-based scheme is used to 2529 trigger this clock method: when the number of object words the 2530 marking phase has scanned or the number of references the marking 2531 phase has visited reach a given limit. Additional invocations to 2532 the method clock have been planted in a few other strategic places 2533 too. The initial reason for the clock method was to avoid calling 2534 vtime too regularly, as it is quite expensive. 
So, once it was in 2535 place, it was natural to piggy-back all the other conditions on it 2536 too and not constantly check them throughout the code. 2537 2538 If do_termination is true then do_marking_step will enter its 2539 termination protocol. 2540 2541 The value of is_serial must be true when do_marking_step is being 2542 called serially (i.e. by the VMThread) and do_marking_step should 2543 skip any synchronization in the termination and overflow code. 2544 Examples include the serial remark code and the serial reference 2545 processing closures. 2546 2547 The value of is_serial must be false when do_marking_step is 2548 being called by any of the worker threads in a work gang. 2549 Examples include the concurrent marking code (CMMarkingTask), 2550 the MT remark code, and the MT reference processing closures. 2551 2552 *****************************************************************************/ 2553 2554 void G1CMTask::do_marking_step(double time_target_ms, 2555 bool do_termination, 2556 bool is_serial) { 2557 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2558 2559 _start_time_ms = os::elapsedVTime() * 1000.0; 2560 2561 // If do_stealing is true then do_marking_step will attempt to 2562 // steal work from the other G1CMTasks. It only makes sense to 2563 // enable stealing when the termination protocol is enabled 2564 // and do_marking_step() is not being called serially. 2565 bool do_stealing = do_termination && !is_serial; 2566 2567 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 2568 _time_target_ms = time_target_ms - diff_prediction_ms; 2569 2570 // set up the variables that are used in the work-based scheme to 2571 // call the regular clock method 2572 _words_scanned = 0; 2573 _refs_reached = 0; 2574 recalculate_limits(); 2575 2576 // clear all flags 2577 clear_has_aborted(); 2578 _has_timed_out = false; 2579 _draining_satb_buffers = false; 2580 2581 ++_calls; 2582 2583 // Set up the bitmap and oop closures. Anything that uses them is 2584 // eventually called from this method, so it is OK to allocate these 2585 // statically. 2586 G1CMBitMapClosure bitmap_closure(this, _cm); 2587 G1CMOopClosure cm_oop_closure(_g1h, this); 2588 set_cm_oop_closure(&cm_oop_closure); 2589 2590 if (_cm->has_overflown()) { 2591 // This can happen if the mark stack overflows during a GC pause 2592 // and this task, after a yield point, restarts. We have to abort 2593 // as we need to get into the overflow protocol which happens 2594 // right at the end of this task. 2595 set_has_aborted(); 2596 } 2597 2598 // First drain any available SATB buffers. After this, we will not 2599 // look at SATB buffers before the next invocation of this method. 2600 // If enough completed SATB buffers are queued up, the regular clock 2601 // will abort this task so that it restarts. 2602 drain_satb_buffers(); 2603 // ...then partially drain the local queue and the global stack 2604 drain_local_queue(true); 2605 drain_global_stack(true); 2606 2607 do { 2608 if (!has_aborted() && _curr_region != NULL) { 2609 // This means that we're already holding on to a region. 2610 assert(_finger != NULL, "if region is not NULL, then the finger " 2611 "should not be NULL either"); 2612 2613 // We might have restarted this task after an evacuation pause 2614 // which might have evacuated the region we're holding on to 2615 // underneath our feet. 
Let's read its limit again to make sure
2616 // that we do not iterate over a region of the heap that
2617 // contains garbage (update_region_limit() will also move
2618 // _finger to the start of the region if it is found empty).
2619 update_region_limit();
2620 // We will start from _finger not from the start of the region,
2621 // as we might be restarting this task after aborting half-way
2622 // through scanning this region. In this case, _finger points to
2623 // the address where we last found a marked object. If this is a
2624 // fresh region, _finger points to start().
2625 MemRegion mr = MemRegion(_finger, _region_limit);
2626
2627 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2628 "humongous regions should go around loop once only");
2629
2630 // Some special cases:
2631 // If the memory region is empty, we can just give up the region.
2632 // If the current region is humongous then we only need to check
2633 // the bitmap for the bit associated with the start of the object,
2634 // scan the object if it's live, and give up the region.
2635 // Otherwise, let's iterate over the bitmap of the part of the region
2636 // that is left.
2637 // If the iteration is successful, give up the region.
2638 if (mr.is_empty()) {
2639 giveup_current_region();
2640 regular_clock_call();
2641 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2642 if (_next_mark_bitmap->is_marked(mr.start())) {
2643 // The object is marked - apply the closure
2644 bitmap_closure.do_addr(mr.start());
2645 }
2646 // Even if this task aborted while scanning the humongous object
2647 // we can (and should) give up the current region.
2648 giveup_current_region();
2649 regular_clock_call();
2650 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2651 giveup_current_region();
2652 regular_clock_call();
2653 } else {
2654 assert(has_aborted(), "currently the only way to do so");
2655 // The only way to abort the bitmap iteration is to return
2656 // false from the do_addr() method. However, inside the
2657 // do_addr() method we move the _finger to point to the
2658 // object currently being looked at. So, if we bail out, we
2659 // have definitely set _finger to something non-null.
2660 assert(_finger != NULL, "invariant");
2661
2662 // Region iteration was actually aborted. So now _finger
2663 // points to the address of the object we last scanned. If we
2664 // leave it there, when we restart this task, we will rescan
2665 // the object. It is easy to avoid this. We move the finger by
2666 // enough to point to the next possible object header.
2667 assert(_finger < _region_limit, "invariant");
2668 HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2669 // Check if bitmap iteration was aborted while scanning the last object
2670 if (new_finger >= _region_limit) {
2671 giveup_current_region();
2672 } else {
2673 move_finger_to(new_finger);
2674 }
2675 }
2676 }
2677 // At this point we have either completed iterating over the
2678 // region we were holding on to, or we have aborted.
2679
2680 // We then partially drain the local queue and the global stack.
2681 // (Do we really need this?)
2682 drain_local_queue(true);
2683 drain_global_stack(true);
2684
2685 // Read the note on the claim_region() method on why it might
2686 // return NULL with potentially more regions available for
2687 // claiming and why we have to check out_of_regions() to determine
2688 // whether we're done or not.
2689 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 2690 // We are going to try to claim a new region. We should have 2691 // given up on the previous one. 2692 // Separated the asserts so that we know which one fires. 2693 assert(_curr_region == NULL, "invariant"); 2694 assert(_finger == NULL, "invariant"); 2695 assert(_region_limit == NULL, "invariant"); 2696 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 2697 if (claimed_region != NULL) { 2698 // Yes, we managed to claim one 2699 setup_for_region(claimed_region); 2700 assert(_curr_region == claimed_region, "invariant"); 2701 } 2702 // It is important to call the regular clock here. It might take 2703 // a while to claim a region if, for example, we hit a large 2704 // block of empty regions. So we need to call the regular clock 2705 // method once round the loop to make sure it's called 2706 // frequently enough. 2707 regular_clock_call(); 2708 } 2709 2710 if (!has_aborted() && _curr_region == NULL) { 2711 assert(_cm->out_of_regions(), 2712 "at this point we should be out of regions"); 2713 } 2714 } while ( _curr_region != NULL && !has_aborted()); 2715 2716 if (!has_aborted()) { 2717 // We cannot check whether the global stack is empty, since other 2718 // tasks might be pushing objects to it concurrently. 2719 assert(_cm->out_of_regions(), 2720 "at this point we should be out of regions"); 2721 // Try to reduce the number of available SATB buffers so that 2722 // remark has less work to do. 2723 drain_satb_buffers(); 2724 } 2725 2726 // Since we've done everything else, we can now totally drain the 2727 // local queue and global stack. 2728 drain_local_queue(false); 2729 drain_global_stack(false); 2730 2731 // Attempt at work stealing from other task's queues. 2732 if (do_stealing && !has_aborted()) { 2733 // We have not aborted. This means that we have finished all that 2734 // we could. Let's try to do some stealing... 2735 2736 // We cannot check whether the global stack is empty, since other 2737 // tasks might be pushing objects to it concurrently. 2738 assert(_cm->out_of_regions() && _task_queue->size() == 0, 2739 "only way to reach here"); 2740 while (!has_aborted()) { 2741 G1TaskQueueEntry entry; 2742 if (_cm->try_stealing(_worker_id, entry)) { 2743 scan_task_entry(entry); 2744 2745 // And since we're towards the end, let's totally drain the 2746 // local queue and global stack. 2747 drain_local_queue(false); 2748 drain_global_stack(false); 2749 } else { 2750 break; 2751 } 2752 } 2753 } 2754 2755 // We still haven't aborted. Now, let's try to get into the 2756 // termination protocol. 2757 if (do_termination && !has_aborted()) { 2758 // We cannot check whether the global stack is empty, since other 2759 // tasks might be concurrently pushing objects on it. 2760 // Separated the asserts so that we know which one fires. 2761 assert(_cm->out_of_regions(), "only way to reach here"); 2762 assert(_task_queue->size() == 0, "only way to reach here"); 2763 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 2764 2765 // The G1CMTask class also extends the TerminatorTerminator class, 2766 // hence its should_exit_termination() method will also decide 2767 // whether to exit the termination protocol or not. 2768 bool finished = (is_serial || 2769 _cm->terminator()->offer_termination(this)); 2770 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 2771 _termination_time_ms += 2772 termination_end_time_ms - _termination_start_time_ms; 2773 2774 if (finished) { 2775 // We're all done. 
2776 2777 // We can now guarantee that the global stack is empty, since 2778 // all other tasks have finished. We separated the guarantees so 2779 // that, if a condition is false, we can immediately find out 2780 // which one. 2781 guarantee(_cm->out_of_regions(), "only way to reach here"); 2782 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 2783 guarantee(_task_queue->size() == 0, "only way to reach here"); 2784 guarantee(!_cm->has_overflown(), "only way to reach here"); 2785 } else { 2786 // Apparently there's more work to do. Let's abort this task. It 2787 // will restart it and we can hopefully find more things to do. 2788 set_has_aborted(); 2789 } 2790 } 2791 2792 // Mainly for debugging purposes to make sure that a pointer to the 2793 // closure which was statically allocated in this frame doesn't 2794 // escape it by accident. 2795 set_cm_oop_closure(NULL); 2796 double end_time_ms = os::elapsedVTime() * 1000.0; 2797 double elapsed_time_ms = end_time_ms - _start_time_ms; 2798 // Update the step history. 2799 _step_times_ms.add(elapsed_time_ms); 2800 2801 if (has_aborted()) { 2802 // The task was aborted for some reason. 2803 if (_has_timed_out) { 2804 double diff_ms = elapsed_time_ms - _time_target_ms; 2805 // Keep statistics of how well we did with respect to hitting 2806 // our target only if we actually timed out (if we aborted for 2807 // other reasons, then the results might get skewed). 2808 _marking_step_diffs_ms.add(diff_ms); 2809 } 2810 2811 if (_cm->has_overflown()) { 2812 // This is the interesting one. We aborted because a global 2813 // overflow was raised. This means we have to restart the 2814 // marking phase and start iterating over regions. However, in 2815 // order to do this we have to make sure that all tasks stop 2816 // what they are doing and re-initialize in a safe manner. We 2817 // will achieve this with the use of two barrier sync points. 2818 2819 if (!is_serial) { 2820 // We only need to enter the sync barrier if being called 2821 // from a parallel context 2822 _cm->enter_first_sync_barrier(_worker_id); 2823 2824 // When we exit this sync barrier we know that all tasks have 2825 // stopped doing marking work. So, it's now safe to 2826 // re-initialize our data structures. 2827 } 2828 2829 clear_region_fields(); 2830 flush_mark_stats_cache(); 2831 2832 if (!is_serial) { 2833 // If we're executing the concurrent phase of marking, reset the marking 2834 // state; otherwise the marking state is reset after reference processing, 2835 // during the remark pause. 2836 // If we reset here as a result of an overflow during the remark we will 2837 // see assertion failures from any subsequent set_concurrency_and_phase() 2838 // calls. 2839 if (_cm->concurrent() && _worker_id == 0) { 2840 // Worker 0 is responsible for clearing the global data structures because 2841 // of an overflow. During STW we should not clear the overflow flag (in 2842 // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit 2843 // method to abort the pause and restart concurrent marking. 2844 _cm->reset_marking_for_restart(); 2845 2846 log_info(gc, marking)("Concurrent Mark reset for overflow"); 2847 } 2848 2849 // ...and enter the second barrier. 2850 _cm->enter_second_sync_barrier(_worker_id); 2851 } 2852 // At this point, if we're during the concurrent phase of 2853 // marking, everything has been re-initialized and we're 2854 // ready to restart. 
2855 } 2856 } 2857 } 2858 2859 G1CMTask::G1CMTask(uint worker_id, 2860 G1ConcurrentMark* cm, 2861 G1CMTaskQueue* task_queue, 2862 G1RegionMarkStats* mark_stats, 2863 uint max_regions) : 2864 _objArray_processor(this), 2865 _worker_id(worker_id), 2866 _g1h(G1CollectedHeap::heap()), 2867 _cm(cm), 2868 _next_mark_bitmap(NULL), 2869 _task_queue(task_queue), 2870 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize), 2871 _calls(0), 2872 _time_target_ms(0.0), 2873 _start_time_ms(0.0), 2874 _cm_oop_closure(NULL), 2875 _curr_region(NULL), 2876 _finger(NULL), 2877 _region_limit(NULL), 2878 _words_scanned(0), 2879 _words_scanned_limit(0), 2880 _real_words_scanned_limit(0), 2881 _refs_reached(0), 2882 _refs_reached_limit(0), 2883 _real_refs_reached_limit(0), 2884 _has_aborted(false), 2885 _has_timed_out(false), 2886 _draining_satb_buffers(false), 2887 _step_times_ms(), 2888 _elapsed_time_ms(0.0), 2889 _termination_time_ms(0.0), 2890 _termination_start_time_ms(0.0), 2891 _marking_step_diffs_ms() 2892 { 2893 guarantee(task_queue != NULL, "invariant"); 2894 2895 _marking_step_diffs_ms.add(0.5); 2896 } 2897 2898 // These are formatting macros that are used below to ensure 2899 // consistent formatting. The *_H_* versions are used to format the 2900 // header for a particular value and they should be kept consistent 2901 // with the corresponding macro. Also note that most of the macros add 2902 // the necessary white space (as a prefix) which makes them a bit 2903 // easier to compose. 2904 2905 // All the output lines are prefixed with this string to be able to 2906 // identify them easily in a large log file. 2907 #define G1PPRL_LINE_PREFIX "###" 2908 2909 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2910 #ifdef _LP64 2911 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2912 #else // _LP64 2913 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2914 #endif // _LP64 2915 2916 // For per-region info 2917 #define G1PPRL_TYPE_FORMAT " %-4s" 2918 #define G1PPRL_TYPE_H_FORMAT " %4s" 2919 #define G1PPRL_STATE_FORMAT " %-5s" 2920 #define G1PPRL_STATE_H_FORMAT " %5s" 2921 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2922 #define G1PPRL_BYTE_H_FORMAT " %9s" 2923 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2924 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2925 2926 // For summary info 2927 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2928 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2929 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2930 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2931 2932 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2933 _total_used_bytes(0), _total_capacity_bytes(0), 2934 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2935 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2936 { 2937 if (!log_is_enabled(Trace, gc, liveness)) { 2938 return; 2939 } 2940 2941 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2942 MemRegion g1_reserved = g1h->g1_reserved(); 2943 double now = os::elapsedTime(); 2944 2945 // Print the header of the output. 
2946 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2947 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2948 G1PPRL_SUM_ADDR_FORMAT("reserved") 2949 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2950 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2951 HeapRegion::GrainBytes); 2952 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2953 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2954 G1PPRL_TYPE_H_FORMAT 2955 G1PPRL_ADDR_BASE_H_FORMAT 2956 G1PPRL_BYTE_H_FORMAT 2957 G1PPRL_BYTE_H_FORMAT 2958 G1PPRL_BYTE_H_FORMAT 2959 G1PPRL_DOUBLE_H_FORMAT 2960 G1PPRL_BYTE_H_FORMAT 2961 G1PPRL_STATE_H_FORMAT 2962 G1PPRL_BYTE_H_FORMAT, 2963 "type", "address-range", 2964 "used", "prev-live", "next-live", "gc-eff", 2965 "remset", "state", "code-roots"); 2966 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2967 G1PPRL_TYPE_H_FORMAT 2968 G1PPRL_ADDR_BASE_H_FORMAT 2969 G1PPRL_BYTE_H_FORMAT 2970 G1PPRL_BYTE_H_FORMAT 2971 G1PPRL_BYTE_H_FORMAT 2972 G1PPRL_DOUBLE_H_FORMAT 2973 G1PPRL_BYTE_H_FORMAT 2974 G1PPRL_STATE_H_FORMAT 2975 G1PPRL_BYTE_H_FORMAT, 2976 "", "", 2977 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 2978 "(bytes)", "", "(bytes)"); 2979 } 2980 2981 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { 2982 if (!log_is_enabled(Trace, gc, liveness)) { 2983 return false; 2984 } 2985 2986 const char* type = r->get_type_str(); 2987 HeapWord* bottom = r->bottom(); 2988 HeapWord* end = r->end(); 2989 size_t capacity_bytes = r->capacity(); 2990 size_t used_bytes = r->used(); 2991 size_t prev_live_bytes = r->live_bytes(); 2992 size_t next_live_bytes = r->next_live_bytes(); 2993 double gc_eff = r->gc_efficiency(); 2994 size_t remset_bytes = r->rem_set()->mem_size(); 2995 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 2996 const char* remset_type = r->rem_set()->get_short_state_str(); 2997 2998 _total_used_bytes += used_bytes; 2999 _total_capacity_bytes += capacity_bytes; 3000 _total_prev_live_bytes += prev_live_bytes; 3001 _total_next_live_bytes += next_live_bytes; 3002 _total_remset_bytes += remset_bytes; 3003 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3004 3005 // Print a line for this particular region. 3006 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3007 G1PPRL_TYPE_FORMAT 3008 G1PPRL_ADDR_BASE_FORMAT 3009 G1PPRL_BYTE_FORMAT 3010 G1PPRL_BYTE_FORMAT 3011 G1PPRL_BYTE_FORMAT 3012 G1PPRL_DOUBLE_FORMAT 3013 G1PPRL_BYTE_FORMAT 3014 G1PPRL_STATE_FORMAT 3015 G1PPRL_BYTE_FORMAT, 3016 type, p2i(bottom), p2i(end), 3017 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3018 remset_bytes, remset_type, strong_code_roots_bytes); 3019 3020 return false; 3021 } 3022 3023 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3024 if (!log_is_enabled(Trace, gc, liveness)) { 3025 return; 3026 } 3027 3028 // add static memory usages to remembered set sizes 3029 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3030 // Print the footer of the output. 
3031 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3032 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3033 " SUMMARY" 3034 G1PPRL_SUM_MB_FORMAT("capacity") 3035 G1PPRL_SUM_MB_PERC_FORMAT("used") 3036 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3037 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3038 G1PPRL_SUM_MB_FORMAT("remset") 3039 G1PPRL_SUM_MB_FORMAT("code-roots"), 3040 bytes_to_mb(_total_capacity_bytes), 3041 bytes_to_mb(_total_used_bytes), 3042 percent_of(_total_used_bytes, _total_capacity_bytes), 3043 bytes_to_mb(_total_prev_live_bytes), 3044 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 3045 bytes_to_mb(_total_next_live_bytes), 3046 percent_of(_total_next_live_bytes, _total_capacity_bytes), 3047 bytes_to_mb(_total_remset_bytes), 3048 bytes_to_mb(_total_strong_code_roots_bytes)); 3049 }