/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void
G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _root_regions(NULL),
  _max_regions(0),
  _cur_regions(0),
  _scan_in_progress(false),
  _should_abort(false),
  _claimed_root_regions(0) { }

void G1CMRootRegions::reset(const uint max_regions) {
  _root_regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC);
  _max_regions = max_regions;
  _cur_regions = 0;
}

void G1CMRootRegions::add(HeapRegion* hr) {
  assert_at_safepoint();
  size_t idx = Atomic::add((size_t)1, &_cur_regions) - 1;
  assert(idx < _max_regions, "Trying to add more root regions than there is space for (%u)", _max_regions);
  _root_regions[idx] = hr;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  _scan_in_progress = _cur_regions > 0;

  _claimed_root_regions = 0;
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  if (_claimed_root_regions >= _cur_regions) {
    return NULL;
  }

  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
  if (claimed_index < _cur_regions) {
    return _root_regions[claimed_index];
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_cur_regions;
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  if (!_should_abort) {
    assert(_claimed_root_regions >= num_root_regions(),
           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
           _claimed_root_regions, num_root_regions());
  }

  FREE_C_HEAP_ARRAY(HeapRegion*, _root_regions);
  _root_regions = NULL;
  _max_regions = 0;
  _cur_regions = 0;

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
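// For example, 8 STW GC workers map to (8 + 2) / 4 = 2 concurrent
// workers; the MAX2 below ensures we always get at least one.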
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use a different number of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got humongous continues region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ?
                cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_initial_mark() {
  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);

  _root_regions.reset(_g1h->max_regions());
}

void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on.
  // So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that a Full GC or an evacuation pause occurs while it
 * is suspended. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial*/);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                      1, /* Minimum workers */
                                                      _num_concurrent_workers,
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_worker_threads() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
         "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->next_top_at_mark_start();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}

class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
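    // For example, a fully marked object spanning three regions gets
    // HeapRegion::GrainWords attributed to each of the first two regions
    // and the remaining words attributed to the last one.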
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }

    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous starts
      // region. We need to distribute this value across the entire set of regions a
      // humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which
  // roughly one thread should be spawned for this work.
  static const uint RegionsPerThread = 384;
};

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However, we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes.
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    _g1h->resize_heap_if_necessary();

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed.
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;

  public:
    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                 FreeRegionList* local_cleanup_list) :
      _g1h(g1h),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets.
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");
    }
  }
};

void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}

void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
  }
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
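//
// How often the local queues are drained is bounded by G1RefProcDrainInterval:
// the closure below counts down from that value for each reference it deals
// with, and calls do_marking_step() to drain the marking data structures when
// the counter reaches zero.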

class G1CMKeepAliveAndDrainClosure : public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  uint              _ref_counter_limit;
  uint              _ref_counter;
  bool              _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
    _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (_cm->has_overflown()) {
      return;
    }
    if (!_task->deal_with_reference(p)) {
      // We did not add anything to the mark bitmap (or mark stack), so there is
      // no point trying to drain it.
      return;
    }
    _ref_counter--;

    if (_ref_counter == 0) {
      // We have dealt with _ref_counter_limit references, pushing them
      // and objects reachable from them on to the local stack (and
      // possibly the global stack). Call G1CMTask::do_marking_step() to
      // process these entries.
      //
      // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
      // there's nothing more to do (i.e. we're done with the entries that
      // were pushed as a result of the G1CMTask::deal_with_reference() calls
      // above) or we overflow.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.
      do {
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
        _task->do_marking_step(mark_step_duration_ms,
                               false /* do_termination */,
                               _is_serial);
      } while (_task->has_aborted() && !_cm->has_overflown());
      _ref_counter = _ref_counter_limit;
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure : public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  bool              _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e.
      // we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking.

class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;
  WorkGang*         _workers;
  uint              _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  virtual void execute(ProcessTask& task, uint ergo_workers);
};

class G1CMRefProcTaskProxy : public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask&      _proc_task;
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
  assert(_workers->active_workers() >= ergo_workers,
         "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
         ergo_workers, _workers->active_workers());

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(ergo_workers);
  _workers->run_task(&proc_task_proxy, ergo_workers);
}

void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
  ResourceMark rm;
  HandleMark hm;

  // Is alive closure.
1559   G1CMIsAliveClosure g1_is_alive(_g1h);
1560
1561   // Inner scope to exclude the cleaning of the string table
1562   // from the displayed time.
1563   {
1564     GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1565
1566     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1567
1568     // See the comment in G1CollectedHeap::ref_processing_init()
1569     // about how reference processing currently works in G1.
1570
1571     // Set the soft reference policy
1572     rp->setup_policy(clear_all_soft_refs);
1573     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1574
1575     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1576     // in serial reference processing. Note these closures are also
1577     // used for serially processing (by the current thread) the
1578     // JNI references during parallel reference processing.
1579     //
1580     // These closures do not need to synchronize with the worker
1581     // threads involved in parallel reference processing as these
1582     // instances are executed serially by the current thread (i.e.
1583     // reference processing is not multi-threaded and is thus
1584     // performed by the current thread instead of a gang worker).
1585     //
1586     // The gang tasks involved in parallel reference processing create
1587     // their own instances of these closures, which do their own
1588     // synchronization among themselves.
1589     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1590     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1591
1592     // We need at least one active thread. If reference processing
1593     // is not multi-threaded we use the current (VMThread) thread,
1594     // otherwise we use the work gang from the G1CollectedHeap and
1595     // we utilize all the worker threads we can.
1596     bool processing_is_mt = rp->processing_is_mt();
1597     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1598     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1599
1600     // Parallel processing task executor.
1601     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1602                                               _g1h->workers(), active_workers);
1603     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1604
1605     // Set the concurrency level. The phase was already set prior to
1606     // executing the remark task.
1607     set_concurrency(active_workers);
1608
1609     // Set the degree of MT processing here. If the discovery was done MT,
1610     // the number of threads involved during discovery could differ from
1611     // the number of active workers. This is OK as long as the discovered
1612     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1613     rp->set_active_mt_degree(active_workers);
1614
1615     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1616
1617     // Process the weak references.
1618     const ReferenceProcessorStats& stats =
1619         rp->process_discovered_references(&g1_is_alive,
1620                                           &g1_keep_alive,
1621                                           &g1_drain_mark_stack,
1622                                           executor,
1623                                           &pt);
1624     _gc_tracer_cm->report_gc_reference_stats(stats);
1625     pt.print_all_references();
1626
1627     // The do_oop work routines of the keep_alive and drain_marking_stack
1628     // oop closures will set the has_overflown flag if we overflow the
1629     // global marking stack.
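    // If it did overflow, the checks below bail out; otherwise reference
    // processing must have left the mark stack completely drained.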
1630
1631     assert(has_overflown() || _global_mark_stack.is_empty(),
1632            "Mark stack should be empty (unless it has overflown)");
1633
1634     assert(rp->num_queues() == active_workers, "why not");
1635
1636     rp->verify_no_references_recorded();
1637     assert(!rp->discovery_enabled(), "Post condition");
1638   }
1639
1640   if (has_overflown()) {
1641     // We cannot trust g1_is_alive and the contents of the heap if the marking stack
1642     // overflowed while processing references. Exit the VM.
1643     fatal("Overflow during reference processing, cannot continue. Please "
1644           "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1645           "restart.", MarkStackSizeMax);
1646     return;
1647   }
1648
1649   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1650
1651   {
1652     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1653     WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1654   }
1655
1656   // Unload Klasses, String, Code Cache, etc.
1657   if (ClassUnloadingWithConcurrentMark) {
1658     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1659     bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
1660     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1661   } else {
1662     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1663     // No need to clean the string table as it is treated as strong roots when
1664     // class unloading is disabled.
1665     _g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled());
1666   }
1667 }
1668
1669 class G1PrecleanYieldClosure : public YieldClosure {
1670   G1ConcurrentMark* _cm;
1671
1672 public:
1673   G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1674
1675   virtual bool should_return() {
1676     return _cm->has_aborted();
1677   }
1678
1679   virtual bool should_return_fine_grain() {
1680     _cm->do_yield_check();
1681     return _cm->has_aborted();
1682   }
1683 };
1684
1685 void G1ConcurrentMark::preclean() {
1686   assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1687
1688   SuspendibleThreadSetJoiner joiner;
1689
1690   G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1691   G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1692
1693   set_concurrency_and_phase(1, true);
1694
1695   G1PrecleanYieldClosure yield_cl(this);
1696
1697   ReferenceProcessor* rp = _g1h->ref_processor_cm();
1698   // Precleaning is single threaded. Temporarily disable MT discovery.
1699   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1700   rp->preclean_discovered_references(rp->is_alive_non_header(),
1701                                      &keep_alive,
1702                                      &drain_mark_stack,
1703                                      &yield_cl,
1704                                      _gc_timer_cm);
1705 }
1706
1707 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1708 // the prev bitmap for determining liveness.
1709 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1710   G1CollectedHeap* _g1h;
1711 public:
1712   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1713
1714   bool do_object_b(oop obj) {
1715     HeapWord* addr = (HeapWord*)obj;
1716     return addr != NULL &&
1717            (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1718   }
1719 };
1720
1721 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1722   // Depending on the completion of the marking, liveness needs to be determined
1723   // using either the next or the prev bitmap.
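  // If marking completed, the bitmaps were already swapped (see
  // swap_mark_bitmaps() below), so G1ObjectCountIsAliveClosure consults the
  // prev bitmap; otherwise G1CMIsAliveClosure reflects the in-progress marking.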
1724   if (mark_completed) {
1725     G1ObjectCountIsAliveClosure is_alive(_g1h);
1726     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1727   } else {
1728     G1CMIsAliveClosure is_alive(_g1h);
1729     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1730   }
1731 }
1732
1733
1734 void G1ConcurrentMark::swap_mark_bitmaps() {
1735   G1CMBitMap* temp = _prev_mark_bitmap;
1736   _prev_mark_bitmap = _next_mark_bitmap;
1737   _next_mark_bitmap = temp;
1738   _g1h->collector_state()->set_clearing_next_bitmap(true);
1739 }
1740
1741 // Closure for marking entries in SATB buffers.
1742 class G1CMSATBBufferClosure : public SATBBufferClosure {
1743 private:
1744   G1CMTask* _task;
1745   G1CollectedHeap* _g1h;
1746
1747   // This is very similar to G1CMTask::deal_with_reference, but with
1748   // more relaxed requirements for the argument, so this must be more
1749   // circumspect about treating the argument as an object.
1750   void do_entry(void* entry) const {
1751     _task->increment_refs_reached();
1752     oop const obj = static_cast<oop>(entry);
1753     _task->make_reference_grey(obj);
1754   }
1755
1756 public:
1757   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1758     : _task(task), _g1h(g1h) { }
1759
1760   virtual void do_buffer(void** buffer, size_t size) {
1761     for (size_t i = 0; i < size; ++i) {
1762       do_entry(buffer[i]);
1763     }
1764   }
1765 };
1766
1767 class G1RemarkThreadsClosure : public ThreadClosure {
1768   G1CMSATBBufferClosure _cm_satb_cl;
1769   G1CMOopClosure _cm_cl;
1770   MarkingCodeBlobClosure _code_cl;
1771   int _thread_parity;
1772
1773 public:
1774   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1775     _cm_satb_cl(task, g1h),
1776     _cm_cl(g1h, task),
1777     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1778     _thread_parity(Threads::thread_claim_parity()) {}
1779
1780   void do_thread(Thread* thread) {
1781     if (thread->is_Java_thread()) {
1782       if (thread->claim_oops_do(true, _thread_parity)) {
1783         JavaThread* jt = (JavaThread*)thread;
1784
1785         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1786         // however, oops reachable from nmethods have very complex lifecycles:
1787         // * Alive if on the stack of an executing method
1788         // * Weakly reachable otherwise
1789         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1790         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1791         jt->nmethods_do(&_code_cl);
1792
1793         G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl);
1794       }
1795     } else if (thread->is_VM_thread()) {
1796       if (thread->claim_oops_do(true, _thread_parity)) {
1797         G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1798       }
1799     }
1800   }
1801 };
1802
1803 class G1CMRemarkTask : public AbstractGangTask {
1804   G1ConcurrentMark* _cm;
1805 public:
1806   void work(uint worker_id) {
1807     G1CMTask* task = _cm->task(worker_id);
1808     task->record_start_time();
1809     {
1810       ResourceMark rm;
1811       HandleMark hm;
1812
1813       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1814       Threads::threads_do(&threads_f);
1815     }
1816
1817     do {
1818       task->do_marking_step(1000000000.0 /* something very large */,
1819                             true /* do_termination */,
1820                             false /* is_serial */);
1821     } while (task->has_aborted() && !_cm->has_overflown());
1822     // If we overflow, then we do not want to restart. We instead
1823     // want to abort remark and do concurrent marking again.
1824     task->record_end_time();
1825   }
1826
1827   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1828     AbstractGangTask("Par Remark"), _cm(cm) {
1829     _cm->terminator()->reset_for_reuse(active_workers);
1830   }
1831 };
1832
1833 void G1ConcurrentMark::finalize_marking() {
1834   ResourceMark rm;
1835   HandleMark hm;
1836
1837   _g1h->ensure_parsability(false);
1838
1839   // This is remark, so we'll use up all active threads.
1840   uint active_workers = _g1h->workers()->active_workers();
1841   set_concurrency_and_phase(active_workers, false /* concurrent */);
1842   // Leave _parallel_marking_threads at its
1843   // value originally calculated in the G1ConcurrentMark
1844   // constructor and pass values of the active workers
1845   // through the gang in the task.
1846
1847   {
1848     StrongRootsScope srs(active_workers);
1849
1850     G1CMRemarkTask remarkTask(this, active_workers);
1851     // We will start all available threads, even if we decide that the
1852     // active_workers will be fewer. The extra ones will just bail out
1853     // immediately.
1854     _g1h->workers()->run_task(&remarkTask);
1855   }
1856
1857   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1858   guarantee(has_overflown() ||
1859             satb_mq_set.completed_buffers_num() == 0,
1860             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1861             BOOL_TO_STR(has_overflown()),
1862             satb_mq_set.completed_buffers_num());
1863
1864   print_stats();
1865 }
1866
1867 void G1ConcurrentMark::flush_all_task_caches() {
1868   size_t hits = 0;
1869   size_t misses = 0;
1870   for (uint i = 0; i < _max_num_tasks; i++) {
1871     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1872     hits += stats.first;
1873     misses += stats.second;
1874   }
1875   size_t sum = hits + misses;
1876   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1877                        hits, misses, percent_of(hits, sum));
1878 }
1879
1880 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1881   _prev_mark_bitmap->clear_range(mr);
1882 }
1883
1884 HeapRegion*
1885 G1ConcurrentMark::claim_region(uint worker_id) {
1886   // "checkpoint" the finger
1887   HeapWord* finger = _finger;
1888
1889   while (finger < _heap.end()) {
1890     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1891
1892     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1893     // Make sure that the reads below do not float before loading curr_region.
1894     OrderAccess::loadload();
1895     // Above heap_region_containing may return NULL as we always scan and claim
1896     // until the end of the heap. In this case, just jump to the next region.
1897     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1898
1899     // Is the gap between reading the finger and doing the CAS too long?
1900     HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1901     if (res == finger && curr_region != NULL) {
1902       // we succeeded
1903       HeapWord* bottom = curr_region->bottom();
1904       HeapWord* limit = curr_region->next_top_at_mark_start();
1905
1906       // Notice that _finger == end cannot be guaranteed here since
1907       // someone else might have moved the finger even further.
1908       assert(_finger >= end, "the finger should have moved forward");
1909
1910       if (limit > bottom) {
1911         return curr_region;
1912       } else {
1913         assert(limit == bottom,
1914                "the region limit should be at bottom");
1915         // we return NULL and the caller should try calling
1916         // claim_region() again.
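        // Note that the CAS above has already advanced the global finger
        // past this empty region, so no work is lost by returning NULL here.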
1917 return NULL; 1918 } 1919 } else { 1920 assert(_finger > finger, "the finger should have moved forward"); 1921 // read it again 1922 finger = _finger; 1923 } 1924 } 1925 1926 return NULL; 1927 } 1928 1929 #ifndef PRODUCT 1930 class VerifyNoCSetOops { 1931 G1CollectedHeap* _g1h; 1932 const char* _phase; 1933 int _info; 1934 1935 public: 1936 VerifyNoCSetOops(const char* phase, int info = -1) : 1937 _g1h(G1CollectedHeap::heap()), 1938 _phase(phase), 1939 _info(info) 1940 { } 1941 1942 void operator()(G1TaskQueueEntry task_entry) const { 1943 if (task_entry.is_array_slice()) { 1944 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1945 return; 1946 } 1947 guarantee(oopDesc::is_oop(task_entry.obj()), 1948 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1949 p2i(task_entry.obj()), _phase, _info); 1950 guarantee(!_g1h->is_in_cset(task_entry.obj()), 1951 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1952 p2i(task_entry.obj()), _phase, _info); 1953 } 1954 }; 1955 1956 void G1ConcurrentMark::verify_no_cset_oops() { 1957 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1958 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { 1959 return; 1960 } 1961 1962 // Verify entries on the global mark stack 1963 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1964 1965 // Verify entries on the task queues 1966 for (uint i = 0; i < _max_num_tasks; ++i) { 1967 G1CMTaskQueue* queue = _task_queues->queue(i); 1968 queue->iterate(VerifyNoCSetOops("Queue", i)); 1969 } 1970 1971 // Verify the global finger 1972 HeapWord* global_finger = finger(); 1973 if (global_finger != NULL && global_finger < _heap.end()) { 1974 // Since we always iterate over all regions, we might get a NULL HeapRegion 1975 // here. 1976 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1977 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1978 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1979 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1980 } 1981 1982 // Verify the task fingers 1983 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 1984 for (uint i = 0; i < _num_concurrent_workers; ++i) { 1985 G1CMTask* task = _tasks[i]; 1986 HeapWord* task_finger = task->finger(); 1987 if (task_finger != NULL && task_finger < _heap.end()) { 1988 // See above note on the global finger verification. 1989 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 1990 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 1991 !task_hr->in_collection_set(), 1992 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1993 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 1994 } 1995 } 1996 } 1997 #endif // PRODUCT 1998 1999 void G1ConcurrentMark::rebuild_rem_set_concurrently() { 2000 _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset); 2001 } 2002 2003 void G1ConcurrentMark::print_stats() { 2004 if (!log_is_enabled(Debug, gc, stats)) { 2005 return; 2006 } 2007 log_debug(gc, stats)("---------------------------------------------------------------------"); 2008 for (size_t i = 0; i < _num_active_tasks; ++i) { 2009 _tasks[i]->print_stats(); 2010 log_debug(gc, stats)("---------------------------------------------------------------------"); 2011 } 2012 } 2013 2014 void G1ConcurrentMark::concurrent_cycle_abort() { 2015 if (!cm_thread()->during_cycle() || _has_aborted) { 2016 // We haven't started a concurrent cycle or we have already aborted it. 
No need to do anything.
2017     return;
2018   }
2019
2020   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2021   // concurrent bitmap clearing.
2022   {
2023     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2024     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2025   }
2026   // Note we cannot clear the previous marking bitmap here
2027   // since VerifyDuringGC verifies the objects marked during
2028   // a full GC against the previous bitmap.
2029
2030   // Empty mark stack
2031   reset_marking_for_restart();
2032   for (uint i = 0; i < _max_num_tasks; ++i) {
2033     _tasks[i]->clear_region_fields();
2034   }
2035   _first_overflow_barrier_sync.abort();
2036   _second_overflow_barrier_sync.abort();
2037   _has_aborted = true;
2038
2039   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2040   satb_mq_set.abandon_partial_marking();
2041   // This can be called either during or outside marking; we'll read
2042   // the expected_active value from the SATB queue set.
2043   satb_mq_set.set_active_all_threads(
2044                                  false, /* new active value */
2045                                  satb_mq_set.is_active() /* expected_active */);
2046 }
2047
2048 static void print_ms_time_info(const char* prefix, const char* name,
2049                                NumberSeq& ns) {
2050   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2051                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2052   if (ns.num() > 0) {
2053     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2054                            prefix, ns.sd(), ns.maximum());
2055   }
2056 }
2057
2058 void G1ConcurrentMark::print_summary_info() {
2059   Log(gc, marking) log;
2060   if (!log.is_trace()) {
2061     return;
2062   }
2063
2064   log.trace(" Concurrent marking:");
2065   print_ms_time_info("  ", "init marks", _init_times);
2066   print_ms_time_info("  ", "remarks", _remark_times);
2067   {
2068     print_ms_time_info("     ", "final marks", _remark_mark_times);
2069     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2070
2071   }
2072   print_ms_time_info("  ", "cleanups", _cleanup_times);
2073   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2074             _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2075   log.trace("  Total stop_world time = %8.2f s.",
2076             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2077   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2078             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2079 }
2080
2081 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2082   _concurrent_workers->print_worker_threads_on(st);
2083 }
2084
2085 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2086   _concurrent_workers->threads_do(tc);
2087 }
2088
2089 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2090   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2091                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2092   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2093   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2094 }
2095
2096 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2097   ReferenceProcessor* result = g1h->ref_processor_cm();
2098   assert(result != NULL, "CM reference processor should not be NULL");
2099   return result;
2100 }
2101
2102 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2103                                G1CMTask* task)
2104   : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2105     _g1h(g1h), _task(task)
2106 { }
2107
2108 void G1CMTask::setup_for_region(HeapRegion* hr) {
2109   assert(hr != NULL,
2110          "claim_region() should have filtered out NULL regions");
2111   _curr_region  = hr;
2112   _finger       = hr->bottom();
2113   update_region_limit();
2114 }
2115
2116 void G1CMTask::update_region_limit() {
2117   HeapRegion* hr = _curr_region;
2118   HeapWord* bottom = hr->bottom();
2119   HeapWord* limit = hr->next_top_at_mark_start();
2120
2121   if (limit == bottom) {
2122     // The region was collected underneath our feet.
2123     // We set the finger to bottom to ensure that the bitmap
2124     // iteration that will follow this will not do anything.
2125     // (this is not a condition that holds when we set the region up,
2126     // as the region is not supposed to be empty in the first place)
2127     _finger = bottom;
2128   } else if (limit >= _region_limit) {
2129     assert(limit >= _finger, "peace of mind");
2130   } else {
2131     assert(limit < _region_limit, "only way to get here");
2132     // This can happen under some pretty unusual circumstances. An
2133     // evacuation pause empties the region underneath our feet (NTAMS
2134     // at bottom). We then do some allocation in the region (NTAMS
2135     // stays at bottom), followed by the region being used as a GC
2136     // alloc region (NTAMS will move to top() and the objects
2137     // originally below it will be grayed). All objects now marked in
2138     // the region are explicitly grayed, if below the global finger,
2139     // and in fact we do not need to scan anything else. So, we simply
2140     // set _finger to be limit to ensure that the bitmap iteration
2141     // doesn't do anything.
2142     _finger = limit;
2143   }
2144
2145   _region_limit = limit;
2146 }
2147
2148 void G1CMTask::giveup_current_region() {
2149   assert(_curr_region != NULL, "invariant");
2150   clear_region_fields();
2151 }
2152
2153 void G1CMTask::clear_region_fields() {
2154   // Values for these three fields that indicate that we're not
2155   // holding on to a region.
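  // (setup_for_region() re-establishes all three when a new region is claimed.)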
2156   _curr_region   = NULL;
2157   _finger        = NULL;
2158   _region_limit  = NULL;
2159 }
2160
2161 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2162   if (cm_oop_closure == NULL) {
2163     assert(_cm_oop_closure != NULL, "invariant");
2164   } else {
2165     assert(_cm_oop_closure == NULL, "invariant");
2166   }
2167   _cm_oop_closure = cm_oop_closure;
2168 }
2169
2170 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2171   guarantee(next_mark_bitmap != NULL, "invariant");
2172   _next_mark_bitmap              = next_mark_bitmap;
2173   clear_region_fields();
2174
2175   _calls                         = 0;
2176   _elapsed_time_ms               = 0.0;
2177   _termination_time_ms           = 0.0;
2178   _termination_start_time_ms     = 0.0;
2179
2180   _mark_stats_cache.reset();
2181 }
2182
2183 bool G1CMTask::should_exit_termination() {
2184   regular_clock_call();
2185   // This is called when we are in the termination protocol. We should
2186   // quit if, for some reason, this task wants to abort or the global
2187   // stack is not empty (this means that we can get work from it).
2188   return !_cm->mark_stack_empty() || has_aborted();
2189 }
2190
2191 void G1CMTask::reached_limit() {
2192   assert(_words_scanned >= _words_scanned_limit ||
2193          _refs_reached >= _refs_reached_limit,
2194          "shouldn't have been called otherwise");
2195   regular_clock_call();
2196 }
2197
2198 void G1CMTask::regular_clock_call() {
2199   if (has_aborted()) {
2200     return;
2201   }
2202
2203   // First, we need to recalculate the words scanned and refs reached
2204   // limits for the next clock call.
2205   recalculate_limits();
2206
2207   // During the regular clock call we do the following:
2208
2209   // (1) If an overflow has been flagged, then we abort.
2210   if (_cm->has_overflown()) {
2211     set_has_aborted();
2212     return;
2213   }
2214
2215   // If we are not concurrent (i.e. we're doing remark) we don't need
2216   // to check anything else. The other steps are only needed during
2217   // the concurrent marking phase.
2218   if (!_cm->concurrent()) {
2219     return;
2220   }
2221
2222   // (2) If marking has been aborted for Full GC, then we also abort.
2223   if (_cm->has_aborted()) {
2224     set_has_aborted();
2225     return;
2226   }
2227
2228   double curr_time_ms = os::elapsedVTime() * 1000.0;
2229
2230   // (3) We check whether we should yield. If we have to, then we abort.
2231   if (SuspendibleThreadSet::should_yield()) {
2232     // We should yield. To do this we abort the task. The caller is
2233     // responsible for yielding.
2234     set_has_aborted();
2235     return;
2236   }
2237
2238   // (4) We check whether we've reached our time quota. If we have,
2239   // then we abort.
2240   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2241   if (elapsed_time_ms > _time_target_ms) {
2242     set_has_aborted();
2243     _has_timed_out = true;
2244     return;
2245   }
2246
2247   // (5) Finally, we check whether there are enough completed SATB
2248   // buffers available for processing. If there are, we abort.
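  // Draining is suppressed while drain_satb_buffers() itself is running;
  // see the _draining_satb_buffers flag checked below.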
2249   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2250   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2251     // We do need to process SATB buffers; we'll abort and restart
2252     // the marking task to do so.
2253     set_has_aborted();
2254     return;
2255   }
2256 }
2257
2258 void G1CMTask::recalculate_limits() {
2259   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2260   _words_scanned_limit      = _real_words_scanned_limit;
2261
2262   _real_refs_reached_limit  = _refs_reached + refs_reached_period;
2263   _refs_reached_limit       = _real_refs_reached_limit;
2264 }
2265
2266 void G1CMTask::decrease_limits() {
2267   // This is called when we believe that we're going to do an infrequent
2268   // operation which will increase the per-byte scanned cost (i.e. move
2269   // entries to/from the global stack). It basically tries to decrease the
2270   // scanning limit so that the clock is called earlier.
2271
2272   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2273   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2274 }
2275
2276 void G1CMTask::move_entries_to_global_stack() {
2277   // Local array where we'll store the entries that will be popped
2278   // from the local queue.
2279   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2280
2281   size_t n = 0;
2282   G1TaskQueueEntry task_entry;
2283   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2284     buffer[n] = task_entry;
2285     ++n;
2286   }
2287   if (n < G1CMMarkStack::EntriesPerChunk) {
2288     buffer[n] = G1TaskQueueEntry();   // Mark the end of the chunk with a null entry.
2289   }
2290
2291   if (n > 0) {
2292     if (!_cm->mark_stack_push(buffer)) {
2293       set_has_aborted();
2294     }
2295   }
2296
2297   // This operation was quite expensive, so decrease the limits.
2298   decrease_limits();
2299 }
2300
2301 bool G1CMTask::get_entries_from_global_stack() {
2302   // Local array where we'll store the entries that will be popped
2303   // from the global stack.
2304   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2305
2306   if (!_cm->mark_stack_pop(buffer)) {
2307     return false;
2308   }
2309
2310   // We did actually pop at least one entry.
2311   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2312     G1TaskQueueEntry task_entry = buffer[i];
2313     if (task_entry.is_null()) {
2314       break;
2315     }
2316     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2317     bool success = _task_queue->push(task_entry);
2318     // We only call this when the local queue is empty or under a
2319     // given target limit. So, we do not expect this push to fail.
2320     assert(success, "invariant");
2321   }
2322
2323   // This operation was quite expensive, so decrease the limits.
2324   decrease_limits();
2325   return true;
2326 }
2327
2328 void G1CMTask::drain_local_queue(bool partially) {
2329   if (has_aborted()) {
2330     return;
2331   }
2332
2333   // Decide what the target size is, depending on whether we're going to
2334   // drain it partially (so that other tasks can steal if they run out
2335   // of things to do) or totally (at the very end).
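  // Illustrative (hypothetical) numbers: with a task queue capacity of 16k
  // entries and GCDrainStackTargetSize assumed to be 64, a partial drain
  // stops once at most MIN2(16k / 3, 64) = 64 entries remain; a total drain
  // (target 0) empties the queue completely.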
2336   size_t target_size;
2337   if (partially) {
2338     target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2339   } else {
2340     target_size = 0;
2341   }
2342
2343   if (_task_queue->size() > target_size) {
2344     G1TaskQueueEntry entry;
2345     bool ret = _task_queue->pop_local(entry);
2346     while (ret) {
2347       scan_task_entry(entry);
2348       if (_task_queue->size() <= target_size || has_aborted()) {
2349         ret = false;
2350       } else {
2351         ret = _task_queue->pop_local(entry);
2352       }
2353     }
2354   }
2355 }
2356
2357 void G1CMTask::drain_global_stack(bool partially) {
2358   if (has_aborted()) {
2359     return;
2360   }
2361
2362   // We have a policy to drain the local queue before we attempt to
2363   // drain the global stack.
2364   assert(partially || _task_queue->size() == 0, "invariant");
2365
2366   // Decide what the target size is, depending on whether we're going to
2367   // drain it partially (so that other tasks can steal if they run out
2368   // of things to do) or totally (at the very end).
2369   // Notice that when draining the global mark stack partially, due to the raciness
2370   // of the mark stack size update we might in fact drop below the target. But,
2371   // this is not a problem.
2372   // In case of total draining, we simply process until the global mark stack is
2373   // totally empty, disregarding the size counter.
2374   if (partially) {
2375     size_t const target_size = _cm->partial_mark_stack_size_target();
2376     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2377       if (get_entries_from_global_stack()) {
2378         drain_local_queue(partially);
2379       }
2380     }
2381   } else {
2382     while (!has_aborted() && get_entries_from_global_stack()) {
2383       drain_local_queue(partially);
2384     }
2385   }
2386 }
2387
2388 // The SATB queue has several assumptions on whether to call the par or
2389 // non-par versions of the methods. This is why some of the code is
2390 // replicated. We should really get rid of the single-threaded version
2391 // of the code to simplify things.
2392 void G1CMTask::drain_satb_buffers() {
2393   if (has_aborted()) {
2394     return;
2395   }
2396
2397   // We set this so that the regular clock knows that we're in the
2398   // middle of draining buffers and doesn't set the abort flag when it
2399   // notices that SATB buffers are available for draining. It'd be
2400   // very counterproductive if it did that. :-)
2401   _draining_satb_buffers = true;
2402
2403   G1CMSATBBufferClosure satb_cl(this, _g1h);
2404   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2405
2406   // This keeps claiming and applying the closure to completed buffers
2407   // until we run out of buffers or we need to abort.
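  // regular_clock_call() runs after each buffer; because of the flag set
  // above it will not abort merely because more SATB buffers are pending,
  // but it can still abort for overflow, yield requests or the time target.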
2408   while (!has_aborted() &&
2409          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2410     regular_clock_call();
2411   }
2412
2413   _draining_satb_buffers = false;
2414
2415   assert(has_aborted() ||
2416          _cm->concurrent() ||
2417          satb_mq_set.completed_buffers_num() == 0, "invariant");
2418
2419   // Again, this was a potentially expensive operation; decrease the
2420   // limits to get the regular clock call early.
2421   decrease_limits();
2422 }
2423
2424 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2425   _mark_stats_cache.reset(region_idx);
2426 }
2427
2428 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2429   return _mark_stats_cache.evict_all();
2430 }
2431
2432 void G1CMTask::print_stats() {
2433   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2434   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2435                        _elapsed_time_ms, _termination_time_ms);
2436   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms, max = %1.2lfms, total = %1.2lfms",
2437                        _step_times_ms.num(),
2438                        _step_times_ms.avg(),
2439                        _step_times_ms.sd(),
2440                        _step_times_ms.maximum(),
2441                        _step_times_ms.sum());
2442   size_t const hits = _mark_stats_cache.hits();
2443   size_t const misses = _mark_stats_cache.misses();
2444   log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2445                        hits, misses, percent_of(hits, hits + misses));
2446 }
2447
2448 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2449   return _task_queues->steal(worker_id, task_entry);
2450 }
2451
2452 /*****************************************************************************
2453
2454     The do_marking_step(time_target_ms, ...) method is the building
2455     block of the parallel marking framework. It can be called in parallel
2456     with other invocations of do_marking_step() on different tasks
2457     (but only one per task, obviously) and concurrently with the
2458     mutator threads, or during remark, hence it eliminates the need
2459     for two versions of the code. When called during remark, it will
2460     pick up from where the task left off during the concurrent marking
2461     phase. Interestingly, tasks are also claimable during evacuation
2462     pauses, since do_marking_step() ensures that it aborts before
2463     it needs to yield.
2464
2465     The data structures that it uses to do marking work are the
2466     following:
2467
2468     (1) Marking Bitmap. If there are gray objects that appear only
2469     on the bitmap (this happens either when dealing with an overflow
2470     or when the initial marking phase has simply marked the roots
2471     and didn't push them on the stack), then tasks claim heap
2472     regions whose bitmap they then scan to find gray objects. A
2473     global finger indicates where the end of the last claimed region
2474     is. A local finger indicates how far into the region a task has
2475     scanned. The two fingers are used to determine how to gray an
2476     object (i.e. whether simply marking it is OK, as it will be
2477     visited by a task in the future, or whether it also needs to be
2478     pushed on a stack).
2479
2480     (2) Local Queue. The local queue of the task, which is accessed
2481     reasonably efficiently by the task. Other tasks can steal from
2482     it when they run out of work. Throughout the marking phase, a
2483     task attempts to keep its local queue short but not totally
2484     empty, so that entries are available for stealing by other
2485     tasks. Only when there is no more work will a task totally
2486     drain its local queue.
2487
2488     (3) Global Mark Stack. This handles local queue overflow. During
2489     marking only sets of entries are moved between it and the local
2490     queues, as access to it requires a mutex, and more fine-grained
2491     interaction with it might cause contention. If it
2492     overflows, then the marking phase should restart and iterate
2493     over the bitmap to identify gray objects. Throughout the marking
2494     phase, tasks attempt to keep the global mark stack at a small
2495     length but not totally empty, so that entries are available for
2496     popping by other tasks. Only when there is no more work will tasks
2497     totally drain the global mark stack.
2498
2499     (4) SATB Buffer Queue. This is where completed SATB buffers are
2500     made available. Buffers are regularly removed from this queue
2501     and scanned for roots, so that the queue doesn't get too
2502     long. During remark, all completed buffers are processed, as
2503     well as the filled-in parts of any uncompleted buffers.
2504
2505     The do_marking_step() method tries to abort when the time target
2506     has been reached. There are a few other cases when the
2507     do_marking_step() method also aborts:
2508
2509     (1) When the marking phase has been aborted (after a Full GC).
2510
2511     (2) When a global overflow (on the global stack) has been
2512     triggered. Before the task aborts, it will actually sync up with
2513     the other tasks to ensure that all the marking data structures
2514     (local queues, stacks, fingers etc.) are re-initialized so that
2515     when do_marking_step() completes, the marking phase can
2516     immediately restart.
2517
2518     (3) When enough completed SATB buffers are available. The
2519     do_marking_step() method only tries to drain SATB buffers right
2520     at the beginning. So, if enough buffers are available, the
2521     marking step aborts and the SATB buffers are processed at
2522     the beginning of the next invocation.
2523
2524     (4) To yield. When we have to yield, we abort and yield
2525     right at the end of do_marking_step(). This saves us from a lot
2526     of hassle as, by yielding, we might allow a Full GC. If this
2527     happens then objects will be compacted underneath our feet, the
2528     heap might shrink, etc. We save checking for this by just
2529     aborting and doing the yield right at the end.
2530
2531     From the above it follows that the do_marking_step() method should
2532     be called in a loop (or, otherwise, regularly) until it completes.
2533
2534     If a marking step completes without its has_aborted() flag being
2535     true, it means it has completed the current marking phase (and
2536     also all other marking tasks have done so and have all synced up).
2537
2538     A method called regular_clock_call() is invoked "regularly" (in
2539     sub-ms intervals) throughout marking. It is this clock method that
2540     checks all the abort conditions which were mentioned above and
2541     decides when the task should abort. A work-based scheme is used to
2542     trigger this clock method: when the number of object words the
2543     marking phase has scanned or the number of references the marking
2544     phase has visited reach a given limit. Additional invocations of
2545     the clock method have been planted in a few other strategic places
2546     too. The initial reason for the clock method was to avoid calling
2547     vtime too regularly, as it is quite expensive.
So, once it was in 2548 place, it was natural to piggy-back all the other conditions on it 2549 too and not constantly check them throughout the code. 2550 2551 If do_termination is true then do_marking_step will enter its 2552 termination protocol. 2553 2554 The value of is_serial must be true when do_marking_step is being 2555 called serially (i.e. by the VMThread) and do_marking_step should 2556 skip any synchronization in the termination and overflow code. 2557 Examples include the serial remark code and the serial reference 2558 processing closures. 2559 2560 The value of is_serial must be false when do_marking_step is 2561 being called by any of the worker threads in a work gang. 2562 Examples include the concurrent marking code (CMMarkingTask), 2563 the MT remark code, and the MT reference processing closures. 2564 2565 *****************************************************************************/ 2566 2567 void G1CMTask::do_marking_step(double time_target_ms, 2568 bool do_termination, 2569 bool is_serial) { 2570 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2571 2572 _start_time_ms = os::elapsedVTime() * 1000.0; 2573 2574 // If do_stealing is true then do_marking_step will attempt to 2575 // steal work from the other G1CMTasks. It only makes sense to 2576 // enable stealing when the termination protocol is enabled 2577 // and do_marking_step() is not being called serially. 2578 bool do_stealing = do_termination && !is_serial; 2579 2580 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 2581 _time_target_ms = time_target_ms - diff_prediction_ms; 2582 2583 // set up the variables that are used in the work-based scheme to 2584 // call the regular clock method 2585 _words_scanned = 0; 2586 _refs_reached = 0; 2587 recalculate_limits(); 2588 2589 // clear all flags 2590 clear_has_aborted(); 2591 _has_timed_out = false; 2592 _draining_satb_buffers = false; 2593 2594 ++_calls; 2595 2596 // Set up the bitmap and oop closures. Anything that uses them is 2597 // eventually called from this method, so it is OK to allocate these 2598 // statically. 2599 G1CMBitMapClosure bitmap_closure(this, _cm); 2600 G1CMOopClosure cm_oop_closure(_g1h, this); 2601 set_cm_oop_closure(&cm_oop_closure); 2602 2603 if (_cm->has_overflown()) { 2604 // This can happen if the mark stack overflows during a GC pause 2605 // and this task, after a yield point, restarts. We have to abort 2606 // as we need to get into the overflow protocol which happens 2607 // right at the end of this task. 2608 set_has_aborted(); 2609 } 2610 2611 // First drain any available SATB buffers. After this, we will not 2612 // look at SATB buffers before the next invocation of this method. 2613 // If enough completed SATB buffers are queued up, the regular clock 2614 // will abort this task so that it restarts. 2615 drain_satb_buffers(); 2616 // ...then partially drain the local queue and the global stack 2617 drain_local_queue(true); 2618 drain_global_stack(true); 2619 2620 do { 2621 if (!has_aborted() && _curr_region != NULL) { 2622 // This means that we're already holding on to a region. 2623 assert(_finger != NULL, "if region is not NULL, then the finger " 2624 "should not be NULL either"); 2625 2626 // We might have restarted this task after an evacuation pause 2627 // which might have evacuated the region we're holding on to 2628 // underneath our feet. 
Let's read its limit again to make sure 2629 // that we do not iterate over a region of the heap that 2630 // contains garbage (update_region_limit() will also move 2631 // _finger to the start of the region if it is found empty). 2632 update_region_limit(); 2633 // We will start from _finger not from the start of the region, 2634 // as we might be restarting this task after aborting half-way 2635 // through scanning this region. In this case, _finger points to 2636 // the address where we last found a marked object. If this is a 2637 // fresh region, _finger points to start(). 2638 MemRegion mr = MemRegion(_finger, _region_limit); 2639 2640 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2641 "humongous regions should go around loop once only"); 2642 2643 // Some special cases: 2644 // If the memory region is empty, we can just give up the region. 2645 // If the current region is humongous then we only need to check 2646 // the bitmap for the bit associated with the start of the object, 2647 // scan the object if it's live, and give up the region. 2648 // Otherwise, let's iterate over the bitmap of the part of the region 2649 // that is left. 2650 // If the iteration is successful, give up the region. 2651 if (mr.is_empty()) { 2652 giveup_current_region(); 2653 regular_clock_call(); 2654 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2655 if (_next_mark_bitmap->is_marked(mr.start())) { 2656 // The object is marked - apply the closure 2657 bitmap_closure.do_addr(mr.start()); 2658 } 2659 // Even if this task aborted while scanning the humongous object 2660 // we can (and should) give up the current region. 2661 giveup_current_region(); 2662 regular_clock_call(); 2663 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) { 2664 giveup_current_region(); 2665 regular_clock_call(); 2666 } else { 2667 assert(has_aborted(), "currently the only way to do so"); 2668 // The only way to abort the bitmap iteration is to return 2669 // false from the do_bit() method. However, inside the 2670 // do_bit() method we move the _finger to point to the 2671 // object currently being looked at. So, if we bail out, we 2672 // have definitely set _finger to something non-null. 2673 assert(_finger != NULL, "invariant"); 2674 2675 // Region iteration was actually aborted. So now _finger 2676 // points to the address of the object we last scanned. If we 2677 // leave it there, when we restart this task, we will rescan 2678 // the object. It is easy to avoid this. We move the finger by 2679 // enough to point to the next possible object header. 2680 assert(_finger < _region_limit, "invariant"); 2681 HeapWord* const new_finger = _finger + ((oop)_finger)->size(); 2682 // Check if bitmap iteration was aborted while scanning the last object 2683 if (new_finger >= _region_limit) { 2684 giveup_current_region(); 2685 } else { 2686 move_finger_to(new_finger); 2687 } 2688 } 2689 } 2690 // At this point we have either completed iterating over the 2691 // region we were holding on to, or we have aborted. 2692 2693 // We then partially drain the local queue and the global stack. 2694 // (Do we really need this?) 2695 drain_local_queue(true); 2696 drain_global_stack(true); 2697 2698 // Read the note on the claim_region() method on why it might 2699 // return NULL with potentially more regions available for 2700 // claiming and why we have to check out_of_regions() to determine 2701 // whether we're done or not. 
2702     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2703       // We are going to try to claim a new region. We should have
2704       // given up on the previous one.
2705       // Separated the asserts so that we know which one fires.
2706       assert(_curr_region  == NULL, "invariant");
2707       assert(_finger       == NULL, "invariant");
2708       assert(_region_limit == NULL, "invariant");
2709       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2710       if (claimed_region != NULL) {
2711         // Yes, we managed to claim one
2712         setup_for_region(claimed_region);
2713         assert(_curr_region == claimed_region, "invariant");
2714       }
2715       // It is important to call the regular clock here. It might take
2716       // a while to claim a region if, for example, we hit a large
2717       // block of empty regions. So we need to call the regular clock
2718       // method once round the loop to make sure it's called
2719       // frequently enough.
2720       regular_clock_call();
2721     }
2722
2723     if (!has_aborted() && _curr_region == NULL) {
2724       assert(_cm->out_of_regions(),
2725              "at this point we should be out of regions");
2726     }
2727   } while (_curr_region != NULL && !has_aborted());
2728
2729   if (!has_aborted()) {
2730     // We cannot check whether the global stack is empty, since other
2731     // tasks might be pushing objects to it concurrently.
2732     assert(_cm->out_of_regions(),
2733            "at this point we should be out of regions");
2734     // Try to reduce the number of available SATB buffers so that
2735     // remark has less work to do.
2736     drain_satb_buffers();
2737   }
2738
2739   // Since we've done everything else, we can now totally drain the
2740   // local queue and global stack.
2741   drain_local_queue(false);
2742   drain_global_stack(false);
2743
2744   // Attempt at work stealing from other tasks' queues.
2745   if (do_stealing && !has_aborted()) {
2746     // We have not aborted. This means that we have finished all that
2747     // we could. Let's try to do some stealing...
2748
2749     // We cannot check whether the global stack is empty, since other
2750     // tasks might be pushing objects to it concurrently.
2751     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2752            "only way to reach here");
2753     while (!has_aborted()) {
2754       G1TaskQueueEntry entry;
2755       if (_cm->try_stealing(_worker_id, entry)) {
2756         scan_task_entry(entry);
2757
2758         // And since we're towards the end, let's totally drain the
2759         // local queue and global stack.
2760         drain_local_queue(false);
2761         drain_global_stack(false);
2762       } else {
2763         break;
2764       }
2765     }
2766   }
2767
2768   // We still haven't aborted. Now, let's try to get into the
2769   // termination protocol.
2770   if (do_termination && !has_aborted()) {
2771     // We cannot check whether the global stack is empty, since other
2772     // tasks might be concurrently pushing objects on it.
2773     // Separated the asserts so that we know which one fires.
2774     assert(_cm->out_of_regions(), "only way to reach here");
2775     assert(_task_queue->size() == 0, "only way to reach here");
2776     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2777
2778     // The G1CMTask class also extends the TerminatorTerminator class,
2779     // hence its should_exit_termination() method will also decide
2780     // whether to exit the termination protocol or not.
2781     bool finished = (is_serial ||
2782                      _cm->terminator()->offer_termination(this));
2783     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2784     _termination_time_ms +=
2785       termination_end_time_ms - _termination_start_time_ms;
2786
2787     if (finished) {
2788       // We're all done.
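      // (Reached only once every task has offered termination, or trivially
      // in the serial case.)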
2789
2790       // We can now guarantee that the global stack is empty, since
2791       // all other tasks have finished. We separated the guarantees so
2792       // that, if a condition is false, we can immediately find out
2793       // which one.
2794       guarantee(_cm->out_of_regions(), "only way to reach here");
2795       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2796       guarantee(_task_queue->size() == 0, "only way to reach here");
2797       guarantee(!_cm->has_overflown(), "only way to reach here");
2798     } else {
2799       // Apparently there's more work to do. Let's abort this task. The
2800       // caller will restart it and we can hopefully find more things to do.
2801       set_has_aborted();
2802     }
2803   }
2804
2805   // Mainly for debugging purposes to make sure that a pointer to the
2806   // closure which was statically allocated in this frame doesn't
2807   // escape it by accident.
2808   set_cm_oop_closure(NULL);
2809   double end_time_ms = os::elapsedVTime() * 1000.0;
2810   double elapsed_time_ms = end_time_ms - _start_time_ms;
2811   // Update the step history.
2812   _step_times_ms.add(elapsed_time_ms);
2813
2814   if (has_aborted()) {
2815     // The task was aborted for some reason.
2816     if (_has_timed_out) {
2817       double diff_ms = elapsed_time_ms - _time_target_ms;
2818       // Keep statistics of how well we did with respect to hitting
2819       // our target only if we actually timed out (if we aborted for
2820       // other reasons, then the results might get skewed).
2821       _marking_step_diffs_ms.add(diff_ms);
2822     }
2823
2824     if (_cm->has_overflown()) {
2825       // This is the interesting one. We aborted because a global
2826       // overflow was raised. This means we have to restart the
2827       // marking phase and start iterating over regions. However, in
2828       // order to do this we have to make sure that all tasks stop
2829       // what they are doing and re-initialize in a safe manner. We
2830       // will achieve this with the use of two barrier sync points.
2831
2832       if (!is_serial) {
2833         // We only need to enter the sync barrier if being called
2834         // from a parallel context.
2835         _cm->enter_first_sync_barrier(_worker_id);
2836
2837         // When we exit this sync barrier we know that all tasks have
2838         // stopped doing marking work. So, it's now safe to
2839         // re-initialize our data structures.
2840       }
2841
2842       clear_region_fields();
2843       flush_mark_stats_cache();
2844
2845       if (!is_serial) {
2846         // If we're executing the concurrent phase of marking, reset the marking
2847         // state; otherwise the marking state is reset after reference processing,
2848         // during the remark pause.
2849         // If we reset here as a result of an overflow during the remark, we will
2850         // see assertion failures from any subsequent set_concurrency_and_phase()
2851         // calls.
2852         if (_cm->concurrent() && _worker_id == 0) {
2853           // Worker 0 is responsible for clearing the global data structures because
2854           // of an overflow. During STW we should not clear the overflow flag (in
2855           // G1ConcurrentMark::reset_marking_for_restart()) since we rely on it being true when we exit
2856           // this method to abort the pause and restart concurrent marking.
2857           _cm->reset_marking_for_restart();
2858
2859           log_info(gc, marking)("Concurrent Mark reset for overflow");
2860         }
2861
2862         // ...and enter the second barrier.
2863         _cm->enter_second_sync_barrier(_worker_id);
2864       }
2865       // At this point, if we're in the concurrent phase of
2866       // marking, everything has been re-initialized and we're
2867       // ready to restart.
2868 } 2869 } 2870 } 2871 2872 G1CMTask::G1CMTask(uint worker_id, 2873 G1ConcurrentMark* cm, 2874 G1CMTaskQueue* task_queue, 2875 G1RegionMarkStats* mark_stats, 2876 uint max_regions) : 2877 _objArray_processor(this), 2878 _worker_id(worker_id), 2879 _g1h(G1CollectedHeap::heap()), 2880 _cm(cm), 2881 _next_mark_bitmap(NULL), 2882 _task_queue(task_queue), 2883 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize), 2884 _calls(0), 2885 _time_target_ms(0.0), 2886 _start_time_ms(0.0), 2887 _cm_oop_closure(NULL), 2888 _curr_region(NULL), 2889 _finger(NULL), 2890 _region_limit(NULL), 2891 _words_scanned(0), 2892 _words_scanned_limit(0), 2893 _real_words_scanned_limit(0), 2894 _refs_reached(0), 2895 _refs_reached_limit(0), 2896 _real_refs_reached_limit(0), 2897 _has_aborted(false), 2898 _has_timed_out(false), 2899 _draining_satb_buffers(false), 2900 _step_times_ms(), 2901 _elapsed_time_ms(0.0), 2902 _termination_time_ms(0.0), 2903 _termination_start_time_ms(0.0), 2904 _marking_step_diffs_ms() 2905 { 2906 guarantee(task_queue != NULL, "invariant"); 2907 2908 _marking_step_diffs_ms.add(0.5); 2909 } 2910 2911 // These are formatting macros that are used below to ensure 2912 // consistent formatting. The *_H_* versions are used to format the 2913 // header for a particular value and they should be kept consistent 2914 // with the corresponding macro. Also note that most of the macros add 2915 // the necessary white space (as a prefix) which makes them a bit 2916 // easier to compose. 2917 2918 // All the output lines are prefixed with this string to be able to 2919 // identify them easily in a large log file. 2920 #define G1PPRL_LINE_PREFIX "###" 2921 2922 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2923 #ifdef _LP64 2924 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2925 #else // _LP64 2926 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2927 #endif // _LP64 2928 2929 // For per-region info 2930 #define G1PPRL_TYPE_FORMAT " %-4s" 2931 #define G1PPRL_TYPE_H_FORMAT " %4s" 2932 #define G1PPRL_STATE_FORMAT " %-5s" 2933 #define G1PPRL_STATE_H_FORMAT " %5s" 2934 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2935 #define G1PPRL_BYTE_H_FORMAT " %9s" 2936 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2937 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2938 2939 // For summary info 2940 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2941 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2942 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2943 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2944 2945 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2946 _total_used_bytes(0), _total_capacity_bytes(0), 2947 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2948 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2949 { 2950 if (!log_is_enabled(Trace, gc, liveness)) { 2951 return; 2952 } 2953 2954 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2955 MemRegion g1_reserved = g1h->g1_reserved(); 2956 double now = os::elapsedTime(); 2957 2958 // Print the header of the output. 
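  // Two header rows follow: the column names, then their units.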
2959 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2960 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2961 G1PPRL_SUM_ADDR_FORMAT("reserved") 2962 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2963 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2964 HeapRegion::GrainBytes); 2965 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2966 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2967 G1PPRL_TYPE_H_FORMAT 2968 G1PPRL_ADDR_BASE_H_FORMAT 2969 G1PPRL_BYTE_H_FORMAT 2970 G1PPRL_BYTE_H_FORMAT 2971 G1PPRL_BYTE_H_FORMAT 2972 G1PPRL_DOUBLE_H_FORMAT 2973 G1PPRL_BYTE_H_FORMAT 2974 G1PPRL_STATE_H_FORMAT 2975 G1PPRL_BYTE_H_FORMAT, 2976 "type", "address-range", 2977 "used", "prev-live", "next-live", "gc-eff", 2978 "remset", "state", "code-roots"); 2979 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2980 G1PPRL_TYPE_H_FORMAT 2981 G1PPRL_ADDR_BASE_H_FORMAT 2982 G1PPRL_BYTE_H_FORMAT 2983 G1PPRL_BYTE_H_FORMAT 2984 G1PPRL_BYTE_H_FORMAT 2985 G1PPRL_DOUBLE_H_FORMAT 2986 G1PPRL_BYTE_H_FORMAT 2987 G1PPRL_STATE_H_FORMAT 2988 G1PPRL_BYTE_H_FORMAT, 2989 "", "", 2990 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 2991 "(bytes)", "", "(bytes)"); 2992 } 2993 2994 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { 2995 if (!log_is_enabled(Trace, gc, liveness)) { 2996 return false; 2997 } 2998 2999 const char* type = r->get_type_str(); 3000 HeapWord* bottom = r->bottom(); 3001 HeapWord* end = r->end(); 3002 size_t capacity_bytes = r->capacity(); 3003 size_t used_bytes = r->used(); 3004 size_t prev_live_bytes = r->live_bytes(); 3005 size_t next_live_bytes = r->next_live_bytes(); 3006 double gc_eff = r->gc_efficiency(); 3007 size_t remset_bytes = r->rem_set()->mem_size(); 3008 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3009 const char* remset_type = r->rem_set()->get_short_state_str(); 3010 3011 _total_used_bytes += used_bytes; 3012 _total_capacity_bytes += capacity_bytes; 3013 _total_prev_live_bytes += prev_live_bytes; 3014 _total_next_live_bytes += next_live_bytes; 3015 _total_remset_bytes += remset_bytes; 3016 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3017 3018 // Print a line for this particular region. 3019 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3020 G1PPRL_TYPE_FORMAT 3021 G1PPRL_ADDR_BASE_FORMAT 3022 G1PPRL_BYTE_FORMAT 3023 G1PPRL_BYTE_FORMAT 3024 G1PPRL_BYTE_FORMAT 3025 G1PPRL_DOUBLE_FORMAT 3026 G1PPRL_BYTE_FORMAT 3027 G1PPRL_STATE_FORMAT 3028 G1PPRL_BYTE_FORMAT, 3029 type, p2i(bottom), p2i(end), 3030 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3031 remset_bytes, remset_type, strong_code_roots_bytes); 3032 3033 return false; 3034 } 3035 3036 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3037 if (!log_is_enabled(Trace, gc, liveness)) { 3038 return; 3039 } 3040 3041 // add static memory usages to remembered set sizes 3042 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3043 // Print the footer of the output. 
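  // The SUMMARY line reports the totals accumulated per region in
  // do_heap_region().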
3044 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3045 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3046 " SUMMARY" 3047 G1PPRL_SUM_MB_FORMAT("capacity") 3048 G1PPRL_SUM_MB_PERC_FORMAT("used") 3049 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3050 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3051 G1PPRL_SUM_MB_FORMAT("remset") 3052 G1PPRL_SUM_MB_FORMAT("code-roots"), 3053 bytes_to_mb(_total_capacity_bytes), 3054 bytes_to_mb(_total_used_bytes), 3055 percent_of(_total_used_bytes, _total_capacity_bytes), 3056 bytes_to_mb(_total_prev_live_bytes), 3057 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 3058 bytes_to_mb(_total_next_live_bytes), 3059 percent_of(_total_next_live_bytes, _total_capacity_bytes), 3060 bytes_to_mb(_total_remset_bytes), 3061 bytes_to_mb(_total_strong_code_roots_bytes)); 3062 }