/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}
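// Added illustration (not part of the original source): both capacities are
// passed in G1TaskQueueEntry units and converted to whole chunks. Assuming a
// 64-bit VM where sizeof(TaskQueueEntryChunk) is 8 KiB (1024 words including
// the next pointer) and os::vm_allocation_granularity() is 4 KiB,
// capacity_alignment() is lcm(4096, 8192) / 8 = 1024 entries; a MarkStackSize
// of 100000 entries is then aligned up to 100352 and becomes 98 chunks.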
void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}
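// Added note (not in the original source): the early return above is only a
// fast-path filter; the Atomic::add can still race past _chunk_capacity, which
// the second bounds check catches. A thread that over-increments simply
// returns NULL without touching _base, so _hwm stays bounded by
// _chunk_capacity + #threads as described in the comment above.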
bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}
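// Usage sketch (added for illustration; the buffer name is hypothetical).
// Callers always transfer whole chunks of EntriesPerChunk entries at a time:
//
//   G1TaskQueueEntry buffer[EntriesPerChunk];
//   // ... fill buffer from a marking task's local queue ...
//   if (!_global_mark_stack.par_push_chunk(buffer)) {
//     // Global mark stack is full: record the overflow and restart marking.
//   }
//
// A false result from par_push_chunk() signals mark stack overflow, while a
// false result from par_pop_chunk() merely means the stack is empty.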
void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
  _root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
  _max_regions(max_regions),
  _num_root_regions(0),
  _claimed_root_regions(0),
  _scan_in_progress(false),
  _should_abort(false) { }

G1CMRootRegions::~G1CMRootRegions() {
  FREE_C_HEAP_ARRAY(HeapRegion*, _root_regions);
}

void G1CMRootRegions::reset() {
  _num_root_regions = 0;
}

void G1CMRootRegions::add(HeapRegion* hr) {
  assert_at_safepoint();
  size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
  assert(idx < _max_regions, "Trying to add more root regions than there is space " SIZE_FORMAT, _max_regions);
  _root_regions[idx] = hr;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  _scan_in_progress = _num_root_regions > 0;

  _claimed_root_regions = 0;
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  if (_claimed_root_regions >= _num_root_regions) {
    return NULL;
  }

  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
  if (claimed_index < _num_root_regions) {
    return _root_regions[claimed_index];
  }
  return NULL;
}
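// Added note (not in the original source): racing claimers can push
// _claimed_root_regions past _num_root_regions; the bounds check on
// claimed_index turns such overshooting claims into a NULL result, which is
// harmless because prepare_for_scan() resets the counter for the next cycle.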
uint G1CMRootRegions::num_root_regions() const {
  return (uint)_num_root_regions;
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  if (!_should_abort) {
    assert(_claimed_root_regions >= num_root_regions(),
           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
           _claimed_root_regions, num_root_regions());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}
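// Worked example (added for illustration): the result is
// (num_gc_workers + 2) / 4 in integer arithmetic, floored at one thread.
// So 1 or 2 STW workers map to 1 concurrent worker, 8 map to
// (8 + 2) / 4 = 2, and 13 map to (13 + 2) / 4 = 3.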
G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(_g1h->max_regions()),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator((int) _max_num_tasks, _task_queues),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use a different number of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got humongous continues region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator.terminator()->reset_for_reuse((int) active_tasks);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}
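// Worked example (added for illustration, assuming the mark bitmap's usual
// one-bit-per-heap-word layout, i.e. a heap_map_factor() of 64 on a 64-bit
// VM): a 1 GB heap has 1 GB / 64 = 16 MB of bitmap to clear, which at the
// 1 MB chunk_size() yields 16 work units, so at most 16 workers are used
// even if the gang is larger.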
void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_initial_mark() {
  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);

  _root_regions.reset();
}

void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}
/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC, or for an
 * evacuation pause to occur. This is actually safe, since entering
 * the sync barrier is one of the last things do_marking_step() does,
 * and it doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial*/);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
                                                1, /* Minimum workers */
                                                _num_concurrent_workers,
                                                Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_workers() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}
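// Added note (not in the original source): with the default settings
// (UseDynamicNumberOfGCThreads enabled and ConcGCThreads derived
// ergonomically), the worker policy may choose anywhere between 1 and
// _max_concurrent_workers depending on the current number of non-daemon
// threads; specifying ConcGCThreads on the command line instead pins the
// count to the maximum.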
void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
         "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->next_top_at_mark_start();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}

class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }
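    // Worked example (added for illustration, assuming 1 MB regions, i.e.
    // HeapRegion::GrainWords == 131072 on a 64-bit VM): a live humongous
    // object of 300000 words spanning three regions gets 131072, 131072 and
    // 37856 words added to the respective regions, leaving marked_words == 0.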
    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous starts
      // region. We need to distribute this value across the entire set of regions a
      // humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which roughly one thread should be spawned for this work.
  static const uint RegionsPerThread = 384;
};
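// Sizing example (added for illustration): remark() divides the number of
// regions by RegionsPerThread, rounding up, so a 2048-region heap requests
// (2048 + 383) / 384 = 6 workers, further capped by the gang's active workers.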
class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    _g1h->resize_heap_if_necessary();

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;

  public:
    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                 FreeRegionList* local_cleanup_list) :
      _g1h(g1h),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");
    }
  }
};

void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}
void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
  }
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// state. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
class G1CMKeepAliveAndDrainClosure : public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  uint _ref_counter_limit;
  uint _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
    _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (_cm->has_overflown()) {
      return;
    }
    if (!_task->deal_with_reference(p)) {
      // We did not add anything to the mark bitmap (or mark stack), so there is
      // no point trying to drain it.
      return;
    }
    _ref_counter--;

    if (_ref_counter == 0) {
      // We have dealt with _ref_counter_limit references, pushing them
      // and objects reachable from them on to the local stack (and
      // possibly the global stack). Call G1CMTask::do_marking_step() to
      // process these entries.
      //
      // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
      // there's nothing more to do (i.e. we're done with the entries that
      // were pushed as a result of the G1CMTask::deal_with_reference() calls
      // above) or we overflow.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.
      do {
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
        _task->do_marking_step(mark_step_duration_ms,
                               false /* do_termination */,
                               _is_serial);
      } while (_task->has_aborted() && !_cm->has_overflown());
      _ref_counter = _ref_counter_limit;
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure : public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};
1557 G1CMIsAliveClosure g1_is_alive(_g1h); 1558 1559 // Inner scope to exclude the cleaning of the string table 1560 // from the displayed time. 1561 { 1562 GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm); 1563 1564 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1565 1566 // See the comment in G1CollectedHeap::ref_processing_init() 1567 // about how reference processing currently works in G1. 1568 1569 // Set the soft reference policy 1570 rp->setup_policy(clear_all_soft_refs); 1571 assert(_global_mark_stack.is_empty(), "mark stack should be empty"); 1572 1573 // Instances of the 'Keep Alive' and 'Complete GC' closures used 1574 // in serial reference processing. Note these closures are also 1575 // used for serially processing (by the current thread) the 1576 // JNI references during parallel reference processing. 1577 // 1578 // These closures do not need to synchronize with the worker 1579 // threads involved in parallel reference processing as these 1580 // instances are executed serially by the current thread (i.e. 1581 // reference processing is not multi-threaded and is thus 1582 // performed by the current thread instead of a gang worker). 1583 // 1584 // The gang tasks involved in parallel reference processing create 1585 // their own instances of these closures, which do their own 1586 // synchronization among themselves. 1587 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 1588 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 1589 1590 // We need at least one active thread. If reference processing 1591 // is not multi-threaded we use the current (VMThread) thread, 1592 // otherwise we use the work gang from the G1CollectedHeap and 1593 // we utilize all the worker threads we can. 1594 bool processing_is_mt = rp->processing_is_mt(); 1595 uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U); 1596 active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U); 1597 1598 // Parallel processing task executor. 1599 G1CMRefProcTaskExecutor par_task_executor(_g1h, this, 1600 _g1h->workers(), active_workers); 1601 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 1602 1603 // Set the concurrency level. The phase was already set prior to 1604 // executing the remark task. 1605 set_concurrency(active_workers); 1606 1607 // Set the degree of MT processing here. If the discovery was done MT, 1608 // the number of threads involved during discovery could differ from 1609 // the number of active workers. This is OK as long as the discovered 1610 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 1611 rp->set_active_mt_degree(active_workers); 1612 1613 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues()); 1614 1615 // Process the weak references. 1616 const ReferenceProcessorStats& stats = 1617 rp->process_discovered_references(&g1_is_alive, 1618 &g1_keep_alive, 1619 &g1_drain_mark_stack, 1620 executor, 1621 &pt); 1622 _gc_tracer_cm->report_gc_reference_stats(stats); 1623 pt.print_all_references(); 1624 1625 // The do_oop work routines of the keep_alive and drain_marking_stack 1626 // oop closures will set the has_overflown flag if we overflow the 1627 // global marking stack.
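// Both closures above stop early once has_overflown() has been set, which
// can leave entries on the global mark stack. The asserts below tolerate
// that, and the has_overflown() check after this scope escalates the
// condition to a fatal error asking for a larger MarkStackSizeMax.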
1628 1629 assert(has_overflown() || _global_mark_stack.is_empty(), 1630 "Mark stack should be empty (unless it has overflown)"); 1631 1632 assert(rp->num_queues() == active_workers, "why not"); 1633 1634 rp->verify_no_references_recorded(); 1635 assert(!rp->discovery_enabled(), "Post condition"); 1636 } 1637 1638 if (has_overflown()) { 1639 // We cannot trust g1_is_alive and the contents of the heap if the marking stack 1640 // overflowed while processing references. Exit the VM. 1641 fatal("Overflow during reference processing, can not continue. Please " 1642 "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and " 1643 "restart.", MarkStackSizeMax); 1644 return; 1645 } 1646 1647 assert(_global_mark_stack.is_empty(), "Marking should have completed"); 1648 1649 { 1650 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm); 1651 WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1); 1652 } 1653 1654 // Unload Klasses, String, Code Cache, etc. 1655 if (ClassUnloadingWithConcurrentMark) { 1656 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); 1657 bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm); 1658 _g1h->complete_cleaning(&g1_is_alive, purged_classes); 1659 } else if (StringDedup::is_enabled()) { 1660 GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm); 1661 _g1h->string_dedup_cleaning(&g1_is_alive, NULL); 1662 } 1663 } 1664 1665 class G1PrecleanYieldClosure : public YieldClosure { 1666 G1ConcurrentMark* _cm; 1667 1668 public: 1669 G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { } 1670 1671 virtual bool should_return() { 1672 return _cm->has_aborted(); 1673 } 1674 1675 virtual bool should_return_fine_grain() { 1676 _cm->do_yield_check(); 1677 return _cm->has_aborted(); 1678 } 1679 }; 1680 1681 void G1ConcurrentMark::preclean() { 1682 assert(G1UseReferencePrecleaning, "Precleaning must be enabled."); 1683 1684 SuspendibleThreadSetJoiner joiner; 1685 1686 G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */); 1687 G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */); 1688 1689 set_concurrency_and_phase(1, true); 1690 1691 G1PrecleanYieldClosure yield_cl(this); 1692 1693 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1694 // Precleaning is single threaded. Temporarily disable MT discovery. 1695 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); 1696 rp->preclean_discovered_references(rp->is_alive_non_header(), 1697 &keep_alive, 1698 &drain_mark_stack, 1699 &yield_cl, 1700 _gc_timer_cm); 1701 } 1702 1703 // When sampling object counts, we already swapped the mark bitmaps, so we need to use 1704 // the prev bitmap to determine liveness. 1705 class G1ObjectCountIsAliveClosure: public BoolObjectClosure { 1706 G1CollectedHeap* _g1h; 1707 public: 1708 G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { } 1709 1710 bool do_object_b(oop obj) { 1711 HeapWord* addr = (HeapWord*)obj; 1712 return addr != NULL && 1713 (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj)); 1714 } 1715 }; 1716 1717 void G1ConcurrentMark::report_object_count(bool mark_completed) { 1718 // Depending on whether marking has completed, liveness needs to be determined 1719 // using either the next or prev bitmap.
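// (Per the comment above G1ObjectCountIsAliveClosure: once marking has
// completed, the bitmaps have already been swapped by swap_mark_bitmaps()
// below, so liveness is read from the prev bitmap; while marking is still
// in progress, the regular concurrent-mark is-alive closure
// (G1CMIsAliveClosure) is used instead.)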
1720 if (mark_completed) { 1721 G1ObjectCountIsAliveClosure is_alive(_g1h); 1722 _gc_tracer_cm->report_object_count_after_gc(&is_alive); 1723 } else { 1724 G1CMIsAliveClosure is_alive(_g1h); 1725 _gc_tracer_cm->report_object_count_after_gc(&is_alive); 1726 } 1727 } 1728 1729 1730 void G1ConcurrentMark::swap_mark_bitmaps() { 1731 G1CMBitMap* temp = _prev_mark_bitmap; 1732 _prev_mark_bitmap = _next_mark_bitmap; 1733 _next_mark_bitmap = temp; 1734 _g1h->collector_state()->set_clearing_next_bitmap(true); 1735 } 1736 1737 // Closure for marking entries in SATB buffers. 1738 class G1CMSATBBufferClosure : public SATBBufferClosure { 1739 private: 1740 G1CMTask* _task; 1741 G1CollectedHeap* _g1h; 1742 1743 // This is very similar to G1CMTask::deal_with_reference, but with 1744 // more relaxed requirements for the argument, so this must be more 1745 // circumspect about treating the argument as an object. 1746 void do_entry(void* entry) const { 1747 _task->increment_refs_reached(); 1748 oop const obj = static_cast<oop>(entry); 1749 _task->make_reference_grey(obj); 1750 } 1751 1752 public: 1753 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h) 1754 : _task(task), _g1h(g1h) { } 1755 1756 virtual void do_buffer(void** buffer, size_t size) { 1757 for (size_t i = 0; i < size; ++i) { 1758 do_entry(buffer[i]); 1759 } 1760 } 1761 }; 1762 1763 class G1RemarkThreadsClosure : public ThreadClosure { 1764 G1CMSATBBufferClosure _cm_satb_cl; 1765 G1CMOopClosure _cm_cl; 1766 MarkingCodeBlobClosure _code_cl; 1767 int _thread_parity; 1768 1769 public: 1770 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) : 1771 _cm_satb_cl(task, g1h), 1772 _cm_cl(g1h, task), 1773 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), 1774 _thread_parity(Threads::thread_claim_parity()) {} 1775 1776 void do_thread(Thread* thread) { 1777 if (thread->is_Java_thread()) { 1778 if (thread->claim_oops_do(true, _thread_parity)) { 1779 JavaThread* jt = (JavaThread*)thread; 1780 1781 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking; 1782 // however, oops reachable from nmethods have very complex lifecycles: 1783 // * Alive if on the stack of an executing method 1784 // * Weakly reachable otherwise 1785 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be 1786 // live by the SATB invariant, but other oops recorded in nmethods may behave differently. 1787 jt->nmethods_do(&_code_cl); 1788 1789 G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl); 1790 } 1791 } else if (thread->is_VM_thread()) { 1792 if (thread->claim_oops_do(true, _thread_parity)) { 1793 G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl); 1794 } 1795 } 1796 } 1797 }; 1798 1799 class G1CMRemarkTask : public AbstractGangTask { 1800 G1ConcurrentMark* _cm; 1801 public: 1802 void work(uint worker_id) { 1803 G1CMTask* task = _cm->task(worker_id); 1804 task->record_start_time(); 1805 { 1806 ResourceMark rm; 1807 HandleMark hm; 1808 1809 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task); 1810 Threads::threads_do(&threads_f); 1811 } 1812 1813 do { 1814 task->do_marking_step(1000000000.0 /* something very large */, 1815 true /* do_termination */, 1816 false /* is_serial */); 1817 } while (task->has_aborted() && !_cm->has_overflown()); 1818 // If we overflow, then we do not want to restart.
We instead 1819 // want to abort remark and do concurrent marking again. 1820 task->record_end_time(); 1821 } 1822 1823 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) : 1824 AbstractGangTask("Par Remark"), _cm(cm) { 1825 _cm->terminator()->reset_for_reuse(active_workers); 1826 } 1827 }; 1828 1829 void G1ConcurrentMark::finalize_marking() { 1830 ResourceMark rm; 1831 HandleMark hm; 1832 1833 _g1h->ensure_parsability(false); 1834 1835 // this is remark, so we'll use up all active threads 1836 uint active_workers = _g1h->workers()->active_workers(); 1837 set_concurrency_and_phase(active_workers, false /* concurrent */); 1838 // Leave _parallel_marking_threads at its 1839 // value originally calculated in the G1ConcurrentMark 1840 // constructor and pass values of the active workers 1841 // through the gang in the task. 1842 1843 { 1844 StrongRootsScope srs(active_workers); 1845 1846 G1CMRemarkTask remarkTask(this, active_workers); 1847 // We will start all available threads, even if we decide that the 1848 // active_workers will be fewer. The extra ones will just bail out 1849 // immediately. 1850 _g1h->workers()->run_task(&remarkTask); 1851 } 1852 1853 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 1854 guarantee(has_overflown() || 1855 satb_mq_set.completed_buffers_num() == 0, 1856 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT, 1857 BOOL_TO_STR(has_overflown()), 1858 satb_mq_set.completed_buffers_num()); 1859 1860 print_stats(); 1861 } 1862 1863 void G1ConcurrentMark::flush_all_task_caches() { 1864 size_t hits = 0; 1865 size_t misses = 0; 1866 for (uint i = 0; i < _max_num_tasks; i++) { 1867 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache(); 1868 hits += stats.first; 1869 misses += stats.second; 1870 } 1871 size_t sum = hits + misses; 1872 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf", 1873 hits, misses, percent_of(hits, sum)); 1874 } 1875 1876 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) { 1877 _prev_mark_bitmap->clear_range(mr); 1878 } 1879 1880 HeapRegion* 1881 G1ConcurrentMark::claim_region(uint worker_id) { 1882 // "checkpoint" the finger 1883 HeapWord* finger = _finger; 1884 1885 while (finger < _heap.end()) { 1886 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 1887 1888 HeapRegion* curr_region = _g1h->heap_region_containing(finger); 1889 // Make sure that the reads below do not float before loading curr_region. 1890 OrderAccess::loadload(); 1891 // Above heap_region_containing may return NULL as we always scan and claim 1892 // until the end of the heap. In this case, just jump to the next region. 1893 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; 1894 1895 // Is the gap between reading the finger and doing the CAS too long? 1896 HeapWord* res = Atomic::cmpxchg(end, &_finger, finger); 1897 if (res == finger && curr_region != NULL) { 1898 // we succeeded 1899 HeapWord* bottom = curr_region->bottom(); 1900 HeapWord* limit = curr_region->next_top_at_mark_start(); 1901 1902 // notice that _finger == end cannot be guaranteed here since 1903 // someone else might have moved the finger even further 1904 assert(_finger >= end, "the finger should have moved forward"); 1905 1906 if (limit > bottom) { 1907 return curr_region; 1908 } else { 1909 assert(limit == bottom, 1910 "the region limit should be at bottom"); 1911 // we return NULL and the caller should try calling 1912 // claim_region() again.
1913 return NULL; 1914 } 1915 } else { 1916 assert(_finger > finger, "the finger should have moved forward"); 1917 // read it again 1918 finger = _finger; 1919 } 1920 } 1921 1922 return NULL; 1923 } 1924 1925 #ifndef PRODUCT 1926 class VerifyNoCSetOops { 1927 G1CollectedHeap* _g1h; 1928 const char* _phase; 1929 int _info; 1930 1931 public: 1932 VerifyNoCSetOops(const char* phase, int info = -1) : 1933 _g1h(G1CollectedHeap::heap()), 1934 _phase(phase), 1935 _info(info) 1936 { } 1937 1938 void operator()(G1TaskQueueEntry task_entry) const { 1939 if (task_entry.is_array_slice()) { 1940 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1941 return; 1942 } 1943 guarantee(oopDesc::is_oop(task_entry.obj()), 1944 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1945 p2i(task_entry.obj()), _phase, _info); 1946 guarantee(!_g1h->is_in_cset(task_entry.obj()), 1947 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1948 p2i(task_entry.obj()), _phase, _info); 1949 } 1950 }; 1951 1952 void G1ConcurrentMark::verify_no_cset_oops() { 1953 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1954 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { 1955 return; 1956 } 1957 1958 // Verify entries on the global mark stack 1959 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1960 1961 // Verify entries on the task queues 1962 for (uint i = 0; i < _max_num_tasks; ++i) { 1963 G1CMTaskQueue* queue = _task_queues->queue(i); 1964 queue->iterate(VerifyNoCSetOops("Queue", i)); 1965 } 1966 1967 // Verify the global finger 1968 HeapWord* global_finger = finger(); 1969 if (global_finger != NULL && global_finger < _heap.end()) { 1970 // Since we always iterate over all regions, we might get a NULL HeapRegion 1971 // here. 1972 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1973 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1974 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1975 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1976 } 1977 1978 // Verify the task fingers 1979 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 1980 for (uint i = 0; i < _num_concurrent_workers; ++i) { 1981 G1CMTask* task = _tasks[i]; 1982 HeapWord* task_finger = task->finger(); 1983 if (task_finger != NULL && task_finger < _heap.end()) { 1984 // See above note on the global finger verification. 1985 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 1986 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 1987 !task_hr->in_collection_set(), 1988 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1989 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 1990 } 1991 } 1992 } 1993 #endif // PRODUCT 1994 1995 void G1ConcurrentMark::rebuild_rem_set_concurrently() { 1996 _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset); 1997 } 1998 1999 void G1ConcurrentMark::print_stats() { 2000 if (!log_is_enabled(Debug, gc, stats)) { 2001 return; 2002 } 2003 log_debug(gc, stats)("---------------------------------------------------------------------"); 2004 for (size_t i = 0; i < _num_active_tasks; ++i) { 2005 _tasks[i]->print_stats(); 2006 log_debug(gc, stats)("---------------------------------------------------------------------"); 2007 } 2008 } 2009 2010 void G1ConcurrentMark::concurrent_cycle_abort() { 2011 if (!cm_thread()->during_cycle() || _has_aborted) { 2012 // We haven't started a concurrent cycle or we have already aborted it. 
No need to do anything. 2013 return; 2014 } 2015 2016 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2017 // concurrent bitmap clearing. 2018 { 2019 GCTraceTime(Debug, gc) debug("Clear Next Bitmap"); 2020 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false); 2021 } 2022 // Note we cannot clear the previous marking bitmap here 2023 // since VerifyDuringGC verifies the objects marked during 2024 // a full GC against the previous bitmap. 2025 2026 // Empty mark stack 2027 reset_marking_for_restart(); 2028 for (uint i = 0; i < _max_num_tasks; ++i) { 2029 _tasks[i]->clear_region_fields(); 2030 } 2031 _first_overflow_barrier_sync.abort(); 2032 _second_overflow_barrier_sync.abort(); 2033 _has_aborted = true; 2034 2035 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2036 satb_mq_set.abandon_partial_marking(); 2037 // This can be called either during or outside marking, we'll read 2038 // the expected_active value from the SATB queue set. 2039 satb_mq_set.set_active_all_threads( 2040 false, /* new active value */ 2041 satb_mq_set.is_active() /* expected_active */); 2042 } 2043 2044 static void print_ms_time_info(const char* prefix, const char* name, 2045 NumberSeq& ns) { 2046 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2047 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2048 if (ns.num() > 0) { 2049 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", 2050 prefix, ns.sd(), ns.maximum()); 2051 } 2052 } 2053 2054 void G1ConcurrentMark::print_summary_info() { 2055 Log(gc, marking) log; 2056 if (!log.is_trace()) { 2057 return; 2058 } 2059 2060 log.trace(" Concurrent marking:"); 2061 print_ms_time_info(" ", "init marks", _init_times); 2062 print_ms_time_info(" ", "remarks", _remark_times); 2063 { 2064 print_ms_time_info(" ", "final marks", _remark_mark_times); 2065 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2066 2067 } 2068 print_ms_time_info(" ", "cleanups", _cleanup_times); 2069 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2070 _total_cleanup_time, (_cleanup_times.num() > 0 ? 
_total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2071 log.trace(" Total stop_world time = %8.2f s.", 2072 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2073 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2074 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum()); 2075 } 2076 2077 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2078 _concurrent_workers->print_worker_threads_on(st); 2079 } 2080 2081 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2082 _concurrent_workers->threads_do(tc); 2083 } 2084 2085 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2086 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2087 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap)); 2088 _prev_mark_bitmap->print_on_error(st, " Prev Bits: "); 2089 _next_mark_bitmap->print_on_error(st, " Next Bits: "); 2090 } 2091 2092 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2093 ReferenceProcessor* result = g1h->ref_processor_cm(); 2094 assert(result != NULL, "CM reference processor should not be NULL"); 2095 return result; 2096 } 2097 2098 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2099 G1CMTask* task) 2100 : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)), 2101 _g1h(g1h), _task(task) 2102 { } 2103 2104 void G1CMTask::setup_for_region(HeapRegion* hr) { 2105 assert(hr != NULL, 2106 "claim_region() should have filtered out NULL regions"); 2107 _curr_region = hr; 2108 _finger = hr->bottom(); 2109 update_region_limit(); 2110 } 2111 2112 void G1CMTask::update_region_limit() { 2113 HeapRegion* hr = _curr_region; 2114 HeapWord* bottom = hr->bottom(); 2115 HeapWord* limit = hr->next_top_at_mark_start(); 2116 2117 if (limit == bottom) { 2118 // The region was collected underneath our feet. 2119 // We set the finger to bottom to ensure that the bitmap 2120 // iteration that will follow this will not do anything. 2121 // (this is not a condition that holds when we set the region up, 2122 // as the region is not supposed to be empty in the first place) 2123 _finger = bottom; 2124 } else if (limit >= _region_limit) { 2125 assert(limit >= _finger, "peace of mind"); 2126 } else { 2127 assert(limit < _region_limit, "only way to get here"); 2128 // This can happen under some pretty unusual circumstances. An 2129 // evacuation pause empties the region underneath our feet (NTAMS 2130 // at bottom). We then do some allocation in the region (NTAMS 2131 // stays at bottom), followed by the region being used as a GC 2132 // alloc region (NTAMS will move to top() and the objects 2133 // originally below it will be grayed). All objects now marked in 2134 // the region are explicitly grayed, if below the global finger, 2135 // and we do not need in fact to scan anything else. So, we simply 2136 // set _finger to be limit to ensure that the bitmap iteration 2137 // doesn't do anything. 2138 _finger = limit; 2139 } 2140 2141 _region_limit = limit; 2142 } 2143 2144 void G1CMTask::giveup_current_region() { 2145 assert(_curr_region != NULL, "invariant"); 2146 clear_region_fields(); 2147 } 2148 2149 void G1CMTask::clear_region_fields() { 2150 // Values for these three fields that indicate that we're not 2151 // holding on to a region. 
2152 _curr_region = NULL; 2153 _finger = NULL; 2154 _region_limit = NULL; 2155 } 2156 2157 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 2158 if (cm_oop_closure == NULL) { 2159 assert(_cm_oop_closure != NULL, "invariant"); 2160 } else { 2161 assert(_cm_oop_closure == NULL, "invariant"); 2162 } 2163 _cm_oop_closure = cm_oop_closure; 2164 } 2165 2166 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) { 2167 guarantee(next_mark_bitmap != NULL, "invariant"); 2168 _next_mark_bitmap = next_mark_bitmap; 2169 clear_region_fields(); 2170 2171 _calls = 0; 2172 _elapsed_time_ms = 0.0; 2173 _termination_time_ms = 0.0; 2174 _termination_start_time_ms = 0.0; 2175 2176 _mark_stats_cache.reset(); 2177 } 2178 2179 bool G1CMTask::should_exit_termination() { 2180 if (!regular_clock_call()) { 2181 return true; 2182 } 2183 2184 // This is called when we are in the termination protocol. We should 2185 // quit if, for some reason, this task wants to abort or the global 2186 // stack is not empty (this means that we can get work from it). 2187 return !_cm->mark_stack_empty() || has_aborted(); 2188 } 2189 2190 void G1CMTask::reached_limit() { 2191 assert(_words_scanned >= _words_scanned_limit || 2192 _refs_reached >= _refs_reached_limit , 2193 "shouldn't have been called otherwise"); 2194 abort_marking_if_regular_check_fail(); 2195 } 2196 2197 bool G1CMTask::regular_clock_call() { 2198 if (has_aborted()) { 2199 return false; 2200 } 2201 2202 // First, we need to recalculate the words scanned and refs reached 2203 // limits for the next clock call. 2204 recalculate_limits(); 2205 2206 // During the regular clock call we do the following: 2207 2208 // (1) If an overflow has been flagged, then we abort. 2209 if (_cm->has_overflown()) { 2210 return false; 2211 } 2212 2213 // If we are not concurrent (i.e. we're doing remark) we don't need 2214 // to check anything else. The other steps are only needed during 2215 // the concurrent marking phase. 2216 if (!_cm->concurrent()) { 2217 return true; 2218 } 2219 2220 // (2) If marking has been aborted for Full GC, then we also abort. 2221 if (_cm->has_aborted()) { 2222 return false; 2223 } 2224 2225 double curr_time_ms = os::elapsedVTime() * 1000.0; 2226 2227 // (3) We check whether we should yield. If we have to, then we abort. 2228 if (SuspendibleThreadSet::should_yield()) { 2229 // We should yield. To do this we abort the task. The caller is 2230 // responsible for yielding. 2231 return false; 2232 } 2233 2234 // (4) We check whether we've reached our time quota. If we have, 2235 // then we abort. 2236 double elapsed_time_ms = curr_time_ms - _start_time_ms; 2237 if (elapsed_time_ms > _time_target_ms) { 2238 _has_timed_out = true; 2239 return false; 2240 } 2241 2242 // (5) Finally, we check whether there are enough completed SATB 2243 // buffers available for processing. If there are, we abort.
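// (If we abort here, the restarted marking step processes the queued
//  buffers first: see drain_satb_buffers() below and the call to it near
//  the top of G1CMTask::do_marking_step().)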
2244 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2245 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 2246 // we do need to process SATB buffers, so we'll abort and restart 2247 // the marking task to do so 2248 return false; 2249 } 2250 return true; 2251 } 2252 2253 void G1CMTask::recalculate_limits() { 2254 _real_words_scanned_limit = _words_scanned + words_scanned_period; 2255 _words_scanned_limit = _real_words_scanned_limit; 2256 2257 _real_refs_reached_limit = _refs_reached + refs_reached_period; 2258 _refs_reached_limit = _real_refs_reached_limit; 2259 } 2260 2261 void G1CMTask::decrease_limits() { 2262 // This is called when we believe that we're going to do an infrequent 2263 // operation which will increase the per byte scanned cost (i.e. move 2264 // entries to/from the global stack). It basically tries to decrease the 2265 // scanning limit so that the clock is called earlier. 2266 2267 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4; 2268 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4; 2269 } 2270 2271 void G1CMTask::move_entries_to_global_stack() { 2272 // Local array where we'll store the entries that will be popped 2273 // from the local queue. 2274 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2275 2276 size_t n = 0; 2277 G1TaskQueueEntry task_entry; 2278 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) { 2279 buffer[n] = task_entry; 2280 ++n; 2281 } 2282 if (n < G1CMMarkStack::EntriesPerChunk) { 2283 buffer[n] = G1TaskQueueEntry(); 2284 } 2285 2286 if (n > 0) { 2287 if (!_cm->mark_stack_push(buffer)) { 2288 set_has_aborted(); 2289 } 2290 } 2291 2292 // This operation was quite expensive, so decrease the limits. 2293 decrease_limits(); 2294 } 2295 2296 bool G1CMTask::get_entries_from_global_stack() { 2297 // Local array where we'll store the entries that will be popped 2298 // from the global stack. 2299 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2300 2301 if (!_cm->mark_stack_pop(buffer)) { 2302 return false; 2303 } 2304 2305 // We did actually pop at least one entry. 2306 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) { 2307 G1TaskQueueEntry task_entry = buffer[i]; 2308 if (task_entry.is_null()) { 2309 break; 2310 } 2311 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj())); 2312 bool success = _task_queue->push(task_entry); 2313 // We only call this when the local queue is empty or under a 2314 // given target limit. So, we do not expect this push to fail. 2315 assert(success, "invariant"); 2316 } 2317 2318 // This operation was quite expensive, so decrease the limits. 2319 decrease_limits(); 2320 return true; 2321 } 2322 2323 void G1CMTask::drain_local_queue(bool partially) { 2324 if (has_aborted()) { 2325 return; 2326 } 2327 2328 // Decide what the target size is, depending on whether we're going to 2329 // drain it partially (so that other tasks can steal if they run out 2330 // of things to do) or totally (at the very end).
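// A worked example of the computation below, with illustrative (not
// normative) values: for a task queue with max_elems() == 16384 and the
// default GCDrainStackTargetSize of 64, the partial target is
// MIN2(16384 / 3, 64) == 64, i.e. the queue is drained down to 64 entries,
// leaving some work available for other tasks to steal.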
2331 size_t target_size; 2332 if (partially) { 2333 target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize); 2334 } else { 2335 target_size = 0; 2336 } 2337 2338 if (_task_queue->size() > target_size) { 2339 G1TaskQueueEntry entry; 2340 bool ret = _task_queue->pop_local(entry); 2341 while (ret) { 2342 scan_task_entry(entry); 2343 if (_task_queue->size() <= target_size || has_aborted()) { 2344 ret = false; 2345 } else { 2346 ret = _task_queue->pop_local(entry); 2347 } 2348 } 2349 } 2350 } 2351 2352 void G1CMTask::drain_global_stack(bool partially) { 2353 if (has_aborted()) { 2354 return; 2355 } 2356 2357 // We have a policy to drain the local queue before we attempt to 2358 // drain the global stack. 2359 assert(partially || _task_queue->size() == 0, "invariant"); 2360 2361 // Decide what the target size is, depending on whether we're going to 2362 // drain it partially (so that other tasks can steal if they run out 2363 // of things to do) or totally (at the very end). 2364 // Notice that when draining the global mark stack partially, due to the raciness 2365 // of the mark stack size update we might in fact drop below the target. But, 2366 // this is not a problem. 2367 // In case of total draining, we simply process until the global mark stack is 2368 // totally empty, disregarding the size counter. 2369 if (partially) { 2370 size_t const target_size = _cm->partial_mark_stack_size_target(); 2371 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 2372 if (get_entries_from_global_stack()) { 2373 drain_local_queue(partially); 2374 } 2375 } 2376 } else { 2377 while (!has_aborted() && get_entries_from_global_stack()) { 2378 drain_local_queue(partially); 2379 } 2380 } 2381 } 2382 2383 // SATB Queue has several assumptions on whether to call the par or 2384 // non-par versions of the methods. This is why some of the code is 2385 // replicated. We should really get rid of the single-threaded version 2386 // of the code to simplify things. 2387 void G1CMTask::drain_satb_buffers() { 2388 if (has_aborted()) { 2389 return; 2390 } 2391 2392 // We set this so that the regular clock knows that we're in the 2393 // middle of draining buffers and doesn't set the abort flag when it 2394 // notices that SATB buffers are available for draining. It'd be 2395 // very counterproductive if it did that. :-) 2396 _draining_satb_buffers = true; 2397 2398 G1CMSATBBufferClosure satb_cl(this, _g1h); 2399 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2400 2401 // This keeps claiming and applying the closure to completed buffers 2402 // until we run out of buffers or we need to abort.
2403 while (!has_aborted() && 2404 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 2405 abort_marking_if_regular_check_fail(); 2406 } 2407 2408 _draining_satb_buffers = false; 2409 2410 assert(has_aborted() || 2411 _cm->concurrent() || 2412 satb_mq_set.completed_buffers_num() == 0, "invariant"); 2413 2414 // again, this was a potentially expensive operation, decrease the 2415 // limits to get the regular clock call early 2416 decrease_limits(); 2417 } 2418 2419 void G1CMTask::clear_mark_stats_cache(uint region_idx) { 2420 _mark_stats_cache.reset(region_idx); 2421 } 2422 2423 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() { 2424 return _mark_stats_cache.evict_all(); 2425 } 2426 2427 void G1CMTask::print_stats() { 2428 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls); 2429 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 2430 _elapsed_time_ms, _termination_time_ms); 2431 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms", 2432 _step_times_ms.num(), 2433 _step_times_ms.avg(), 2434 _step_times_ms.sd(), 2435 _step_times_ms.maximum(), 2436 _step_times_ms.sum()); 2437 size_t const hits = _mark_stats_cache.hits(); 2438 size_t const misses = _mark_stats_cache.misses(); 2439 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f", 2440 hits, misses, percent_of(hits, hits + misses)); 2441 } 2442 2443 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) { 2444 return _task_queues->steal(worker_id, task_entry); 2445 } 2446 2447 /***************************************************************************** 2448 2449 The do_marking_step(time_target_ms, ...) method is the building 2450 block of the parallel marking framework. It can be called in parallel 2451 with other invocations of do_marking_step() on different tasks 2452 (but only one per task, obviously) and concurrently with the 2453 mutator threads, or during remark, hence it eliminates the need 2454 for two versions of the code. When called during remark, it will 2455 pick up from where the task left off during the concurrent marking 2456 phase. Interestingly, tasks are also claimable during evacuation 2457 pauses too, since do_marking_step() ensures that it aborts before 2458 it needs to yield. 2459 2460 The data structures that it uses to do marking work are the 2461 following: 2462 2463 (1) Marking Bitmap. If there are gray objects that appear only 2464 on the bitmap (this happens either when dealing with an overflow 2465 or when the initial marking phase has simply marked the roots 2466 and didn't push them on the stack), then tasks claim heap 2467 regions whose bitmap they then scan to find gray objects. A 2468 global finger indicates where the end of the last claimed region 2469 is. A local finger indicates how far into the region a task has 2470 scanned. The two fingers are used to determine how to gray an 2471 object (i.e. whether simply marking it is OK, as it will be 2472 visited by a task in the future, or whether it needs to be also 2473 pushed on a stack). 2474 2475 (2) Local Queue. The local queue of the task which is accessed 2476 reasonably efficiently by the task. Other tasks can steal from 2477 it when they run out of work. Throughout the marking phase, a 2478 task attempts to keep its local queue short but not totally 2479 empty, so that entries are available for stealing by other 2480 tasks. 
Only when there is no more work will a task totally 2481 drain its local queue. 2482 2483 (3) Global Mark Stack. This handles local queue overflow. During 2484 marking only sets of entries are moved between it and the local 2485 queues, as access to it requires a mutex and more fine-grained 2486 interaction with it might cause contention. If it 2487 overflows, then the marking phase should restart and iterate 2488 over the bitmap to identify gray objects. Throughout the marking 2489 phase, tasks attempt to keep the global mark stack at a small 2490 length but not totally empty, so that entries are available for 2491 popping by other tasks. Only when there is no more work will tasks 2492 totally drain the global mark stack. 2493 2494 (4) SATB Buffer Queue. This is where completed SATB buffers are 2495 made available. Buffers are regularly removed from this queue 2496 and scanned for roots, so that the queue doesn't get too 2497 long. During remark, all completed buffers are processed, as 2498 well as the filled-in parts of any uncompleted buffers. 2499 2500 The do_marking_step() method tries to abort when the time target 2501 has been reached. There are a few other cases when the 2502 do_marking_step() method also aborts: 2503 2504 (1) When the marking phase has been aborted (after a Full GC). 2505 2506 (2) When a global overflow (on the global stack) has been 2507 triggered. Before the task aborts, it will actually sync up with 2508 the other tasks to ensure that all the marking data structures 2509 (local queues, stacks, fingers etc.) are re-initialized so that 2510 when do_marking_step() completes, the marking phase can 2511 immediately restart. 2512 2513 (3) When enough completed SATB buffers are available. The 2514 do_marking_step() method only tries to drain SATB buffers right 2515 at the beginning. So, if enough buffers are available, the 2516 marking step aborts and the SATB buffers are processed at 2517 the beginning of the next invocation. 2518 2519 (4) To yield. When we have to yield, we abort and yield 2520 right at the end of do_marking_step(). This saves us from a lot 2521 of hassle as, by yielding, we might allow a Full GC. If this 2522 happens then objects will be compacted underneath our feet, the 2523 heap might shrink, etc. We save checking for this by just 2524 aborting and doing the yield right at the end. 2525 2526 From the above it follows that the do_marking_step() method should 2527 be called in a loop (or, otherwise, regularly) until it completes. 2528 2529 If a marking step completes without its has_aborted() flag being 2530 true, it means it has completed the current marking phase (and 2531 also all other marking tasks have done so and have all synced up). 2532 2533 A method called regular_clock_call() is invoked "regularly" (in 2534 sub-millisecond intervals) throughout marking. It is this clock method that 2535 checks all the abort conditions which were mentioned above and 2536 decides when the task should abort. A work-based scheme is used to 2537 trigger this clock method: when the number of object words the 2538 marking phase has scanned or the number of references the marking 2539 phase has visited reach a given limit. Additional invocations of 2540 the clock method have been planted in a few other strategic places 2541 too. The initial reason for the clock method was to avoid calling 2542 vtime too regularly, as it is quite expensive.
So, once it was in 2543 place, it was natural to piggy-back all the other conditions on it 2544 too and not constantly check them throughout the code. 2545 2546 If do_termination is true then do_marking_step will enter its 2547 termination protocol. 2548 2549 The value of is_serial must be true when do_marking_step is being 2550 called serially (i.e. by the VMThread) and do_marking_step should 2551 skip any synchronization in the termination and overflow code. 2552 Examples include the serial remark code and the serial reference 2553 processing closures. 2554 2555 The value of is_serial must be false when do_marking_step is 2556 being called by any of the worker threads in a work gang. 2557 Examples include the concurrent marking code (CMMarkingTask), 2558 the MT remark code, and the MT reference processing closures. 2559 2560 *****************************************************************************/ 2561 2562 void G1CMTask::do_marking_step(double time_target_ms, 2563 bool do_termination, 2564 bool is_serial) { 2565 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2566 2567 _start_time_ms = os::elapsedVTime() * 1000.0; 2568 2569 // If do_stealing is true then do_marking_step will attempt to 2570 // steal work from the other G1CMTasks. It only makes sense to 2571 // enable stealing when the termination protocol is enabled 2572 // and do_marking_step() is not being called serially. 2573 bool do_stealing = do_termination && !is_serial; 2574 2575 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 2576 _time_target_ms = time_target_ms - diff_prediction_ms; 2577 2578 // set up the variables that are used in the work-based scheme to 2579 // call the regular clock method 2580 _words_scanned = 0; 2581 _refs_reached = 0; 2582 recalculate_limits(); 2583 2584 // clear all flags 2585 clear_has_aborted(); 2586 _has_timed_out = false; 2587 _draining_satb_buffers = false; 2588 2589 ++_calls; 2590 2591 // Set up the bitmap and oop closures. Anything that uses them is 2592 // eventually called from this method, so it is OK to allocate these 2593 // statically. 2594 G1CMBitMapClosure bitmap_closure(this, _cm); 2595 G1CMOopClosure cm_oop_closure(_g1h, this); 2596 set_cm_oop_closure(&cm_oop_closure); 2597 2598 if (_cm->has_overflown()) { 2599 // This can happen if the mark stack overflows during a GC pause 2600 // and this task, after a yield point, restarts. We have to abort 2601 // as we need to get into the overflow protocol which happens 2602 // right at the end of this task. 2603 set_has_aborted(); 2604 } 2605 2606 // First drain any available SATB buffers. After this, we will not 2607 // look at SATB buffers before the next invocation of this method. 2608 // If enough completed SATB buffers are queued up, the regular clock 2609 // will abort this task so that it restarts. 2610 drain_satb_buffers(); 2611 // ...then partially drain the local queue and the global stack 2612 drain_local_queue(true); 2613 drain_global_stack(true); 2614 2615 do { 2616 if (!has_aborted() && _curr_region != NULL) { 2617 // This means that we're already holding on to a region. 2618 assert(_finger != NULL, "if region is not NULL, then the finger " 2619 "should not be NULL either"); 2620 2621 // We might have restarted this task after an evacuation pause 2622 // which might have evacuated the region we're holding on to 2623 // underneath our feet. 
Let's read its limit again to make sure 2624 // that we do not iterate over a region of the heap that 2625 // contains garbage (update_region_limit() will also move 2626 // _finger to the start of the region if it is found empty). 2627 update_region_limit(); 2628 // We will start from _finger not from the start of the region, 2629 // as we might be restarting this task after aborting half-way 2630 // through scanning this region. In this case, _finger points to 2631 // the address where we last found a marked object. If this is a 2632 // fresh region, _finger points to start(). 2633 MemRegion mr = MemRegion(_finger, _region_limit); 2634 2635 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2636 "humongous regions should go around loop once only"); 2637 2638 // Some special cases: 2639 // If the memory region is empty, we can just give up the region. 2640 // If the current region is humongous then we only need to check 2641 // the bitmap for the bit associated with the start of the object, 2642 // scan the object if it's live, and give up the region. 2643 // Otherwise, let's iterate over the bitmap of the part of the region 2644 // that is left. 2645 // If the iteration is successful, give up the region. 2646 if (mr.is_empty()) { 2647 giveup_current_region(); 2648 abort_marking_if_regular_check_fail(); 2649 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2650 if (_next_mark_bitmap->is_marked(mr.start())) { 2651 // The object is marked - apply the closure 2652 bitmap_closure.do_addr(mr.start()); 2653 } 2654 // Even if this task aborted while scanning the humongous object 2655 // we can (and should) give up the current region. 2656 giveup_current_region(); 2657 abort_marking_if_regular_check_fail(); 2658 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) { 2659 giveup_current_region(); 2660 abort_marking_if_regular_check_fail(); 2661 } else { 2662 assert(has_aborted(), "currently the only way to do so"); 2663 // The only way to abort the bitmap iteration is to return 2664 // false from the do_bit() method. However, inside the 2665 // do_bit() method we move the _finger to point to the 2666 // object currently being looked at. So, if we bail out, we 2667 // have definitely set _finger to something non-null. 2668 assert(_finger != NULL, "invariant"); 2669 2670 // Region iteration was actually aborted. So now _finger 2671 // points to the address of the object we last scanned. If we 2672 // leave it there, when we restart this task, we will rescan 2673 // the object. It is easy to avoid this. We move the finger by 2674 // enough to point to the next possible object header. 2675 assert(_finger < _region_limit, "invariant"); 2676 HeapWord* const new_finger = _finger + ((oop)_finger)->size(); 2677 // Check if bitmap iteration was aborted while scanning the last object 2678 if (new_finger >= _region_limit) { 2679 giveup_current_region(); 2680 } else { 2681 move_finger_to(new_finger); 2682 } 2683 } 2684 } 2685 // At this point we have either completed iterating over the 2686 // region we were holding on to, or we have aborted. 2687 2688 // We then partially drain the local queue and the global stack. 2689 // (Do we really need this?) 
2690 drain_local_queue(true); 2691 drain_global_stack(true); 2692 2693 // Read the note on the claim_region() method on why it might 2694 // return NULL with potentially more regions available for 2695 // claiming and why we have to check out_of_regions() to determine 2696 // whether we're done or not. 2697 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 2698 // We are going to try to claim a new region. We should have 2699 // given up on the previous one. 2700 // Separated the asserts so that we know which one fires. 2701 assert(_curr_region == NULL, "invariant"); 2702 assert(_finger == NULL, "invariant"); 2703 assert(_region_limit == NULL, "invariant"); 2704 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 2705 if (claimed_region != NULL) { 2706 // Yes, we managed to claim one 2707 setup_for_region(claimed_region); 2708 assert(_curr_region == claimed_region, "invariant"); 2709 } 2710 // It is important to call the regular clock here. It might take 2711 // a while to claim a region if, for example, we hit a large 2712 // block of empty regions. So we need to call the regular clock 2713 // method once round the loop to make sure it's called 2714 // frequently enough. 2715 abort_marking_if_regular_check_fail(); 2716 } 2717 2718 if (!has_aborted() && _curr_region == NULL) { 2719 assert(_cm->out_of_regions(), 2720 "at this point we should be out of regions"); 2721 } 2722 } while ( _curr_region != NULL && !has_aborted()); 2723 2724 if (!has_aborted()) { 2725 // We cannot check whether the global stack is empty, since other 2726 // tasks might be pushing objects to it concurrently. 2727 assert(_cm->out_of_regions(), 2728 "at this point we should be out of regions"); 2729 // Try to reduce the number of available SATB buffers so that 2730 // remark has less work to do. 2731 drain_satb_buffers(); 2732 } 2733 2734 // Since we've done everything else, we can now totally drain the 2735 // local queue and global stack. 2736 drain_local_queue(false); 2737 drain_global_stack(false); 2738 2739 // Attempt at work stealing from other task's queues. 2740 if (do_stealing && !has_aborted()) { 2741 // We have not aborted. This means that we have finished all that 2742 // we could. Let's try to do some stealing... 2743 2744 // We cannot check whether the global stack is empty, since other 2745 // tasks might be pushing objects to it concurrently. 2746 assert(_cm->out_of_regions() && _task_queue->size() == 0, 2747 "only way to reach here"); 2748 while (!has_aborted()) { 2749 G1TaskQueueEntry entry; 2750 if (_cm->try_stealing(_worker_id, entry)) { 2751 scan_task_entry(entry); 2752 2753 // And since we're towards the end, let's totally drain the 2754 // local queue and global stack. 2755 drain_local_queue(false); 2756 drain_global_stack(false); 2757 } else { 2758 break; 2759 } 2760 } 2761 } 2762 2763 // We still haven't aborted. Now, let's try to get into the 2764 // termination protocol. 2765 if (do_termination && !has_aborted()) { 2766 // We cannot check whether the global stack is empty, since other 2767 // tasks might be concurrently pushing objects on it. 2768 // Separated the asserts so that we know which one fires. 
2769 assert(_cm->out_of_regions(), "only way to reach here"); 2770 assert(_task_queue->size() == 0, "only way to reach here"); 2771 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 2772 2773 // The G1CMTask class also extends the TerminatorTerminator class, 2774 // hence its should_exit_termination() method will also decide 2775 // whether to exit the termination protocol or not. 2776 bool finished = (is_serial || 2777 _cm->terminator()->offer_termination(this)); 2778 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 2779 _termination_time_ms += 2780 termination_end_time_ms - _termination_start_time_ms; 2781 2782 if (finished) { 2783 // We're all done. 2784 2785 // We can now guarantee that the global stack is empty, since 2786 // all other tasks have finished. We separated the guarantees so 2787 // that, if a condition is false, we can immediately find out 2788 // which one. 2789 guarantee(_cm->out_of_regions(), "only way to reach here"); 2790 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 2791 guarantee(_task_queue->size() == 0, "only way to reach here"); 2792 guarantee(!_cm->has_overflown(), "only way to reach here"); 2793 guarantee(!has_aborted(), "should never happen if termination has completed"); 2794 } else { 2795 // Apparently there's more work to do. Let's abort this task. It 2796 // will restart it and we can hopefully find more things to do. 2797 set_has_aborted(); 2798 } 2799 } 2800 2801 // Mainly for debugging purposes to make sure that a pointer to the 2802 // closure which was statically allocated in this frame doesn't 2803 // escape it by accident. 2804 set_cm_oop_closure(NULL); 2805 double end_time_ms = os::elapsedVTime() * 1000.0; 2806 double elapsed_time_ms = end_time_ms - _start_time_ms; 2807 // Update the step history. 2808 _step_times_ms.add(elapsed_time_ms); 2809 2810 if (has_aborted()) { 2811 // The task was aborted for some reason. 2812 if (_has_timed_out) { 2813 double diff_ms = elapsed_time_ms - _time_target_ms; 2814 // Keep statistics of how well we did with respect to hitting 2815 // our target only if we actually timed out (if we aborted for 2816 // other reasons, then the results might get skewed). 2817 _marking_step_diffs_ms.add(diff_ms); 2818 } 2819 2820 if (_cm->has_overflown()) { 2821 // This is the interesting one. We aborted because a global 2822 // overflow was raised. This means we have to restart the 2823 // marking phase and start iterating over regions. However, in 2824 // order to do this we have to make sure that all tasks stop 2825 // what they are doing and re-initialize in a safe manner. We 2826 // will achieve this with the use of two barrier sync points. 2827 2828 if (!is_serial) { 2829 // We only need to enter the sync barrier if being called 2830 // from a parallel context 2831 _cm->enter_first_sync_barrier(_worker_id); 2832 2833 // When we exit this sync barrier we know that all tasks have 2834 // stopped doing marking work. So, it's now safe to 2835 // re-initialize our data structures. 2836 } 2837 2838 clear_region_fields(); 2839 flush_mark_stats_cache(); 2840 2841 if (!is_serial) { 2842 // If we're executing the concurrent phase of marking, reset the marking 2843 // state; otherwise the marking state is reset after reference processing, 2844 // during the remark pause. 2845 // If we reset here as a result of an overflow during the remark we will 2846 // see assertion failures from any subsequent set_concurrency_and_phase() 2847 // calls. 
2848 if (_cm->concurrent() && _worker_id == 0) { 2849 // Worker 0 is responsible for clearing the global data structures because 2850 // of an overflow. During STW we should not clear the overflow flag (in 2851 // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit 2852 // this method to abort the pause and restart concurrent marking. 2853 _cm->reset_marking_for_restart(); 2854 2855 log_info(gc, marking)("Concurrent Mark reset for overflow"); 2856 } 2857 2858 // ...and enter the second barrier. 2859 _cm->enter_second_sync_barrier(_worker_id); 2860 } 2861 // At this point, if we're in the concurrent phase of 2862 // marking, everything has been re-initialized and we're 2863 // ready to restart. 2864 } 2865 } 2866 } 2867 2868 G1CMTask::G1CMTask(uint worker_id, 2869 G1ConcurrentMark* cm, 2870 G1CMTaskQueue* task_queue, 2871 G1RegionMarkStats* mark_stats, 2872 uint max_regions) : 2873 _objArray_processor(this), 2874 _worker_id(worker_id), 2875 _g1h(G1CollectedHeap::heap()), 2876 _cm(cm), 2877 _next_mark_bitmap(NULL), 2878 _task_queue(task_queue), 2879 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize), 2880 _calls(0), 2881 _time_target_ms(0.0), 2882 _start_time_ms(0.0), 2883 _cm_oop_closure(NULL), 2884 _curr_region(NULL), 2885 _finger(NULL), 2886 _region_limit(NULL), 2887 _words_scanned(0), 2888 _words_scanned_limit(0), 2889 _real_words_scanned_limit(0), 2890 _refs_reached(0), 2891 _refs_reached_limit(0), 2892 _real_refs_reached_limit(0), 2893 _has_aborted(false), 2894 _has_timed_out(false), 2895 _draining_satb_buffers(false), 2896 _step_times_ms(), 2897 _elapsed_time_ms(0.0), 2898 _termination_time_ms(0.0), 2899 _termination_start_time_ms(0.0), 2900 _marking_step_diffs_ms() 2901 { 2902 guarantee(task_queue != NULL, "invariant"); 2903 2904 _marking_step_diffs_ms.add(0.5); 2905 } 2906 2907 // These are formatting macros that are used below to ensure 2908 // consistent formatting. The *_H_* versions are used to format the 2909 // header for a particular value and they should be kept consistent 2910 // with the corresponding macro. Also note that most of the macros add 2911 // the necessary white space (as a prefix) which makes them a bit 2912 // easier to compose. 2913 2914 // All the output lines are prefixed with this string to be able to 2915 // identify them easily in a large log file.
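// As an illustration of how the macros compose (a sketch of the
// preprocessor expansion, not extra output): passing
// G1PPRL_LINE_PREFIX G1PPRL_TYPE_FORMAT G1PPRL_BYTE_FORMAT to log_trace
// concatenates, using the definitions below, into
// "###" " %-4s" " " SIZE_FORMAT_W(9), i.e. the "###" prefix, a
// left-justified region type and a nine-wide byte count, matching the
// header and per-region rows printed further down.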
2916 #define G1PPRL_LINE_PREFIX "###" 2917 2918 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2919 #ifdef _LP64 2920 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2921 #else // _LP64 2922 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2923 #endif // _LP64 2924 2925 // For per-region info 2926 #define G1PPRL_TYPE_FORMAT " %-4s" 2927 #define G1PPRL_TYPE_H_FORMAT " %4s" 2928 #define G1PPRL_STATE_FORMAT " %-5s" 2929 #define G1PPRL_STATE_H_FORMAT " %5s" 2930 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2931 #define G1PPRL_BYTE_H_FORMAT " %9s" 2932 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2933 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2934 2935 // For summary info 2936 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2937 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2938 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2939 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2940 2941 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2942 _total_used_bytes(0), _total_capacity_bytes(0), 2943 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2944 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2945 { 2946 if (!log_is_enabled(Trace, gc, liveness)) { 2947 return; 2948 } 2949 2950 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2951 MemRegion g1_reserved = g1h->g1_reserved(); 2952 double now = os::elapsedTime(); 2953 2954 // Print the header of the output. 2955 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2956 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2957 G1PPRL_SUM_ADDR_FORMAT("reserved") 2958 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2959 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2960 HeapRegion::GrainBytes); 2961 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2962 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2963 G1PPRL_TYPE_H_FORMAT 2964 G1PPRL_ADDR_BASE_H_FORMAT 2965 G1PPRL_BYTE_H_FORMAT 2966 G1PPRL_BYTE_H_FORMAT 2967 G1PPRL_BYTE_H_FORMAT 2968 G1PPRL_DOUBLE_H_FORMAT 2969 G1PPRL_BYTE_H_FORMAT 2970 G1PPRL_STATE_H_FORMAT 2971 G1PPRL_BYTE_H_FORMAT, 2972 "type", "address-range", 2973 "used", "prev-live", "next-live", "gc-eff", 2974 "remset", "state", "code-roots"); 2975 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2976 G1PPRL_TYPE_H_FORMAT 2977 G1PPRL_ADDR_BASE_H_FORMAT 2978 G1PPRL_BYTE_H_FORMAT 2979 G1PPRL_BYTE_H_FORMAT 2980 G1PPRL_BYTE_H_FORMAT 2981 G1PPRL_DOUBLE_H_FORMAT 2982 G1PPRL_BYTE_H_FORMAT 2983 G1PPRL_STATE_H_FORMAT 2984 G1PPRL_BYTE_H_FORMAT, 2985 "", "", 2986 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 2987 "(bytes)", "", "(bytes)"); 2988 } 2989 2990 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { 2991 if (!log_is_enabled(Trace, gc, liveness)) { 2992 return false; 2993 } 2994 2995 const char* type = r->get_type_str(); 2996 HeapWord* bottom = r->bottom(); 2997 HeapWord* end = r->end(); 2998 size_t capacity_bytes = r->capacity(); 2999 size_t used_bytes = r->used(); 3000 size_t prev_live_bytes = r->live_bytes(); 3001 size_t next_live_bytes = r->next_live_bytes(); 3002 double gc_eff = r->gc_efficiency(); 3003 size_t remset_bytes = r->rem_set()->mem_size(); 3004 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3005 const char* remset_type = r->rem_set()->get_short_state_str(); 3006 3007 _total_used_bytes += used_bytes; 3008 _total_capacity_bytes += capacity_bytes; 3009 _total_prev_live_bytes += prev_live_bytes; 3010 _total_next_live_bytes += next_live_bytes; 3011 
_total_remset_bytes += remset_bytes; 3012 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3013 3014 // Print a line for this particular region. 3015 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3016 G1PPRL_TYPE_FORMAT 3017 G1PPRL_ADDR_BASE_FORMAT 3018 G1PPRL_BYTE_FORMAT 3019 G1PPRL_BYTE_FORMAT 3020 G1PPRL_BYTE_FORMAT 3021 G1PPRL_DOUBLE_FORMAT 3022 G1PPRL_BYTE_FORMAT 3023 G1PPRL_STATE_FORMAT 3024 G1PPRL_BYTE_FORMAT, 3025 type, p2i(bottom), p2i(end), 3026 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3027 remset_bytes, remset_type, strong_code_roots_bytes); 3028 3029 return false; 3030 } 3031 3032 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3033 if (!log_is_enabled(Trace, gc, liveness)) { 3034 return; 3035 } 3036 3037 // add static memory usages to remembered set sizes 3038 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3039 // Print the footer of the output. 3040 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3041 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3042 " SUMMARY" 3043 G1PPRL_SUM_MB_FORMAT("capacity") 3044 G1PPRL_SUM_MB_PERC_FORMAT("used") 3045 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3046 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3047 G1PPRL_SUM_MB_FORMAT("remset") 3048 G1PPRL_SUM_MB_FORMAT("code-roots"), 3049 bytes_to_mb(_total_capacity_bytes), 3050 bytes_to_mb(_total_used_bytes), 3051 percent_of(_total_used_bytes, _total_capacity_bytes), 3052 bytes_to_mb(_total_prev_live_bytes), 3053 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 3054 bytes_to_mb(_total_next_live_bytes), 3055 percent_of(_total_next_live_bytes, _total_capacity_bytes), 3056 bytes_to_mb(_total_remset_bytes), 3057 bytes_to_mb(_total_strong_code_roots_bytes)); 3058 }