/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"
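// The closure below is applied to each marked address found while a task
// iterates (part of) the mark bitmap. Illustrative driver, a sketch only
// (the actual call site lives in G1CMTask::do_marking_step()):
//
//   G1CMBitMapClosure bitmap_closure(task, cm);
//   if (!next_mark_bitmap->iterate(&bitmap_closure, mr)) {
//     // Iteration was aborted because the task raised its has_aborted() flag.
//   }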
bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}
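// Chunks circulate between two intrusive, mutex-protected lists: _chunk_list
// holds chunks filled with task queue entries, _free_list holds chunks that
// can be reused. Both are spliced at the head while holding the respective
// lock; illustrative expansion of add_chunk_to_free_list() (a sketch):
//
//   MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
//   elem->next = _free_list;   // add_chunk_to_list()
//   _free_list = elem;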
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
  _root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
  _max_regions(max_regions),
  _num_root_regions(0),
  _claimed_root_regions(0),
  _scan_in_progress(false),
  _should_abort(false) { }

G1CMRootRegions::~G1CMRootRegions() {
  FREE_C_HEAP_ARRAY(HeapRegion*, _root_regions);
}

void G1CMRootRegions::reset() {
  _num_root_regions = 0;
}

void G1CMRootRegions::add(HeapRegion* hr) {
  assert_at_safepoint();
  size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
  assert(idx < _max_regions, "Trying to add more root regions than there is space for: " SIZE_FORMAT, _max_regions);
  _root_regions[idx] = hr;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  _scan_in_progress = _num_root_regions > 0;

  _claimed_root_regions = 0;
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  if (_claimed_root_regions >= _num_root_regions) {
    return NULL;
  }

  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
  if (claimed_index < _num_root_regions) {
    return _root_regions[claimed_index];
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_num_root_regions;
}

void G1CMRootRegions::notify_scan_done() {
  MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  if (!_should_abort) {
    assert(_claimed_root_regions >= num_root_regions(),
           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
           _claimed_root_regions, num_root_regions());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      ml.wait();
    }
  }
  return true;
}
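// Typical consumer pattern for the claim protocol above (a sketch; this is
// the loop G1CMRootRegionScanTask::work() uses further below):
//
//   HeapRegion* hr = root_regions->claim_next();
//   while (hr != NULL) {
//     _cm->scan_root_region(hr, worker_id);
//     hr = root_regions->claim_next();
//   }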
// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase. E.g. 8 STW workers scale to (8 + 2) / 4 = 2 concurrent
// workers; the result is always at least 1.
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(_g1h->max_regions()),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator((int) _max_num_tasks, _task_queues),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads has been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}
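// Illustrative ergonomic sizing (example numbers, assuming the usual 64-bit
// TASKQUEUE_SIZE of 128K entries): with 4 concurrent workers and default
// flags, the mark stack request above becomes MAX2(MarkStackSize, 4 * 128K)
// entries, clamped to MarkStackSizeMax.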
void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use different numbers of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got continues humongous region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator.terminator()->reset_for_reuse((int) active_tasks);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}
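// Each phase picks its own (active_tasks, concurrent) pair before running
// marking tasks; a sketch of the call sites found later in this file:
//
//   set_concurrency_and_phase(active_workers, true);  // concurrent marking
//   set_concurrency_and_phase(1, true);               // single-threaded precleaning
//   set_concurrency(ergo_workers);                    // STW reference processing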
void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_initial_mark() {
  assert_at_safepoint_on_vm_thread();

  // Reset marking state.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);

  _root_regions.reset();
}

void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on.
  // So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended by a Full GC, or an evacuation
 * pause could occur while it waits. This is actually safe, since
 * entering the sync barrier is one of the last things do_marking_step()
 * does, and it doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial */);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
                                                1, /* Minimum workers */
                                                _num_concurrent_workers,
                                                Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_worker_threads() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
         "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->next_top_at_mark_start();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};
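// Illustrative worker selection for the task above (example numbers): if
// calc_active_marking_workers() returns 4 but only 2 root regions were
// recorded, scan_root_regions() below starts MIN2(4, 2) == 2 workers, since
// work is distributed at region granularity.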
void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()".
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}

class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }

    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous starts
      // region. We need to distribute this value across the entire set of regions a
      // humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which roughly one thread should be spawned for this work.
  static const uint RegionsPerThread = 384;
};

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  G1Policy* policy = _g1h->policy();
  policy->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes.
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    _g1h->resize_heap_if_necessary();

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed.
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  policy->record_concurrent_mark_remark_end();
}

class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;

  public:
    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                 FreeRegionList* local_cleanup_list) :
      _g1h(g1h),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0) { }

    size_t freed_bytes() { return _freed_bytes; }
    uint old_regions_removed() const { return _old_regions_removed; }
    uint humongous_regions_removed() const { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets.
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");
    }
  }
};
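// Per-worker pattern used by work() above (a sketch): each worker reclaims
// regions into a thread-local FreeRegionList and then splices it into the
// shared cleanup list under ParGCRareEvent_lock, keeping the critical section
// down to a list splice:
//
//   MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
//   _cleanup_list->add_ordered(&local_cleanup_list);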
void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}

void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* policy = _g1h->policy();
  policy->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    policy->record_concurrent_mark_cleanup_end();
  }
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
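//
// The closure also batches its draining: after every G1RefProcDrainInterval
// references successfully handed to deal_with_reference(), it calls
// do_marking_step() to drain the entries just pushed, rather than draining
// after every single reference.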
class G1CMKeepAliveAndDrainClosure : public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  uint _ref_counter_limit;
  uint _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
    _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (_cm->has_overflown()) {
      return;
    }
    if (!_task->deal_with_reference(p)) {
      // We did not add anything to the mark bitmap (or mark stack), so there is
      // no point trying to drain it.
      return;
    }
    _ref_counter--;

    if (_ref_counter == 0) {
      // We have dealt with _ref_counter_limit references, pushing them
      // and objects reachable from them on to the local stack (and
      // possibly the global stack). Call G1CMTask::do_marking_step() to
      // process these entries.
      //
      // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
      // there's nothing more to do (i.e. we're done with the entries that
      // were pushed as a result of the G1CMTask::deal_with_reference() calls
      // above) or we overflow.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.
      do {
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
        _task->do_marking_step(mark_step_duration_ms,
                               false /* do_termination */,
                               _is_serial);
      } while (_task->has_aborted() && !_cm->has_overflown());
      _ref_counter = _ref_counter_limit;
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure : public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking.

class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  virtual void execute(ProcessTask& task, uint ergo_workers);
};

class G1CMRefProcTaskProxy : public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
  assert(_workers->active_workers() >= ergo_workers,
         "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
         ergo_workers, _workers->active_workers());

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(ergo_workers);
  _workers->run_task(&proc_task_proxy, ergo_workers);
}
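// weak_refs_work() below hands the is-alive, keep-alive and drain closures to
// the ReferenceProcessor, roughly (a sketch of the call made further down):
//
//   rp->process_discovered_references(&g1_is_alive, &g1_keep_alive,
//                                     &g1_drain_mark_stack, executor, &pt);
//
// where executor is the MT task executor above, or NULL when reference
// processing is single-threaded.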

void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
  ResourceMark rm;
  HandleMark hm;

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(_g1h);

  // Inner scope to exclude the cleaning of the string table
  // from the displayed time.
  {
    GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);

    ReferenceProcessor* rp = _g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy.
    rp->setup_policy(clear_all_soft_refs);
    assert(_global_mark_stack.is_empty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note that these closures are also
    // used for serially processing (by the current thread) the JNI
    // references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (e.g.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
                                              _g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          &pt);
    _gc_tracer_cm->report_gc_reference_stats(stats);
    pt.print_all_references();

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.

    assert(has_overflown() || _global_mark_stack.is_empty(),
           "Mark stack should be empty (unless it has overflown)");

    assert(rp->num_queues() == active_workers, "Number of queues should match active workers");

    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }

  if (has_overflown()) {
    // We can not trust g1_is_alive and the contents of the heap if the marking stack
    // overflowed while processing references. Exit the VM.
    fatal("Overflow during reference processing, can not continue. Please "
          "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
          "restart.", MarkStackSizeMax);
    return;
  }

  assert(_global_mark_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
    WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
  }

  // Unload Klasses, String, Code Cache, etc.
  if (ClassUnloadingWithConcurrentMark) {
    GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
    bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
    _g1h->complete_cleaning(&g1_is_alive, purged_classes);
  } else if (StringDedup::is_enabled()) {
    GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm);
    _g1h->string_dedup_cleaning(&g1_is_alive, NULL);
  }
}
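
// Phase overview of weak_refs_work() above (for exposition only):
//
//   1. Reference Processing   - process_discovered_references(), MT if
//                               possible; a mark stack overflow here is fatal.
//   2. Weak Processing        - WeakProcessor::weak_oops_do().
//   3. Class Unloading        - if ClassUnloadingWithConcurrentMark; otherwise
//      String Deduplication cleaning, if string deduplication is enabled.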

class G1PrecleanYieldClosure : public YieldClosure {
  G1ConcurrentMark* _cm;

public:
  G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }

  virtual bool should_return() {
    return _cm->has_aborted();
  }

  virtual bool should_return_fine_grain() {
    _cm->do_yield_check();
    return _cm->has_aborted();
  }
};

void G1ConcurrentMark::preclean() {
  assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");

  SuspendibleThreadSetJoiner joiner;

  G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
  G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);

  set_concurrency_and_phase(1, true);

  G1PrecleanYieldClosure yield_cl(this);

  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // Precleaning is single threaded. Temporarily disable MT discovery.
  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
  rp->preclean_discovered_references(rp->is_alive_non_header(),
                                     &keep_alive,
                                     &drain_mark_stack,
                                     &yield_cl,
                                     _gc_timer_cm);
}

// When sampling object counts, we already swapped the mark bitmaps, so we need
// to use the prev bitmap to determine liveness.
class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }

  bool do_object_b(oop obj) {
    HeapWord* addr = (HeapWord*)obj;
    return addr != NULL &&
           (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
  }
};

void G1ConcurrentMark::report_object_count(bool mark_completed) {
  // Depending on whether marking completed, liveness needs to be determined
  // using either the next or the prev bitmap.
  if (mark_completed) {
    G1ObjectCountIsAliveClosure is_alive(_g1h);
    _gc_tracer_cm->report_object_count_after_gc(&is_alive);
  } else {
    G1CMIsAliveClosure is_alive(_g1h);
    _gc_tracer_cm->report_object_count_after_gc(&is_alive);
  }
}

void G1ConcurrentMark::swap_mark_bitmaps() {
  G1CMBitMap* temp = _prev_mark_bitmap;
  _prev_mark_bitmap = _next_mark_bitmap;
  _next_mark_bitmap = temp;
  _g1h->collector_state()->set_clearing_next_bitmap(true);
}

// Closure for marking entries in SATB buffers.
class G1CMSATBBufferClosure : public SATBBufferClosure {
private:
  G1CMTask*        _task;
  G1CollectedHeap* _g1h;

  // This is very similar to G1CMTask::deal_with_reference, but with
  // more relaxed requirements for the argument, so this must be more
  // circumspect about treating the argument as an object.
  void do_entry(void* entry) const {
    _task->increment_refs_reached();
    oop const obj = static_cast<oop>(entry);
    _task->make_reference_grey(obj);
  }

public:
  G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
    : _task(task), _g1h(g1h) { }

  virtual void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      do_entry(buffer[i]);
    }
  }
};

class G1RemarkThreadsClosure : public ThreadClosure {
  G1CMSATBBufferClosure _cm_satb_cl;
  G1CMOopClosure _cm_cl;
  MarkingCodeBlobClosure _code_cl;
  uintx _claim_token;

public:
  G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
    _cm_satb_cl(task, g1h),
    _cm_cl(g1h, task),
    _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
    _claim_token(Threads::thread_claim_token()) {}

  void do_thread(Thread* thread) {
    if (thread->claim_threads_do(true, _claim_token)) {
      SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
      queue.apply_closure_and_empty(&_cm_satb_cl);
      if (thread->is_Java_thread()) {
        // In theory it should not be necessary to explicitly walk the nmethods
        // to find roots for concurrent marking; however, oops reachable from
        // nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader
        // (or klass_holder) of the receiver, should be live by the SATB
        // invariant, but other oops recorded in nmethods may behave differently.
        JavaThread* jt = (JavaThread*)thread;
        jt->nmethods_do(&_code_cl);
      }
    }
  }
};

class G1CMRemarkTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  void work(uint worker_id) {
    G1CMTask* task = _cm->task(worker_id);
    task->record_start_time();
    {
      ResourceMark rm;
      HandleMark hm;

      G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
      Threads::threads_do(&threads_f);
    }

    do {
      task->do_marking_step(1000000000.0 /* something very large */,
                            true /* do_termination */,
                            false /* is_serial */);
    } while (task->has_aborted() && !_cm->has_overflown());
    // If we overflow, then we do not want to restart. We instead
    // want to abort remark and do concurrent marking again.
    task->record_end_time();
  }

  G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
};
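
// Usage sketch (for exposition only): during remark each worker in
// G1CMRemarkTask::work() above runs
//
//   G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
//   Threads::threads_do(&threads_f);
//
// and the claim token guarantees that every thread's SATB buffer and
// nmethods are processed by exactly one worker.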

void G1ConcurrentMark::finalize_marking() {
  ResourceMark rm;
  HandleMark hm;

  _g1h->ensure_parsability(false);

  // This is remark, so we'll use up all active threads.
  uint active_workers = _g1h->workers()->active_workers();
  set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the G1ConcurrentMark
  // constructor and pass values of the active workers
  // through the gang in the task.

  {
    StrongRootsScope srs(active_workers);

    G1CMRemarkTask remarkTask(this, active_workers);
    // We will start all available threads, even if we decide that the
    // active_workers will be fewer. The extra ones will just bail out
    // immediately.
    _g1h->workers()->run_task(&remarkTask);
  }

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  guarantee(has_overflown() ||
            satb_mq_set.completed_buffers_num() == 0,
            "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
            BOOL_TO_STR(has_overflown()),
            satb_mq_set.completed_buffers_num());

  print_stats();
}

void G1ConcurrentMark::flush_all_task_caches() {
  size_t hits = 0;
  size_t misses = 0;
  for (uint i = 0; i < _max_num_tasks; i++) {
    Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
    hits += stats.first;
    misses += stats.second;
  }
  size_t sum = hits + misses;
  log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
                       hits, misses, percent_of(hits, sum));
}

void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
  _prev_mark_bitmap->clear_range(mr);
}

HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
  // "checkpoint" the finger
  HeapWord* finger = _finger;

  while (finger < _heap.end()) {
    assert(_g1h->is_in_g1_reserved(finger), "invariant");

    HeapRegion* curr_region = _g1h->heap_region_containing(finger);
    // Make sure that the reads below do not float before loading curr_region.
    OrderAccess::loadload();
    // Above heap_region_containing may return NULL as we always claim regions
    // until the end of the heap. In this case, just jump to the next region.
    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;

    // Is the gap between reading the finger and doing the CAS too long?
    HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
    if (res == finger && curr_region != NULL) {
      // We succeeded.
      HeapWord* bottom = curr_region->bottom();
      HeapWord* limit = curr_region->next_top_at_mark_start();

      // Notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further.
      assert(_finger >= end, "the finger should have moved forward");

      if (limit > bottom) {
        return curr_region;
      } else {
        assert(limit == bottom,
               "the region limit should be at bottom");
        // We return NULL and the caller should try calling
        // claim_region() again.
        return NULL;
      }
    } else {
      assert(_finger > finger, "the finger should have moved forward");
      // Read it again.
      finger = _finger;
    }
  }

  return NULL;
}
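
// Sketch of the finger protocol in claim_region() above (for exposition
// only). Two workers racing on the same finger value f:
//
//   worker A: Atomic::cmpxchg(end, &_finger, f) == f  -> claims [f, end)
//   worker B: Atomic::cmpxchg(end, &_finger, f) != f  -> rereads _finger
//
// Every region is therefore claimed by at most one worker; the loser simply
// continues from the new finger value.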

#ifndef PRODUCT
class VerifyNoCSetOops {
  G1CollectedHeap* _g1h;
  const char* _phase;
  int _info;

public:
  VerifyNoCSetOops(const char* phase, int info = -1) :
    _g1h(G1CollectedHeap::heap()),
    _phase(phase),
    _info(info)
  { }

  void operator()(G1TaskQueueEntry task_entry) const {
    if (task_entry.is_array_slice()) {
      guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
      return;
    }
    guarantee(oopDesc::is_oop(task_entry.obj()),
              "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
              p2i(task_entry.obj()), _phase, _info);
    HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
    guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
              "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
              p2i(task_entry.obj()), _phase, _info, r->hrm_index());
  }
};

void G1ConcurrentMark::verify_no_collection_set_oops() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Verify entries on the global mark stack.
  _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));

  // Verify entries on the task queues.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->iterate(VerifyNoCSetOops("Queue", i));
  }

  // Verify the global finger.
  HeapWord* global_finger = finger();
  if (global_finger != NULL && global_finger < _heap.end()) {
    // Since we always iterate over all regions, we might get a NULL HeapRegion
    // here.
    HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
    guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
              "global finger: " PTR_FORMAT " region: " HR_FORMAT,
              p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
  }

  // Verify the task fingers.
  assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
  for (uint i = 0; i < _num_concurrent_workers; ++i) {
    G1CMTask* task = _tasks[i];
    HeapWord* task_finger = task->finger();
    if (task_finger != NULL && task_finger < _heap.end()) {
      // See above note on the global finger verification.
      HeapRegion* r = _g1h->heap_region_containing(task_finger);
      guarantee(r == NULL || task_finger == r->bottom() ||
                !r->in_collection_set() || !r->has_index_in_opt_cset(),
                "task finger: " PTR_FORMAT " region: " HR_FORMAT,
                p2i(task_finger), HR_FORMAT_PARAMS(r));
    }
  }
}
#endif // PRODUCT

void G1ConcurrentMark::rebuild_rem_set_concurrently() {
  _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
}

void G1ConcurrentMark::print_stats() {
  if (!log_is_enabled(Debug, gc, stats)) {
    return;
  }
  log_debug(gc, stats)("---------------------------------------------------------------------");
  for (size_t i = 0; i < _num_active_tasks; ++i) {
    _tasks[i]->print_stats();
    log_debug(gc, stats)("---------------------------------------------------------------------");
  }
}

void G1ConcurrentMark::concurrent_cycle_abort() {
  if (!cm_thread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  {
    GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
    clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
  }
  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Empty mark stack.
  reset_marking_for_restart();
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking; we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(false, /* new active value */
                                     satb_mq_set.is_active() /* expected_active */);
}

static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

void G1ConcurrentMark::print_summary_info() {
  Log(gc, marking) log;
  if (!log.is_trace()) {
    return;
  }

  log.trace(" Concurrent marking:");
  print_ms_time_info("  ", "init marks", _init_times);
  print_ms_time_info("  ", "remarks", _remark_times);
  {
    print_ms_time_info("     ", "final marks", _remark_mark_times);
    print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
  }
  print_ms_time_info("  ", "cleanups", _cleanup_times);
  log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
            _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
  log.trace("  Total stop_world time = %8.2f s.",
            (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
  log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
            cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
}

void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  _concurrent_workers->print_worker_threads_on(st);
}

void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
  _concurrent_workers->threads_do(tc);
}

void G1ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
  _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
  _next_mark_bitmap->print_on_error(st, " Next Bits: ");
}

static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
  ReferenceProcessor* result = g1h->ref_processor_cm();
  assert(result != NULL, "CM reference processor should not be NULL");
  return result;
}

G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                               G1CMTask* task)
  : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
    _g1h(g1h), _task(task)
{ }

void G1CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
  _curr_region = hr;
  _finger      = hr->bottom();
  update_region_limit();
}

void G1CMTask::update_region_limit() {
  HeapRegion* hr   = _curr_region;
  HeapWord* bottom = hr->bottom();
  HeapWord* limit  = hr->next_top_at_mark_start();

  if (limit == bottom) {
    // The region was collected underneath our feet.
    // We set the finger to bottom to ensure that the bitmap
    // iteration that will follow this will not do anything.
    // (this is not a condition that holds when we set the region up,
    // as the region is not supposed to be empty in the first place)
    _finger = bottom;
  } else if (limit >= _region_limit) {
    assert(limit >= _finger, "peace of mind");
  } else {
    assert(limit < _region_limit, "only way to get here");
    // This can happen under some pretty unusual circumstances. An
    // evacuation pause empties the region underneath our feet (NTAMS
    // at bottom). We then do some allocation in the region (NTAMS
    // stays at bottom), followed by the region being used as a GC
    // alloc region (NTAMS will move to top() and the objects
    // originally below it will be grayed). All objects now marked in
    // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
    // set _finger to be limit to ensure that the bitmap iteration
    // doesn't do anything.
    _finger = limit;
  }

  _region_limit = limit;
}

void G1CMTask::giveup_current_region() {
  assert(_curr_region != NULL, "invariant");
  clear_region_fields();
}

void G1CMTask::clear_region_fields() {
  // Values for these three fields that indicate that we're not
  // holding on to a region.
  _curr_region  = NULL;
  _finger       = NULL;
  _region_limit = NULL;
}
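
// Invariant sketch for the region fields above (for exposition only), while
// a task holds a region:
//
//   bottom <= _finger <= _region_limit == NTAMS <= top <= end
//
// update_region_limit() re-reads NTAMS because an evacuation pause may have
// emptied (or refilled) the region since setup_for_region() ran.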

void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  if (cm_oop_closure == NULL) {
    assert(_cm_oop_closure != NULL, "invariant");
  } else {
    assert(_cm_oop_closure == NULL, "invariant");
  }
  _cm_oop_closure = cm_oop_closure;
}

void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
  guarantee(next_mark_bitmap != NULL, "invariant");
  _next_mark_bitmap          = next_mark_bitmap;
  clear_region_fields();

  _calls                     = 0;
  _elapsed_time_ms           = 0.0;
  _termination_time_ms       = 0.0;
  _termination_start_time_ms = 0.0;

  _mark_stats_cache.reset();
}

bool G1CMTask::should_exit_termination() {
  if (!regular_clock_call()) {
    return true;
  }

  // This is called when we are in the termination protocol. We should
  // quit if, for some reason, this task wants to abort or the global
  // stack is not empty (this means that we can get work from it).
  return !_cm->mark_stack_empty() || has_aborted();
}

void G1CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
         "shouldn't have been called otherwise");
  abort_marking_if_regular_check_fail();
}

bool G1CMTask::regular_clock_call() {
  if (has_aborted()) {
    return false;
  }

  // First, we need to recalculate the words scanned and refs reached
  // limits for the next clock call.
  recalculate_limits();

  // During the regular clock call we do the following:

  // (1) If an overflow has been flagged, then we abort.
  if (_cm->has_overflown()) {
    return false;
  }

  // If we are not concurrent (i.e. we're doing remark) we don't need
  // to check anything else. The other steps are only needed during
  // the concurrent marking phase.
  if (!_cm->concurrent()) {
    return true;
  }

  // (2) If marking has been aborted for Full GC, then we also abort.
  if (_cm->has_aborted()) {
    return false;
  }

  double curr_time_ms = os::elapsedVTime() * 1000.0;

  // (3) We check whether we should yield. If we have to, then we abort.
  if (SuspendibleThreadSet::should_yield()) {
    // We should yield. To do this we abort the task. The caller is
    // responsible for yielding.
    return false;
  }

  // (4) We check whether we've reached our time quota. If we have,
  // then we abort.
  double elapsed_time_ms = curr_time_ms - _start_time_ms;
  if (elapsed_time_ms > _time_target_ms) {
    _has_timed_out = true;
    return false;
  }

  // (5) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    // We do need to process SATB buffers; we'll abort and restart
    // the marking task to do so.
    return false;
  }
  return true;
}
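
// Summary of the clock protocol above (for exposition only):
//
//   condition checked                      result of regular_clock_call()
//   ------------------------------------  ------------------------------
//   has_aborted()                          false (stay aborted)
//   _cm->has_overflown()                   false (restart marking)
//   concurrent only: _cm->has_aborted()    false (Full GC requested)
//   concurrent only: should_yield()        false (caller yields)
//   concurrent only: time quota exceeded   false (_has_timed_out set)
//   concurrent only: SATB buffers pending  false (drained on restart)
//   otherwise                              true  (keep marking)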

void G1CMTask::recalculate_limits() {
  _real_words_scanned_limit = _words_scanned + words_scanned_period;
  _words_scanned_limit      = _real_words_scanned_limit;

  _real_refs_reached_limit  = _refs_reached + refs_reached_period;
  _refs_reached_limit       = _real_refs_reached_limit;
}

void G1CMTask::decrease_limits() {
  // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per byte scanned cost (i.e. move
  // entries to/from the global stack). It basically tries to decrease the
  // scanning limit so that the clock is called earlier.
  _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
  _refs_reached_limit  = _real_refs_reached_limit  - 3 * refs_reached_period / 4;
}

void G1CMTask::move_entries_to_global_stack() {
  // Local array where we'll store the entries that will be popped
  // from the local queue.
  G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];

  size_t n = 0;
  G1TaskQueueEntry task_entry;
  while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
    buffer[n] = task_entry;
    ++n;
  }
  if (n < G1CMMarkStack::EntriesPerChunk) {
    buffer[n] = G1TaskQueueEntry();
  }

  if (n > 0) {
    if (!_cm->mark_stack_push(buffer)) {
      set_has_aborted();
    }
  }

  // This operation was quite expensive, so decrease the limits.
  decrease_limits();
}

bool G1CMTask::get_entries_from_global_stack() {
  // Local array where we'll store the entries that will be popped
  // from the global stack.
  G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];

  if (!_cm->mark_stack_pop(buffer)) {
    return false;
  }

  // We did actually pop at least one entry.
  for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
    G1TaskQueueEntry task_entry = buffer[i];
    if (task_entry.is_null()) {
      break;
    }
    assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
    bool success = _task_queue->push(task_entry);
    // We only call this when the local queue is empty or under a
    // given target limit. So, we do not expect this push to fail.
    assert(success, "invariant");
  }

  // This operation was quite expensive, so decrease the limits.
  decrease_limits();
  return true;
}
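
// Chunk layout sketch for the two transfer routines above (for exposition
// only): a chunk holds up to G1CMMarkStack::EntriesPerChunk entries, and a
// partially filled chunk is terminated by a null entry, which is why both
// directions stop at the first is_null() element:
//
//   full:    [e0, e1, ..., e(EntriesPerChunk-1)]
//   partial: [e0, e1, ..., ek, null, <unused>]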

void G1CMTask::drain_local_queue(bool partially) {
  if (has_aborted()) {
    return;
  }

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  size_t target_size;
  if (partially) {
    target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
  } else {
    target_size = 0;
  }

  if (_task_queue->size() > target_size) {
    G1TaskQueueEntry entry;
    bool ret = _task_queue->pop_local(entry);
    while (ret) {
      scan_task_entry(entry);
      if (_task_queue->size() <= target_size || has_aborted()) {
        ret = false;
      } else {
        ret = _task_queue->pop_local(entry);
      }
    }
  }
}

void G1CMTask::drain_global_stack(bool partially) {
  if (has_aborted()) {
    return;
  }

  // We have a policy to drain the local queue before we attempt to
  // drain the global stack.
  assert(partially || _task_queue->size() == 0, "invariant");

  // Decide what the target size is, depending on whether we're going to
  // drain it partially (so that other tasks can steal if they run out
  // of things to do) or totally (at the very end).
  // Notice that when draining the global mark stack partially, due to the
  // raciness of the mark stack size update we might in fact drop below the
  // target. But, this is not a problem.
  // In case of total draining, we simply process until the global mark stack
  // is totally empty, disregarding the size counter.
  if (partially) {
    size_t const target_size = _cm->partial_mark_stack_size_target();
    while (!has_aborted() && _cm->mark_stack_size() > target_size) {
      if (get_entries_from_global_stack()) {
        drain_local_queue(partially);
      }
    }
  } else {
    while (!has_aborted() && get_entries_from_global_stack()) {
      drain_local_queue(partially);
    }
  }
}

// SATB Queue has several assumptions on whether to call the par or
// non-par versions of the methods. This is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
void G1CMTask::drain_satb_buffers() {
  if (has_aborted()) {
    return;
  }

  // We set this so that the regular clock knows that we're in the
  // middle of draining buffers and doesn't set the abort flag when it
  // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
  _draining_satb_buffers = true;

  G1CMSATBBufferClosure satb_cl(this, _g1h);
  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();

  // This keeps claiming and applying the closure to completed buffers
  // until we run out of buffers or we need to abort.
  while (!has_aborted() &&
         satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
    abort_marking_if_regular_check_fail();
  }

  _draining_satb_buffers = false;

  assert(has_aborted() ||
         _cm->concurrent() ||
         satb_mq_set.completed_buffers_num() == 0, "invariant");

  // Again, this was a potentially expensive operation; decrease the
  // limits to get the regular clock call early.
  decrease_limits();
}
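
// Drain targets used above (for exposition only):
//
//   local queue,  partial: MIN2(max_elems / 3, GCDrainStackTargetSize)
//   local queue,  total:   0 (empty)
//   global stack, partial: _cm->partial_mark_stack_size_target()
//   global stack, total:   pop until mark_stack_pop() fails
//
// Partial draining keeps some entries around for other tasks to steal or pop.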

void G1CMTask::clear_mark_stats_cache(uint region_idx) {
  _mark_stats_cache.reset(region_idx);
}

Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
  return _mark_stats_cache.evict_all();
}

void G1CMTask::print_stats() {
  log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
  log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
                       _elapsed_time_ms, _termination_time_ms);
  log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
                       _step_times_ms.num(),
                       _step_times_ms.avg(),
                       _step_times_ms.sd(),
                       _step_times_ms.maximum(),
                       _step_times_ms.sum());
  size_t const hits = _mark_stats_cache.hits();
  size_t const misses = _mark_stats_cache.misses();
  log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
                       hits, misses, percent_of(hits, hits + misses));
}

bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
  return _task_queues->steal(worker_id, task_entry);
}

/*****************************************************************************

    The do_marking_step(time_target_ms, ...) method is the building
    block of the parallel marking framework. It can be called in parallel
    with other invocations of do_marking_step() on different tasks
    (but only one per task, obviously) and concurrently with the
    mutator threads, or during remark, hence it eliminates the need
    for two versions of the code. When called during remark, it will
    pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
    it needs to yield.

    The data structures that it uses to do marking work are the
    following:

      (1) Marking Bitmap. If there are gray objects that appear only
      on the bitmap (this happens either when dealing with an overflow
      or when the initial marking phase has simply marked the roots
      and didn't push them on the stack), then tasks claim heap
      regions whose bitmap they then scan to find gray objects. A
      global finger indicates where the end of the last claimed region
      is. A local finger indicates how far into the region a task has
      scanned. The two fingers are used to determine how to gray an
      object (i.e. whether simply marking it is OK, as it will be
      visited by a task in the future, or whether it needs to be also
      pushed on a stack).

      (2) Local Queue. The local queue of the task, which is accessed
      reasonably efficiently by the task. Other tasks can steal from
      it when they run out of work. Throughout the marking phase, a
      task attempts to keep its local queue short but not totally
      empty, so that entries are available for stealing by other
      tasks.
      Only when there is no more work will a task totally
      drain its local queue.

      (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and more fine-grained
      interaction with it which might cause contention. If it
      overflows, then the marking phase should restart and iterate
      over the bitmap to identify gray objects. Throughout the marking
      phase, tasks attempt to keep the global mark stack at a small
      length but not totally empty, so that entries are available for
      popping by other tasks. Only when there is no more work will
      tasks totally drain the global mark stack.

      (4) SATB Buffer Queue. This is where completed SATB buffers are
      made available. Buffers are regularly removed from this queue
      and scanned for roots, so that the queue doesn't get too
      long. During remark, all completed buffers are processed, as
      well as the filled in parts of any uncompleted buffers.

    The do_marking_step() method tries to abort when the time target
    has been reached. There are a few other cases when the
    do_marking_step() method also aborts:

      (1) When the marking phase has been aborted (after a Full GC).

      (2) When a global overflow (on the global stack) has been
      triggered. Before the task aborts, it will actually sync up with
      the other tasks to ensure that all the marking data structures
      (local queues, stacks, fingers etc.) are re-initialized so that
      when do_marking_step() completes, the marking phase can
      immediately restart.

      (3) When enough completed SATB buffers are available. The
      do_marking_step() method only tries to drain SATB buffers right
      at the beginning. So, if enough buffers are available, the
      marking step aborts and the SATB buffers are processed at
      the beginning of the next invocation.

      (4) To yield. When we have to yield then we abort and yield
      right at the end of do_marking_step(). This saves us from a lot
      of hassle as, by yielding we might allow a Full GC. If this
      happens then objects will be compacted underneath our feet, the
      heap might shrink, etc. We save checking for this by just
      aborting and doing the yield right at the end.

    From the above it follows that the do_marking_step() method should
    be called in a loop (or, otherwise, regularly) until it completes.

    If a marking step completes without its has_aborted() flag being
    true, it means it has completed the current marking phase (and
    also all other marking tasks have done so and have all synced up).

    A method called regular_clock_call() is invoked "regularly" (in
    sub ms intervals) throughout marking. It is this clock method that
    checks all the abort conditions which were mentioned above and
    decides when the task should abort. A work-based scheme is used to
    trigger this clock method: when the number of object words the
    marking phase has scanned or the number of references the marking
    phase has visited reaches a given limit. Additional invocations of
    the clock method have been planted in a few other strategic places
    too. The initial reason for the clock method was to avoid calling
    vtime too regularly, as it is quite expensive. So, once it was in
    place, it was natural to piggy-back all the other conditions on it
    too and not constantly check them throughout the code.

    If do_termination is true then do_marking_step will enter its
    termination protocol.

    The value of is_serial must be true when do_marking_step is being
    called serially (i.e. by the VMThread) and do_marking_step should
    skip any synchronization in the termination and overflow code.
    Examples include the serial remark code and the serial reference
    processing closures.

    The value of is_serial must be false when do_marking_step is
    being called by any of the worker threads in a work gang.
    Examples include the concurrent marking code (CMMarkingTask),
    the MT remark code, and the MT reference processing closures.

 *****************************************************************************/

void G1CMTask::do_marking_step(double time_target_ms,
                               bool do_termination,
                               bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");

  _start_time_ms = os::elapsedVTime() * 1000.0;

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other G1CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms = _g1h->policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // Set up the variables that are used in the work-based scheme to
  // call the regular clock method.
  _words_scanned = 0;
  _refs_reached  = 0;
  recalculate_limits();

  // Clear all flags.
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  G1CMBitMapClosure bitmap_closure(this, _cm);
  G1CMOopClosure cm_oop_closure(_g1h, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack.
  drain_local_queue(true);
  drain_global_stack(true);
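
// The calling pattern implied by the notes above is (an illustrative
// sketch, not product code):
//
//   do {
//     task->do_marking_step(target_ms, do_termination, is_serial);
//   } while (task->has_aborted() && !cm->has_overflown());
//
// This is exactly the loop used by G1CMRemarkTask and by the reference
// processing closures earlier in this file.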

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        abort_marking_if_regular_check_fail();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_next_mark_bitmap->is_marked(mr.start())) {
          // The object is marked - apply the closure.
          bitmap_closure.do_addr(mr.start());
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        abort_marking_if_regular_check_fail();
      } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        abort_marking_if_regular_check_fail();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header.
        assert(_finger < _region_limit, "invariant");
        HeapWord* const new_finger = _finger + ((oop)_finger)->size();
        // Check if bitmap iteration was aborted while scanning the last object.
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region  == NULL, "invariant");
      assert(_finger       == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one.
        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      abort_marking_if_regular_check_fail();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");
    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt at work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");
    while (!has_aborted()) {
      G1TaskQueueEntry entry;
      if (_cm->try_stealing(_worker_id, entry)) {
        scan_task_entry(entry);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The G1CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!has_aborted(), "should never happen if termination has completed");
    } else {
      // Apparently there's more work to do. Let's abort this task. Our
      // caller will restart it and we can hopefully find more things to do.
      set_has_aborted();
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.
    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context.
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures.
      }

      clear_region_fields();
      flush_mark_stats_cache();

      if (!is_serial) {
        // If we're executing the concurrent phase of marking, reset the marking
        // state; otherwise the marking state is reset after reference processing,
        // during the remark pause.
        // If we reset here as a result of an overflow during the remark we will
        // see assertion failures from any subsequent set_concurrency_and_phase()
        // calls.
        if (_cm->concurrent() && _worker_id == 0) {
          // Worker 0 is responsible for clearing the global data structures because
          // of an overflow. During STW we should not clear the overflow flag (in
          // G1ConcurrentMark::reset_marking_state()) since we rely on it being true
          // when we exit this method to abort the pause and restart concurrent
          // marking.
          _cm->reset_marking_for_restart();

          log_info(gc, marking)("Concurrent Mark reset for overflow");
        }

        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're during the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
  }
}
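
// Barrier protocol sketch for the overflow path above (for exposition only):
//
//   all workers: enter_first_sync_barrier()   - everyone stops marking
//   worker 0:    reset_marking_for_restart()  - concurrent phase only
//   all workers: enter_second_sync_barrier()  - everyone may now restart
//
// Serial callers skip both barriers since there is nobody to sync with.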

G1CMTask::G1CMTask(uint worker_id,
                   G1ConcurrentMark* cm,
                   G1CMTaskQueue* task_queue,
                   G1RegionMarkStats* mark_stats,
                   uint max_regions) :
  _objArray_processor(this),
  _worker_id(worker_id),
  _g1h(G1CollectedHeap::heap()),
  _cm(cm),
  _next_mark_bitmap(NULL),
  _task_queue(task_queue),
  _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
  _calls(0),
  _time_target_ms(0.0),
  _start_time_ms(0.0),
  _cm_oop_closure(NULL),
  _curr_region(NULL),
  _finger(NULL),
  _region_limit(NULL),
  _words_scanned(0),
  _words_scanned_limit(0),
  _real_words_scanned_limit(0),
  _refs_reached(0),
  _refs_reached_limit(0),
  _real_refs_reached_limit(0),
  _has_aborted(false),
  _has_timed_out(false),
  _draining_satb_buffers(false),
  _step_times_ms(),
  _elapsed_time_ms(0.0),
  _termination_time_ms(0.0),
  _termination_start_time_ms(0.0),
  _marking_step_diffs_ms()
{
  guarantee(task_queue != NULL, "invariant");

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX            "###"

#define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT             " %-4s"
#define G1PPRL_TYPE_H_FORMAT           " %4s"
#define G1PPRL_STATE_FORMAT            " %-5s"
#define G1PPRL_STATE_H_FORMAT          " %5s"
#define G1PPRL_BYTE_FORMAT             " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT           " %9s"
#define G1PPRL_DOUBLE_FORMAT           " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT         " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"

G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
  _total_used_bytes(0), _total_capacity_bytes(0),
  _total_prev_live_bytes(0), _total_next_live_bytes(0),
  _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
{
  if (!log_is_enabled(Trace, gc, liveness)) {
    return;
  }

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_STATE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "state", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_STATE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "", "(bytes)");
}

bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
  if (!log_is_enabled(Trace, gc, liveness)) {
    return false;
  }

  const char* type       = r->get_type_str();
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
  const char* remset_type = r->rem_set()->get_short_state_str();

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_STATE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, remset_type, strong_code_roots_bytes);

  return false;
}
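
// Shape of the per-region output produced above (an illustrative sketch;
// the values are made up):
//
//   ### PHASE Post-Marking @ 12.345
//   ### HEAP  reserved: 0x...-0x...  region-size: 1048576
//   ###
//   ###  type         address-range       used  prev-live  next-live ...
//   ###   OLD  0x...-0x...            1048576    1032480    1032480 ...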

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  if (!log_is_enabled(Trace, gc, liveness)) {
    return;
  }

  // Add static memory usage to the remembered set sizes.
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          " SUMMARY"
                          G1PPRL_SUM_MB_FORMAT("capacity")
                          G1PPRL_SUM_MB_PERC_FORMAT("used")
                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                          G1PPRL_SUM_MB_FORMAT("remset")
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
                          percent_of(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
}