/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

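// The chunk list and the free list are each guarded by their own mutex
// (MarkStackChunkList_lock and MarkStackFreeList_lock); the wrappers below
// take the appropriate lock before manipulating the shared list heads and
// the _chunks_in_chunk_list counter.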
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

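// The global mark stack transfers entries in whole chunks: a task pushes or
// pops EntriesPerChunk entries at a time, which keeps contention on the two
// list locks low compared to synchronizing on every single entry.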
bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
  _root_regions(MemRegion::create_array(max_regions, mtGC)),
  _max_regions(max_regions),
  _num_root_regions(0),
  _claimed_root_regions(0),
  _scan_in_progress(false),
  _should_abort(false) { }

G1CMRootMemRegions::~G1CMRootMemRegions() {
  FREE_C_HEAP_ARRAY(MemRegion, _root_regions);
}

void G1CMRootMemRegions::reset() {
  _num_root_regions = 0;
}

void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
  assert_at_safepoint();
  size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
  assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
  assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less than or equal to "
         "end (" PTR_FORMAT ")", p2i(start), p2i(end));
  _root_regions[idx].set_start(start);
  _root_regions[idx].set_end(end);
}

void G1CMRootMemRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  _scan_in_progress = _num_root_regions > 0;

  _claimed_root_regions = 0;
  _should_abort = false;
}

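// Workers claim root regions with an atomic fetch-and-add on
// _claimed_root_regions. The counter may race past _num_root_regions when
// several workers claim concurrently; the bounds check below makes that
// overshoot benign.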
const MemRegion* G1CMRootMemRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  if (_claimed_root_regions >= _num_root_regions) {
    return NULL;
  }

  size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u);
  if (claimed_index < _num_root_regions) {
    return &_root_regions[claimed_index];
  }
  return NULL;
}

uint G1CMRootMemRegions::num_root_regions() const {
  return (uint)_num_root_regions;
}

void G1CMRootMemRegions::notify_scan_done() {
  MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootMemRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootMemRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  if (!_should_abort) {
    assert(_claimed_root_regions >= num_root_regions(),
           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
           _claimed_root_regions, num_root_regions());
  }

  notify_scan_done();
}

bool G1CMRootMemRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      ml.wait();
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(_g1h->max_regions()),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator((int) _max_num_tasks, _task_queues),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

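  // The ergonomic default for ConcGCThreads is computed by
  // scale_concurrent_worker_threads() above as MAX2((n + 2) / 4, 1):
  // e.g. ParallelGCThreads == 8 yields 2 concurrent workers.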
  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

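  // If MarkStackSize was not set on the command line, size the initial
  // global mark stack so that each concurrent worker can overflow at least
  // one full local task queue (_max_concurrent_workers * TASKQUEUE_SIZE
  // entries), capped at MarkStackSizeMax.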
  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use different numbers of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got humongous continues region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

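// Called at a safepoint when a humongous object has been eagerly reclaimed
// during an evacuation pause: any mark for it in either bitmap must be
// cleared, and if marking or remembered set rebuilding is in progress the
// statistics gathered so far for its regions must be discarded too.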
void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator.reset_for_reuse(active_tasks);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_initial_mark() {
  assert_at_safepoint_on_vm_thread();

  // Reset marking state.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);

  _root_regions.reset();
}

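// Runs at the end of the initial-mark pause: from here on reference
// discovery is enabled and the SATB barrier is active for all mutator
// threads, so concurrent marking can begin.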
void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended so that a Full GC or an evacuation
 * pause can occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

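// The two barriers below are used to synchronize all active marking tasks
// after the global mark stack has overflown, so that every task has stopped
// working before the marking state is reset for the restart.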
void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial */);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};

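// Computes the number of workers for the next concurrent phase: the full
// complement of _max_concurrent_workers unless dynamic sizing
// (UseDynamicNumberOfGCThreads) is permitted to choose fewer.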
uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
                                                1, /* Minimum workers */
                                                _num_concurrent_workers,
                                                Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_workers() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
#ifdef ASSERT
  HeapWord* last = region->last();
  HeapRegion* hr = _g1h->heap_region_containing(last);
  assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
         "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
  assert(hr->next_top_at_mark_start() == region->start(),
         "MemRegion start should be equal to nTAMS");
#endif

  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = region->start();
  const HeapWord* end = region->end();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootMemRegions* root_regions = _cm->root_regions();
    const MemRegion* region = root_regions->claim_next();
    while (region != NULL) {
      _cm->scan_root_region(region, worker_id);
      region = root_regions->claim_next();
    }
  }
};

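// Root region scanning runs concurrently with the mutators, but it has to be
// finished before the next evacuation pause may start; the pause waits for
// it via G1CMRootMemRegions::wait_until_scan_finished().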
void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}

class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
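    // E.g. a fully-live humongous object spanning two and a half regions has
    // GrainWords attributed to each of its first two regions and the
    // remaining half region's worth to the last one; distributing zero words
    // only notes the end of marking for each covered region.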
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }

    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous starts
      // region. We need to distribute this value across the entire set of regions a
      // humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which roughly one thread should be spawned for this work.
  static const uint RegionsPerThread = 384;
};
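// With RegionsPerThread == 384, e.g. a 2048-region heap is handled by
// ceil(2048 / 384) == 6 workers in remark(), further capped by the number of
// active workers in the work gang.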

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  G1Policy* policy = _g1h->policy();
  policy->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  policy->record_concurrent_mark_remark_end();
}

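// Frees regions that marking found to contain no live data at all, returning
// them to the free list without any evacuation.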
class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;

  public:
    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                 FreeRegionList* local_cleanup_list) :
      _g1h(g1h),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");
    }
  }
};

void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}

void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* policy = _g1h->policy();
  policy->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    policy->record_concurrent_mark_cleanup_end();
  }
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// state. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
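// Note that the closure below drains the task's queues after every
// G1RefProcDrainInterval references processed (1000 by default), bounding
// the amount of marking work that can pile up while walking long
// discovered-reference lists.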

class G1CMKeepAliveAndDrainClosure : public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  uint _ref_counter_limit;
  uint _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
    _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (_cm->has_overflown()) {
      return;
    }
    if (!_task->deal_with_reference(p)) {
      // We did not add anything to the mark bitmap (or mark stack), so there is
      // no point trying to drain it.
      return;
    }
    _ref_counter--;

    if (_ref_counter == 0) {
      // We have dealt with _ref_counter_limit references, pushing them
      // and objects reachable from them on to the local stack (and
      // possibly the global stack). Call G1CMTask::do_marking_step() to
      // process these entries.
      //
      // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
      // there's nothing more to do (i.e. we're done with the entries that
      // were pushed as a result of the G1CMTask::deal_with_reference() calls
      // above) or we overflow.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.
      do {
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
        _task->do_marking_step(mark_step_duration_ms,
                               false /* do_termination */,
                               _is_serial);
      } while (_task->has_aborted() && !_cm->has_overflown());
      _ref_counter = _ref_counter_limit;
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure : public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  virtual void execute(ProcessTask& task, uint ergo_workers);
};

class G1CMRefProcTaskProxy : public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
  assert(_workers->active_workers() >= ergo_workers,
         "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
         ergo_workers, _workers->active_workers());

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(ergo_workers);
  _workers->run_task(&proc_task_proxy, ergo_workers);
}

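// Reference and weak root processing during the Remark pause: processes the
// references discovered while marking, then the other weak roots, and
// finally does class unloading or string deduplication cleanup as
// configured.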
1571 G1CMIsAliveClosure g1_is_alive(_g1h); 1572 1573 // Inner scope to exclude the cleaning of the string table 1574 // from the displayed time. 1575 { 1576 GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm); 1577 1578 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1579 1580 // See the comment in G1CollectedHeap::ref_processing_init() 1581 // about how reference processing currently works in G1. 1582 1583 // Set the soft reference policy 1584 rp->setup_policy(clear_all_soft_refs); 1585 assert(_global_mark_stack.is_empty(), "mark stack should be empty"); 1586 1587 // Instances of the 'Keep Alive' and 'Complete GC' closures used 1588 // in serial reference processing. Note these closures are also 1589 // used for serially processing (by the current thread) the 1590 // JNI references during parallel reference processing. 1591 // 1592 // These closures do not need to synchronize with the worker 1593 // threads involved in parallel reference processing as these 1594 // instances are executed serially by the current thread (i.e. 1595 // reference processing is not multi-threaded and is thus 1596 // performed by the current thread instead of a gang worker). 1597 // 1598 // The gang tasks involved in parallel reference processing create 1599 // their own instances of these closures, which do their own 1600 // synchronization among themselves. 1601 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 1602 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 1603 1604 // We need at least one active thread. If reference processing 1605 // is not multi-threaded we use the current (VMThread) thread, 1606 // otherwise we use the work gang from the G1CollectedHeap and 1607 // we utilize all the worker threads we can. 1608 bool processing_is_mt = rp->processing_is_mt(); 1609 uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U); 1610 active_workers = clamp(active_workers, 1u, _max_num_tasks); 1611 1612 // Parallel processing task executor. 1613 G1CMRefProcTaskExecutor par_task_executor(_g1h, this, 1614 _g1h->workers(), active_workers); 1615 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 1616 1617 // Set the concurrency level. The phase was already set prior to 1618 // executing the remark task. 1619 set_concurrency(active_workers); 1620 1621 // Set the degree of MT processing here. If the discovery was done MT, 1622 // the number of threads involved during discovery could differ from 1623 // the number of active workers. This is OK as long as the discovered 1624 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 1625 rp->set_active_mt_degree(active_workers); 1626 1627 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues()); 1628 1629 // Process the weak references. 1630 const ReferenceProcessorStats& stats = 1631 rp->process_discovered_references(&g1_is_alive, 1632 &g1_keep_alive, 1633 &g1_drain_mark_stack, 1634 executor, 1635 &pt); 1636 _gc_tracer_cm->report_gc_reference_stats(stats); 1637 pt.print_all_references(); 1638 1639 // The do_oop work routines of the keep_alive and drain_marking_stack 1640 // oop closures will set the has_overflown flag if we overflow the 1641 // global marking stack.
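// (Editorial note: an overflow here is effectively unrecoverable for this
// cycle; see the fatal() below, after this reference-processing scope,
// which asks for a larger MarkStackSizeMax.)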
1642 1643 assert(has_overflown() || _global_mark_stack.is_empty(), 1644 "Mark stack should be empty (unless it has overflown)"); 1645 1646 assert(rp->num_queues() == active_workers, "why not"); 1647 1648 rp->verify_no_references_recorded(); 1649 assert(!rp->discovery_enabled(), "Post condition"); 1650 } 1651 1652 if (has_overflown()) { 1653 // We cannot trust g1_is_alive and the contents of the heap if the marking stack 1654 // overflowed while processing references. Exit the VM. 1655 fatal("Overflow during reference processing, cannot continue. Please " 1656 "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and " 1657 "restart.", MarkStackSizeMax); 1658 return; 1659 } 1660 1661 assert(_global_mark_stack.is_empty(), "Marking should have completed"); 1662 1663 { 1664 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm); 1665 WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1); 1666 } 1667 1668 // Unload Klasses, Strings, Code Cache, etc. 1669 if (ClassUnloadingWithConcurrentMark) { 1670 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); 1671 bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm); 1672 _g1h->complete_cleaning(&g1_is_alive, purged_classes); 1673 } else if (StringDedup::is_enabled()) { 1674 GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm); 1675 _g1h->string_dedup_cleaning(&g1_is_alive, NULL); 1676 } 1677 } 1678 1679 class G1PrecleanYieldClosure : public YieldClosure { 1680 G1ConcurrentMark* _cm; 1681 1682 public: 1683 G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { } 1684 1685 virtual bool should_return() { 1686 return _cm->has_aborted(); 1687 } 1688 1689 virtual bool should_return_fine_grain() { 1690 _cm->do_yield_check(); 1691 return _cm->has_aborted(); 1692 } 1693 }; 1694 1695 void G1ConcurrentMark::preclean() { 1696 assert(G1UseReferencePrecleaning, "Precleaning must be enabled."); 1697 1698 SuspendibleThreadSetJoiner joiner; 1699 1700 G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */); 1701 G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */); 1702 1703 set_concurrency_and_phase(1, true); 1704 1705 G1PrecleanYieldClosure yield_cl(this); 1706 1707 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1708 // Precleaning is single threaded. Temporarily disable MT discovery. 1709 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); 1710 rp->preclean_discovered_references(rp->is_alive_non_header(), 1711 &keep_alive, 1712 &drain_mark_stack, 1713 &yield_cl, 1714 _gc_timer_cm); 1715 } 1716 1717 // When sampling object counts, we already swapped the mark bitmaps, so we need to use 1718 // the prev bitmap when determining liveness. 1719 class G1ObjectCountIsAliveClosure: public BoolObjectClosure { 1720 G1CollectedHeap* _g1h; 1721 public: 1722 G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { } 1723 1724 bool do_object_b(oop obj) { 1725 return obj != NULL && 1726 (!_g1h->is_in_g1_reserved(obj) || !_g1h->is_obj_dead(obj)); 1727 } 1728 }; 1729 1730 void G1ConcurrentMark::report_object_count(bool mark_completed) { 1731 // Depending on whether marking has completed, liveness needs to be determined 1732 // using either the next or the prev bitmap.
1733 if (mark_completed) { 1734 G1ObjectCountIsAliveClosure is_alive(_g1h); 1735 _gc_tracer_cm->report_object_count_after_gc(&is_alive); 1736 } else { 1737 G1CMIsAliveClosure is_alive(_g1h); 1738 _gc_tracer_cm->report_object_count_after_gc(&is_alive); 1739 } 1740 } 1741 1742 1743 void G1ConcurrentMark::swap_mark_bitmaps() { 1744 G1CMBitMap* temp = _prev_mark_bitmap; 1745 _prev_mark_bitmap = _next_mark_bitmap; 1746 _next_mark_bitmap = temp; 1747 _g1h->collector_state()->set_clearing_next_bitmap(true); 1748 } 1749 1750 // Closure for marking entries in SATB buffers. 1751 class G1CMSATBBufferClosure : public SATBBufferClosure { 1752 private: 1753 G1CMTask* _task; 1754 G1CollectedHeap* _g1h; 1755 1756 // This is very similar to G1CMTask::deal_with_reference, but with 1757 // more relaxed requirements for the argument, so this must be more 1758 // circumspect about treating the argument as an object. 1759 void do_entry(void* entry) const { 1760 _task->increment_refs_reached(); 1761 oop const obj = static_cast<oop>(entry); 1762 _task->make_reference_grey(obj); 1763 } 1764 1765 public: 1766 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h) 1767 : _task(task), _g1h(g1h) { } 1768 1769 virtual void do_buffer(void** buffer, size_t size) { 1770 for (size_t i = 0; i < size; ++i) { 1771 do_entry(buffer[i]); 1772 } 1773 } 1774 }; 1775 1776 class G1RemarkThreadsClosure : public ThreadClosure { 1777 G1CMSATBBufferClosure _cm_satb_cl; 1778 G1CMOopClosure _cm_cl; 1779 MarkingCodeBlobClosure _code_cl; 1780 uintx _claim_token; 1781 1782 public: 1783 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) : 1784 _cm_satb_cl(task, g1h), 1785 _cm_cl(g1h, task), 1786 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), 1787 _claim_token(Threads::thread_claim_token()) {} 1788 1789 void do_thread(Thread* thread) { 1790 if (thread->claim_threads_do(true, _claim_token)) { 1791 SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread); 1792 queue.apply_closure_and_empty(&_cm_satb_cl); 1793 if (thread->is_Java_thread()) { 1794 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking; 1795 // however, oops reachable from nmethods have very complex lifecycles: 1796 // * Alive if on the stack of an executing method 1797 // * Weakly reachable otherwise 1798 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be 1799 // live by the SATB invariant, but other oops recorded in nmethods may behave differently. 1800 JavaThread* jt = (JavaThread*)thread; 1801 jt->nmethods_do(&_code_cl); 1802 } 1803 } 1804 } 1805 }; 1806 1807 class G1CMRemarkTask : public AbstractGangTask { 1808 G1ConcurrentMark* _cm; 1809 public: 1810 void work(uint worker_id) { 1811 G1CMTask* task = _cm->task(worker_id); 1812 task->record_start_time(); 1813 { 1814 ResourceMark rm; 1815 HandleMark hm; 1816 1817 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task); 1818 Threads::threads_do(&threads_f); 1819 } 1820 1821 do { 1822 task->do_marking_step(1000000000.0 /* something very large */, 1823 true /* do_termination */, 1824 false /* is_serial */); 1825 } while (task->has_aborted() && !_cm->has_overflown()); 1826 // If we overflow, then we do not want to restart. We instead 1827 // want to abort remark and do concurrent marking again.
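// (Editorial note: that policy is implemented by the caller; after the
// remark task finishes, G1ConcurrentMark::remark() checks has_overflown()
// and, if it is set, abandons this remark pause so that concurrent marking
// starts over.)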
1828 task->record_end_time(); 1829 } 1830 1831 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) : 1832 AbstractGangTask("Par Remark"), _cm(cm) { 1833 _cm->terminator()->reset_for_reuse(active_workers); 1834 } 1835 }; 1836 1837 void G1ConcurrentMark::finalize_marking() { 1838 ResourceMark rm; 1839 HandleMark hm; 1840 1841 _g1h->ensure_parsability(false); 1842 1843 // this is remark, so we'll use up all active threads 1844 uint active_workers = _g1h->workers()->active_workers(); 1845 set_concurrency_and_phase(active_workers, false /* concurrent */); 1846 // Leave _parallel_marking_threads at its 1847 // value originally calculated in the G1ConcurrentMark 1848 // constructor and pass values of the active workers 1849 // through the gang in the task. 1850 1851 { 1852 StrongRootsScope srs(active_workers); 1853 1854 G1CMRemarkTask remarkTask(this, active_workers); 1855 // We will start all available threads, even if we decide that the 1856 // active_workers will be fewer. The extra ones will just bail out 1857 // immediately. 1858 _g1h->workers()->run_task(&remarkTask); 1859 } 1860 1861 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 1862 guarantee(has_overflown() || 1863 satb_mq_set.completed_buffers_num() == 0, 1864 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT, 1865 BOOL_TO_STR(has_overflown()), 1866 satb_mq_set.completed_buffers_num()); 1867 1868 print_stats(); 1869 } 1870 1871 void G1ConcurrentMark::flush_all_task_caches() { 1872 size_t hits = 0; 1873 size_t misses = 0; 1874 for (uint i = 0; i < _max_num_tasks; i++) { 1875 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache(); 1876 hits += stats.first; 1877 misses += stats.second; 1878 } 1879 size_t sum = hits + misses; 1880 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf", 1881 hits, misses, percent_of(hits, sum)); 1882 } 1883 1884 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) { 1885 _prev_mark_bitmap->clear_range(mr); 1886 } 1887 1888 HeapRegion* 1889 G1ConcurrentMark::claim_region(uint worker_id) { 1890 // "checkpoint" the finger 1891 HeapWord* finger = _finger; 1892 1893 while (finger < _heap.end()) { 1894 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 1895 1896 HeapRegion* curr_region = _g1h->heap_region_containing(finger); 1897 // Make sure that the reads below do not float before loading curr_region. 1898 OrderAccess::loadload(); 1899 // The above heap_region_containing() may return NULL as we always scan 1900 // until the end of the heap. In this case, just jump to the next region. 1901 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; 1902 1903 // Is the gap between reading the finger and doing the CAS too long? 1904 HeapWord* res = Atomic::cmpxchg(&_finger, finger, end); 1905 if (res == finger && curr_region != NULL) { 1906 // we succeeded 1907 HeapWord* bottom = curr_region->bottom(); 1908 HeapWord* limit = curr_region->next_top_at_mark_start(); 1909 1910 // notice that _finger == end cannot be guaranteed here since 1911 // someone else might have moved the finger even further 1912 assert(_finger >= end, "the finger should have moved forward"); 1913 1914 if (limit > bottom) { 1915 return curr_region; 1916 } else { 1917 assert(limit == bottom, 1918 "the region limit should be at bottom"); 1919 // we return NULL and the caller should try calling 1920 // claim_region() again.
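// (Editorial sketch: the retry loop on the caller's side lives in
// G1CMTask::do_marking_step() further down in this file, and is roughly:
//
//   while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
//     HeapRegion* claimed_region = _cm->claim_region(_worker_id);
//     if (claimed_region != NULL) {
//       setup_for_region(claimed_region);
//     }
//     abort_marking_if_regular_check_fail();
//   }
// )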
1921 return NULL; 1922 } 1923 } else { 1924 assert(_finger > finger, "the finger should have moved forward"); 1925 // read it again 1926 finger = _finger; 1927 } 1928 } 1929 1930 return NULL; 1931 } 1932 1933 #ifndef PRODUCT 1934 class VerifyNoCSetOops { 1935 G1CollectedHeap* _g1h; 1936 const char* _phase; 1937 int _info; 1938 1939 public: 1940 VerifyNoCSetOops(const char* phase, int info = -1) : 1941 _g1h(G1CollectedHeap::heap()), 1942 _phase(phase), 1943 _info(info) 1944 { } 1945 1946 void operator()(G1TaskQueueEntry task_entry) const { 1947 if (task_entry.is_array_slice()) { 1948 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1949 return; 1950 } 1951 guarantee(oopDesc::is_oop(task_entry.obj()), 1952 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1953 p2i(task_entry.obj()), _phase, _info); 1954 HeapRegion* r = _g1h->heap_region_containing(task_entry.obj()); 1955 guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()), 1956 "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set", 1957 p2i(task_entry.obj()), _phase, _info, r->hrm_index()); 1958 } 1959 }; 1960 1961 void G1ConcurrentMark::verify_no_collection_set_oops() { 1962 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1963 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { 1964 return; 1965 } 1966 1967 // Verify entries on the global mark stack 1968 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1969 1970 // Verify entries on the task queues 1971 for (uint i = 0; i < _max_num_tasks; ++i) { 1972 G1CMTaskQueue* queue = _task_queues->queue(i); 1973 queue->iterate(VerifyNoCSetOops("Queue", i)); 1974 } 1975 1976 // Verify the global finger 1977 HeapWord* global_finger = finger(); 1978 if (global_finger != NULL && global_finger < _heap.end()) { 1979 // Since we always iterate over all regions, we might get a NULL HeapRegion 1980 // here. 1981 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1982 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1983 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1984 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1985 } 1986 1987 // Verify the task fingers 1988 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 1989 for (uint i = 0; i < _num_concurrent_workers; ++i) { 1990 G1CMTask* task = _tasks[i]; 1991 HeapWord* task_finger = task->finger(); 1992 if (task_finger != NULL && task_finger < _heap.end()) { 1993 // See above note on the global finger verification. 
1994 HeapRegion* r = _g1h->heap_region_containing(task_finger); 1995 guarantee(r == NULL || task_finger == r->bottom() || 1996 !r->in_collection_set() || !r->has_index_in_opt_cset(), 1997 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1998 p2i(task_finger), HR_FORMAT_PARAMS(r)); 1999 } 2000 } 2001 } 2002 #endif // PRODUCT 2003 2004 void G1ConcurrentMark::rebuild_rem_set_concurrently() { 2005 _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset); 2006 } 2007 2008 void G1ConcurrentMark::print_stats() { 2009 if (!log_is_enabled(Debug, gc, stats)) { 2010 return; 2011 } 2012 log_debug(gc, stats)("---------------------------------------------------------------------"); 2013 for (size_t i = 0; i < _num_active_tasks; ++i) { 2014 _tasks[i]->print_stats(); 2015 log_debug(gc, stats)("---------------------------------------------------------------------"); 2016 } 2017 } 2018 2019 void G1ConcurrentMark::concurrent_cycle_abort() { 2020 if (!cm_thread()->during_cycle() || _has_aborted) { 2021 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2022 return; 2023 } 2024 2025 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2026 // concurrent bitmap clearing. 2027 { 2028 GCTraceTime(Debug, gc) debug("Clear Next Bitmap"); 2029 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false); 2030 } 2031 // Note we cannot clear the previous marking bitmap here 2032 // since VerifyDuringGC verifies the objects marked during 2033 // a full GC against the previous bitmap. 2034 2035 // Empty mark stack 2036 reset_marking_for_restart(); 2037 for (uint i = 0; i < _max_num_tasks; ++i) { 2038 _tasks[i]->clear_region_fields(); 2039 } 2040 _first_overflow_barrier_sync.abort(); 2041 _second_overflow_barrier_sync.abort(); 2042 _has_aborted = true; 2043 2044 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2045 satb_mq_set.abandon_partial_marking(); 2046 // This can be called either during or outside marking; we'll read 2047 // the expected_active value from the SATB queue set. 2048 satb_mq_set.set_active_all_threads( 2049 false, /* new active value */ 2050 satb_mq_set.is_active() /* expected_active */); 2051 } 2052 2053 static void print_ms_time_info(const char* prefix, const char* name, 2054 NumberSeq& ns) { 2055 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2056 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2057 if (ns.num() > 0) { 2058 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", 2059 prefix, ns.sd(), ns.maximum()); 2060 } 2061 } 2062 2063 void G1ConcurrentMark::print_summary_info() { 2064 Log(gc, marking) log; 2065 if (!log.is_trace()) { 2066 return; 2067 } 2068 2069 log.trace(" Concurrent marking:"); 2070 print_ms_time_info(" ", "init marks", _init_times); 2071 print_ms_time_info(" ", "remarks", _remark_times); 2072 { 2073 print_ms_time_info(" ", "final marks", _remark_mark_times); 2074 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2075 2076 } 2077 print_ms_time_info(" ", "cleanups", _cleanup_times); 2078 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2079 _total_cleanup_time, (_cleanup_times.num() > 0 ?
_total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2080 log.trace(" Total stop_world time = %8.2f s.", 2081 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2082 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2083 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum()); 2084 } 2085 2086 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2087 _concurrent_workers->print_worker_threads_on(st); 2088 } 2089 2090 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2091 _concurrent_workers->threads_do(tc); 2092 } 2093 2094 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2095 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2096 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap)); 2097 _prev_mark_bitmap->print_on_error(st, " Prev Bits: "); 2098 _next_mark_bitmap->print_on_error(st, " Next Bits: "); 2099 } 2100 2101 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2102 ReferenceProcessor* result = g1h->ref_processor_cm(); 2103 assert(result != NULL, "CM reference processor should not be NULL"); 2104 return result; 2105 } 2106 2107 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2108 G1CMTask* task) 2109 : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)), 2110 _g1h(g1h), _task(task) 2111 { } 2112 2113 void G1CMTask::setup_for_region(HeapRegion* hr) { 2114 assert(hr != NULL, 2115 "claim_region() should have filtered out NULL regions"); 2116 _curr_region = hr; 2117 _finger = hr->bottom(); 2118 update_region_limit(); 2119 } 2120 2121 void G1CMTask::update_region_limit() { 2122 HeapRegion* hr = _curr_region; 2123 HeapWord* bottom = hr->bottom(); 2124 HeapWord* limit = hr->next_top_at_mark_start(); 2125 2126 if (limit == bottom) { 2127 // The region was collected underneath our feet. 2128 // We set the finger to bottom to ensure that the bitmap 2129 // iteration that will follow this will not do anything. 2130 // (this is not a condition that holds when we set the region up, 2131 // as the region is not supposed to be empty in the first place) 2132 _finger = bottom; 2133 } else if (limit >= _region_limit) { 2134 assert(limit >= _finger, "peace of mind"); 2135 } else { 2136 assert(limit < _region_limit, "only way to get here"); 2137 // This can happen under some pretty unusual circumstances. An 2138 // evacuation pause empties the region underneath our feet (NTAMS 2139 // at bottom). We then do some allocation in the region (NTAMS 2140 // stays at bottom), followed by the region being used as a GC 2141 // alloc region (NTAMS will move to top() and the objects 2142 // originally below it will be grayed). All objects now marked in 2143 // the region are explicitly grayed, if below the global finger, 2144 // and in fact we do not need to scan anything else. So, we simply 2145 // set _finger to be limit to ensure that the bitmap iteration 2146 // doesn't do anything. 2147 _finger = limit; 2148 } 2149 2150 _region_limit = limit; 2151 } 2152 2153 void G1CMTask::giveup_current_region() { 2154 assert(_curr_region != NULL, "invariant"); 2155 clear_region_fields(); 2156 } 2157 2158 void G1CMTask::clear_region_fields() { 2159 // Values for these three fields that indicate that we're not 2160 // holding on to a region.
2161 _curr_region = NULL; 2162 _finger = NULL; 2163 _region_limit = NULL; 2164 } 2165 2166 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 2167 if (cm_oop_closure == NULL) { 2168 assert(_cm_oop_closure != NULL, "invariant"); 2169 } else { 2170 assert(_cm_oop_closure == NULL, "invariant"); 2171 } 2172 _cm_oop_closure = cm_oop_closure; 2173 } 2174 2175 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) { 2176 guarantee(next_mark_bitmap != NULL, "invariant"); 2177 _next_mark_bitmap = next_mark_bitmap; 2178 clear_region_fields(); 2179 2180 _calls = 0; 2181 _elapsed_time_ms = 0.0; 2182 _termination_time_ms = 0.0; 2183 _termination_start_time_ms = 0.0; 2184 2185 _mark_stats_cache.reset(); 2186 } 2187 2188 bool G1CMTask::should_exit_termination() { 2189 if (!regular_clock_call()) { 2190 return true; 2191 } 2192 2193 // This is called when we are in the termination protocol. We should 2194 // quit if, for some reason, this task wants to abort or the global 2195 // stack is not empty (this means that we can get work from it). 2196 return !_cm->mark_stack_empty() || has_aborted(); 2197 } 2198 2199 void G1CMTask::reached_limit() { 2200 assert(_words_scanned >= _words_scanned_limit || 2201 _refs_reached >= _refs_reached_limit, 2202 "shouldn't have been called otherwise"); 2203 abort_marking_if_regular_check_fail(); 2204 } 2205 2206 bool G1CMTask::regular_clock_call() { 2207 if (has_aborted()) { 2208 return false; 2209 } 2210 2211 // First, we need to recalculate the words scanned and refs reached 2212 // limits for the next clock call. 2213 recalculate_limits(); 2214 2215 // During the regular clock call we do the following: 2216 2217 // (1) If an overflow has been flagged, then we abort. 2218 if (_cm->has_overflown()) { 2219 return false; 2220 } 2221 2222 // If we are not concurrent (i.e. we're doing remark) we don't need 2223 // to check anything else. The other steps are only needed during 2224 // the concurrent marking phase. 2225 if (!_cm->concurrent()) { 2226 return true; 2227 } 2228 2229 // (2) If marking has been aborted for Full GC, then we also abort. 2230 if (_cm->has_aborted()) { 2231 return false; 2232 } 2233 2234 double curr_time_ms = os::elapsedVTime() * 1000.0; 2235 2236 // (3) We check whether we should yield. If we have to, then we abort. 2237 if (SuspendibleThreadSet::should_yield()) { 2238 // We should yield. To do this we abort the task. The caller is 2239 // responsible for yielding. 2240 return false; 2241 } 2242 2243 // (4) We check whether we've reached our time quota. If we have, 2244 // then we abort. 2245 double elapsed_time_ms = curr_time_ms - _start_time_ms; 2246 if (elapsed_time_ms > _time_target_ms) { 2247 _has_timed_out = true; 2248 return false; 2249 } 2250 2251 // (5) Finally, we check whether there are enough completed SATB 2252 // buffers available for processing. If there are, we abort.
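// (Editorial note: completed buffers are SATB buffers that mutator threads
// have filled and enqueued. They are processed by drain_satb_buffers(),
// which do_marking_step() runs first thing on its next invocation.)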
2253 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2254 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 2255 // we do need to process SATB buffers, so we'll abort and restart 2256 // the marking task to do so 2257 return false; 2258 } 2259 return true; 2260 } 2261 2262 void G1CMTask::recalculate_limits() { 2263 _real_words_scanned_limit = _words_scanned + words_scanned_period; 2264 _words_scanned_limit = _real_words_scanned_limit; 2265 2266 _real_refs_reached_limit = _refs_reached + refs_reached_period; 2267 _refs_reached_limit = _real_refs_reached_limit; 2268 } 2269 2270 void G1CMTask::decrease_limits() { 2271 // This is called when we believe that we're going to do an infrequent 2272 // operation which will increase the per-byte scanned cost (i.e. move 2273 // entries to/from the global stack). It basically tries to decrease the 2274 // scanning limit so that the clock is called earlier. 2275 2276 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4; 2277 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4; 2278 } 2279 2280 void G1CMTask::move_entries_to_global_stack() { 2281 // Local array where we'll store the entries that will be popped 2282 // from the local queue. 2283 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2284 2285 size_t n = 0; 2286 G1TaskQueueEntry task_entry; 2287 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) { 2288 buffer[n] = task_entry; 2289 ++n; 2290 } 2291 if (n < G1CMMarkStack::EntriesPerChunk) { 2292 buffer[n] = G1TaskQueueEntry(); 2293 } 2294 2295 if (n > 0) { 2296 if (!_cm->mark_stack_push(buffer)) { 2297 set_has_aborted(); 2298 } 2299 } 2300 2301 // This operation was quite expensive, so decrease the limits. 2302 decrease_limits(); 2303 } 2304 2305 bool G1CMTask::get_entries_from_global_stack() { 2306 // Local array where we'll store the entries that will be popped 2307 // from the global stack. 2308 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2309 2310 if (!_cm->mark_stack_pop(buffer)) { 2311 return false; 2312 } 2313 2314 // We did actually pop at least one entry. 2315 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) { 2316 G1TaskQueueEntry task_entry = buffer[i]; 2317 if (task_entry.is_null()) { 2318 break; 2319 } 2320 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj())); 2321 bool success = _task_queue->push(task_entry); 2322 // We only call this when the local queue is empty or under a 2323 // given target limit. So, we do not expect this push to fail. 2324 assert(success, "invariant"); 2325 } 2326 2327 // This operation was quite expensive, so decrease the limits. 2328 decrease_limits(); 2329 return true; 2330 } 2331 2332 void G1CMTask::drain_local_queue(bool partially) { 2333 if (has_aborted()) { 2334 return; 2335 } 2336 2337 // Decide what the target size is, depending on whether we're going to 2338 // drain it partially (so that other tasks can steal if they run out 2339 // of things to do) or totally (at the very end).
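// (Editorial worked example, illustrative only since the numbers depend on
// platform and flag settings: with a 64-bit task queue capacity of 1 << 17
// entries and GCDrainStackTargetSize at its assumed default of 64, the
// partial target below is roughly MIN2(131072 / 3, 64) = 64 entries, i.e.
// the flag, not the queue capacity, normally bounds what we leave behind.)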
2340 size_t target_size; 2341 if (partially) { 2342 target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize); 2343 } else { 2344 target_size = 0; 2345 } 2346 2347 if (_task_queue->size() > target_size) { 2348 G1TaskQueueEntry entry; 2349 bool ret = _task_queue->pop_local(entry); 2350 while (ret) { 2351 scan_task_entry(entry); 2352 if (_task_queue->size() <= target_size || has_aborted()) { 2353 ret = false; 2354 } else { 2355 ret = _task_queue->pop_local(entry); 2356 } 2357 } 2358 } 2359 } 2360 2361 void G1CMTask::drain_global_stack(bool partially) { 2362 if (has_aborted()) { 2363 return; 2364 } 2365 2366 // We have a policy to drain the local queue before we attempt to 2367 // drain the global stack. 2368 assert(partially || _task_queue->size() == 0, "invariant"); 2369 2370 // Decide what the target size is, depending on whether we're going to 2371 // drain it partially (so that other tasks can steal if they run out 2372 // of things to do) or totally (at the very end). 2373 // Notice that when draining the global mark stack partially, due to the raciness 2374 // of the mark stack size update we might in fact drop below the target. But, 2375 // this is not a problem. 2376 // In case of total draining, we simply process until the global mark stack is 2377 // totally empty, disregarding the size counter. 2378 if (partially) { 2379 size_t const target_size = _cm->partial_mark_stack_size_target(); 2380 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 2381 if (get_entries_from_global_stack()) { 2382 drain_local_queue(partially); 2383 } 2384 } 2385 } else { 2386 while (!has_aborted() && get_entries_from_global_stack()) { 2387 drain_local_queue(partially); 2388 } 2389 } 2390 } 2391 2392 // The SATB queue has several assumptions on whether to call the par or 2393 // non-par versions of the methods. This is why some of the code is 2394 // replicated. We should really get rid of the single-threaded version 2395 // of the code to simplify things. 2396 void G1CMTask::drain_satb_buffers() { 2397 if (has_aborted()) { 2398 return; 2399 } 2400 2401 // We set this so that the regular clock knows that we're in the 2402 // middle of draining buffers and doesn't set the abort flag when it 2403 // notices that SATB buffers are available for draining. It'd be 2404 // very counterproductive if it did that. :-) 2405 _draining_satb_buffers = true; 2406 2407 G1CMSATBBufferClosure satb_cl(this, _g1h); 2408 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2409 2410 // This keeps claiming and applying the closure to completed buffers 2411 // until we run out of buffers or we need to abort. 2412 while (!has_aborted() && 2413 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 2414 abort_marking_if_regular_check_fail(); 2415 } 2416 2417 // Can't assert qset is empty here, even if not aborted. If concurrent, 2418 // some other thread might be adding to the queue. If not concurrent, 2419 // some other thread might have won the race for the last buffer, but 2420 // has not yet decremented the count.
2421 2422 _draining_satb_buffers = false; 2423 2424 // Again, this was a potentially expensive operation, so decrease the 2425 // limits to get the regular clock call early. 2426 decrease_limits(); 2427 } 2428 2429 void G1CMTask::clear_mark_stats_cache(uint region_idx) { 2430 _mark_stats_cache.reset(region_idx); 2431 } 2432 2433 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() { 2434 return _mark_stats_cache.evict_all(); 2435 } 2436 2437 void G1CMTask::print_stats() { 2438 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls); 2439 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 2440 _elapsed_time_ms, _termination_time_ms); 2441 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms", 2442 _step_times_ms.num(), 2443 _step_times_ms.avg(), 2444 _step_times_ms.sd(), 2445 _step_times_ms.maximum(), 2446 _step_times_ms.sum()); 2447 size_t const hits = _mark_stats_cache.hits(); 2448 size_t const misses = _mark_stats_cache.misses(); 2449 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f", 2450 hits, misses, percent_of(hits, hits + misses)); 2451 } 2452 2453 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) { 2454 return _task_queues->steal(worker_id, task_entry); 2455 } 2456 2457 /***************************************************************************** 2458 2459 The do_marking_step(time_target_ms, ...) method is the building 2460 block of the parallel marking framework. It can be called in parallel 2461 with other invocations of do_marking_step() on different tasks 2462 (but only one per task, obviously) and concurrently with the 2463 mutator threads, or during remark, hence it eliminates the need 2464 for two versions of the code. When called during remark, it will 2465 pick up from where the task left off during the concurrent marking 2466 phase. Interestingly, tasks are also claimable during evacuation 2467 pauses, since do_marking_step() ensures that it aborts before 2468 it needs to yield. 2469 2470 The data structures that it uses to do marking work are the 2471 following: 2472 2473 (1) Marking Bitmap. If there are gray objects that appear only 2474 on the bitmap (this happens either when dealing with an overflow 2475 or when the initial marking phase has simply marked the roots 2476 and didn't push them on the stack), then tasks claim heap 2477 regions whose bitmap they then scan to find gray objects. A 2478 global finger indicates where the end of the last claimed region 2479 is. A local finger indicates how far into the region a task has 2480 scanned. The two fingers are used to determine how to gray an 2481 object (i.e. whether simply marking it is OK, as it will be 2482 visited by a task in the future, or whether it also needs to be 2483 pushed on a stack). 2484 2485 (2) Local Queue. The local queue of the task, which is accessed 2486 reasonably efficiently by the task. Other tasks can steal from 2487 it when they run out of work. Throughout the marking phase, a 2488 task attempts to keep its local queue short but not totally 2489 empty, so that entries are available for stealing by other 2490 tasks. Only when there is no more work will a task totally 2491 drain its local queue. 2492 2493 (3) Global Mark Stack. This handles local queue overflow.
During 2494 marking only sets of entries are moved between it and the local 2495 queues, as access to it requires a mutex and more fine-grained 2496 interaction with it, which might cause contention. If it 2497 overflows, then the marking phase should restart and iterate 2498 over the bitmap to identify gray objects. Throughout the marking 2499 phase, tasks attempt to keep the global mark stack at a small 2500 length but not totally empty, so that entries are available for 2501 popping by other tasks. Only when there is no more work will tasks 2502 totally drain the global mark stack. 2503 2504 (4) SATB Buffer Queue. This is where completed SATB buffers are 2505 made available. Buffers are regularly removed from this queue 2506 and scanned for roots, so that the queue doesn't get too 2507 long. During remark, all completed buffers are processed, as 2508 well as the filled-in parts of any uncompleted buffers. 2509 2510 The do_marking_step() method tries to abort when the time target 2511 has been reached. There are a few other cases when the 2512 do_marking_step() method also aborts: 2513 2514 (1) When the marking phase has been aborted (after a Full GC). 2515 2516 (2) When a global overflow (on the global stack) has been 2517 triggered. Before the task aborts, it will actually sync up with 2518 the other tasks to ensure that all the marking data structures 2519 (local queues, stacks, fingers etc.) are re-initialized so that 2520 when do_marking_step() completes, the marking phase can 2521 immediately restart. 2522 2523 (3) When enough completed SATB buffers are available. The 2524 do_marking_step() method only tries to drain SATB buffers right 2525 at the beginning. So, if enough buffers are available, the 2526 marking step aborts and the SATB buffers are processed at 2527 the beginning of the next invocation. 2528 2529 (4) To yield. When we have to yield, we abort and yield 2530 right at the end of do_marking_step(). This saves us from a lot 2531 of hassle as, by yielding, we might allow a Full GC. If this 2532 happens then objects will be compacted underneath our feet, the 2533 heap might shrink, etc. We save checking for this by just 2534 aborting and doing the yield right at the end. 2535 2536 From the above it follows that the do_marking_step() method should 2537 be called in a loop (or, otherwise, regularly) until it completes. 2538 2539 If a marking step completes without its has_aborted() flag being 2540 true, it means it has completed the current marking phase (and 2541 also all other marking tasks have done so and have all synced up). 2542 2543 A method called regular_clock_call() is invoked "regularly" (in 2544 sub-ms intervals) throughout marking. It is this clock method that 2545 checks all the abort conditions which were mentioned above and 2546 decides when the task should abort. A work-based scheme is used to 2547 trigger this clock method: when the number of object words the 2548 marking phase has scanned or the number of references the marking 2549 phase has visited reaches a given limit. Additional invocations of 2550 the clock method have been planted in a few other strategic places 2551 too. The initial reason for the clock method was to avoid calling 2552 vtime too regularly, as it is quite expensive. So, once it was in 2553 place, it was natural to piggy-back all the other conditions on it 2554 too and not constantly check them throughout the code. 2555 2556 If do_termination is true then do_marking_step will enter its 2557 termination protocol.
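    As an illustrative sketch (editorial; compare the remark task and the
    serial/parallel reference processing closures earlier in this file), a
    caller that follows this contract looks like:

      do {
        task->do_marking_step(target_ms,
                              true  /* do_termination */,
                              is_serial);
      } while (task->has_aborted() && !cm->has_overflown());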
2558 2559 The value of is_serial must be true when do_marking_step is being 2560 called serially (i.e. by the VMThread) and do_marking_step should 2561 skip any synchronization in the termination and overflow code. 2562 Examples include the serial remark code and the serial reference 2563 processing closures. 2564 2565 The value of is_serial must be false when do_marking_step is 2566 being called by any of the worker threads in a work gang. 2567 Examples include the concurrent marking code (CMMarkingTask), 2568 the MT remark code, and the MT reference processing closures. 2569 2570 *****************************************************************************/ 2571 2572 void G1CMTask::do_marking_step(double time_target_ms, 2573 bool do_termination, 2574 bool is_serial) { 2575 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2576 2577 _start_time_ms = os::elapsedVTime() * 1000.0; 2578 2579 // If do_stealing is true then do_marking_step will attempt to 2580 // steal work from the other G1CMTasks. It only makes sense to 2581 // enable stealing when the termination protocol is enabled 2582 // and do_marking_step() is not being called serially. 2583 bool do_stealing = do_termination && !is_serial; 2584 2585 G1Predictions const& predictor = _g1h->policy()->predictor(); 2586 double diff_prediction_ms = predictor.predict_zero_bounded(&_marking_step_diff_ms); 2587 _time_target_ms = time_target_ms - diff_prediction_ms; 2588 2589 // set up the variables that are used in the work-based scheme to 2590 // call the regular clock method 2591 _words_scanned = 0; 2592 _refs_reached = 0; 2593 recalculate_limits(); 2594 2595 // clear all flags 2596 clear_has_aborted(); 2597 _has_timed_out = false; 2598 _draining_satb_buffers = false; 2599 2600 ++_calls; 2601 2602 // Set up the bitmap and oop closures. Anything that uses them is 2603 // eventually called from this method, so it is OK to allocate these 2604 // statically. 2605 G1CMBitMapClosure bitmap_closure(this, _cm); 2606 G1CMOopClosure cm_oop_closure(_g1h, this); 2607 set_cm_oop_closure(&cm_oop_closure); 2608 2609 if (_cm->has_overflown()) { 2610 // This can happen if the mark stack overflows during a GC pause 2611 // and this task, after a yield point, restarts. We have to abort 2612 // as we need to get into the overflow protocol which happens 2613 // right at the end of this task. 2614 set_has_aborted(); 2615 } 2616 2617 // First drain any available SATB buffers. After this, we will not 2618 // look at SATB buffers before the next invocation of this method. 2619 // If enough completed SATB buffers are queued up, the regular clock 2620 // will abort this task so that it restarts. 2621 drain_satb_buffers(); 2622 // ...then partially drain the local queue and the global stack 2623 drain_local_queue(true); 2624 drain_global_stack(true); 2625 2626 do { 2627 if (!has_aborted() && _curr_region != NULL) { 2628 // This means that we're already holding on to a region. 2629 assert(_finger != NULL, "if region is not NULL, then the finger " 2630 "should not be NULL either"); 2631 2632 // We might have restarted this task after an evacuation pause 2633 // which might have evacuated the region we're holding on to 2634 // underneath our feet. Let's read its limit again to make sure 2635 // that we do not iterate over a region of the heap that 2636 // contains garbage (update_region_limit() will also move 2637 // _finger to the start of the region if it is found empty). 
2638 update_region_limit(); 2639 // We will start from _finger not from the start of the region, 2640 // as we might be restarting this task after aborting half-way 2641 // through scanning this region. In this case, _finger points to 2642 // the address where we last found a marked object. If this is a 2643 // fresh region, _finger points to start(). 2644 MemRegion mr = MemRegion(_finger, _region_limit); 2645 2646 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2647 "humongous regions should go around loop once only"); 2648 2649 // Some special cases: 2650 // If the memory region is empty, we can just give up the region. 2651 // If the current region is humongous then we only need to check 2652 // the bitmap for the bit associated with the start of the object, 2653 // scan the object if it's live, and give up the region. 2654 // Otherwise, let's iterate over the bitmap of the part of the region 2655 // that is left. 2656 // If the iteration is successful, give up the region. 2657 if (mr.is_empty()) { 2658 giveup_current_region(); 2659 abort_marking_if_regular_check_fail(); 2660 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2661 if (_next_mark_bitmap->is_marked(mr.start())) { 2662 // The object is marked - apply the closure 2663 bitmap_closure.do_addr(mr.start()); 2664 } 2665 // Even if this task aborted while scanning the humongous object 2666 // we can (and should) give up the current region. 2667 giveup_current_region(); 2668 abort_marking_if_regular_check_fail(); 2669 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) { 2670 giveup_current_region(); 2671 abort_marking_if_regular_check_fail(); 2672 } else { 2673 assert(has_aborted(), "currently the only way to do so"); 2674 // The only way to abort the bitmap iteration is to return 2675 // false from the do_bit() method. However, inside the 2676 // do_bit() method we move the _finger to point to the 2677 // object currently being looked at. So, if we bail out, we 2678 // have definitely set _finger to something non-null. 2679 assert(_finger != NULL, "invariant"); 2680 2681 // Region iteration was actually aborted. So now _finger 2682 // points to the address of the object we last scanned. If we 2683 // leave it there, when we restart this task, we will rescan 2684 // the object. It is easy to avoid this. We move the finger by 2685 // enough to point to the next possible object header. 2686 assert(_finger < _region_limit, "invariant"); 2687 HeapWord* const new_finger = _finger + ((oop)_finger)->size(); 2688 // Check if bitmap iteration was aborted while scanning the last object 2689 if (new_finger >= _region_limit) { 2690 giveup_current_region(); 2691 } else { 2692 move_finger_to(new_finger); 2693 } 2694 } 2695 } 2696 // At this point we have either completed iterating over the 2697 // region we were holding on to, or we have aborted. 2698 2699 // We then partially drain the local queue and the global stack. 2700 // (Do we really need this?) 2701 drain_local_queue(true); 2702 drain_global_stack(true); 2703 2704 // Read the note on the claim_region() method on why it might 2705 // return NULL with potentially more regions available for 2706 // claiming and why we have to check out_of_regions() to determine 2707 // whether we're done or not. 2708 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 2709 // We are going to try to claim a new region. We should have 2710 // given up on the previous one. 
2711 // Separated the asserts so that we know which one fires. 2712 assert(_curr_region == NULL, "invariant"); 2713 assert(_finger == NULL, "invariant"); 2714 assert(_region_limit == NULL, "invariant"); 2715 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 2716 if (claimed_region != NULL) { 2717 // Yes, we managed to claim one 2718 setup_for_region(claimed_region); 2719 assert(_curr_region == claimed_region, "invariant"); 2720 } 2721 // It is important to call the regular clock here. It might take 2722 // a while to claim a region if, for example, we hit a large 2723 // block of empty regions. So we need to call the regular clock 2724 // method once round the loop to make sure it's called 2725 // frequently enough. 2726 abort_marking_if_regular_check_fail(); 2727 } 2728 2729 if (!has_aborted() && _curr_region == NULL) { 2730 assert(_cm->out_of_regions(), 2731 "at this point we should be out of regions"); 2732 } 2733 } while (_curr_region != NULL && !has_aborted()); 2734 2735 if (!has_aborted()) { 2736 // We cannot check whether the global stack is empty, since other 2737 // tasks might be pushing objects to it concurrently. 2738 assert(_cm->out_of_regions(), 2739 "at this point we should be out of regions"); 2740 // Try to reduce the number of available SATB buffers so that 2741 // remark has less work to do. 2742 drain_satb_buffers(); 2743 } 2744 2745 // Since we've done everything else, we can now totally drain the 2746 // local queue and global stack. 2747 drain_local_queue(false); 2748 drain_global_stack(false); 2749 2750 // Attempt at work stealing from other tasks' queues. 2751 if (do_stealing && !has_aborted()) { 2752 // We have not aborted. This means that we have finished all that 2753 // we could. Let's try to do some stealing... 2754 2755 // We cannot check whether the global stack is empty, since other 2756 // tasks might be pushing objects to it concurrently. 2757 assert(_cm->out_of_regions() && _task_queue->size() == 0, 2758 "only way to reach here"); 2759 while (!has_aborted()) { 2760 G1TaskQueueEntry entry; 2761 if (_cm->try_stealing(_worker_id, entry)) { 2762 scan_task_entry(entry); 2763 2764 // And since we're towards the end, let's totally drain the 2765 // local queue and global stack. 2766 drain_local_queue(false); 2767 drain_global_stack(false); 2768 } else { 2769 break; 2770 } 2771 } 2772 } 2773 2774 // We still haven't aborted. Now, let's try to get into the 2775 // termination protocol. 2776 if (do_termination && !has_aborted()) { 2777 // We cannot check whether the global stack is empty, since other 2778 // tasks might be concurrently pushing objects on it. 2779 // Separated the asserts so that we know which one fires. 2780 assert(_cm->out_of_regions(), "only way to reach here"); 2781 assert(_task_queue->size() == 0, "only way to reach here"); 2782 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 2783 2784 // The G1CMTask class also extends the TerminatorTerminator class, 2785 // hence its should_exit_termination() method will also decide 2786 // whether to exit the termination protocol or not. 2787 bool finished = (is_serial || 2788 _cm->terminator()->offer_termination(this)); 2789 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 2790 _termination_time_ms += 2791 termination_end_time_ms - _termination_start_time_ms; 2792 2793 if (finished) { 2794 // We're all done. 2795 2796 // We can now guarantee that the global stack is empty, since 2797 // all other tasks have finished.
We separated the guarantees so 2798 // that, if a condition is false, we can immediately find out 2799 // which one. 2800 guarantee(_cm->out_of_regions(), "only way to reach here"); 2801 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 2802 guarantee(_task_queue->size() == 0, "only way to reach here"); 2803 guarantee(!_cm->has_overflown(), "only way to reach here"); 2804 guarantee(!has_aborted(), "should never happen if termination has completed"); 2805 } else { 2806 // Apparently there's more work to do. Let's abort this task. The caller 2807 // will restart it and we can hopefully find more things to do. 2808 set_has_aborted(); 2809 } 2810 } 2811 2812 // Mainly for debugging purposes to make sure that a pointer to the 2813 // closure which was statically allocated in this frame doesn't 2814 // escape it by accident. 2815 set_cm_oop_closure(NULL); 2816 double end_time_ms = os::elapsedVTime() * 1000.0; 2817 double elapsed_time_ms = end_time_ms - _start_time_ms; 2818 // Update the step history. 2819 _step_times_ms.add(elapsed_time_ms); 2820 2821 if (has_aborted()) { 2822 // The task was aborted for some reason. 2823 if (_has_timed_out) { 2824 double diff_ms = elapsed_time_ms - _time_target_ms; 2825 // Keep statistics of how well we did with respect to hitting 2826 // our target only if we actually timed out (if we aborted for 2827 // other reasons, then the results might get skewed). 2828 _marking_step_diff_ms.add(diff_ms); 2829 } 2830 2831 if (_cm->has_overflown()) { 2832 // This is the interesting one. We aborted because a global 2833 // overflow was raised. This means we have to restart the 2834 // marking phase and start iterating over regions. However, in 2835 // order to do this we have to make sure that all tasks stop 2836 // what they are doing and re-initialize in a safe manner. We 2837 // will achieve this with the use of two barrier sync points. 2838 2839 if (!is_serial) { 2840 // We only need to enter the sync barrier if being called 2841 // from a parallel context 2842 _cm->enter_first_sync_barrier(_worker_id); 2843 2844 // When we exit this sync barrier we know that all tasks have 2845 // stopped doing marking work. So, it's now safe to 2846 // re-initialize our data structures. 2847 } 2848 2849 clear_region_fields(); 2850 flush_mark_stats_cache(); 2851 2852 if (!is_serial) { 2853 // If we're executing the concurrent phase of marking, reset the marking 2854 // state; otherwise the marking state is reset after reference processing, 2855 // during the remark pause. 2856 // If we reset here as a result of an overflow during the remark we will 2857 // see assertion failures from any subsequent set_concurrency_and_phase() 2858 // calls. 2859 if (_cm->concurrent() && _worker_id == 0) { 2860 // Worker 0 is responsible for clearing the global data structures because 2861 // of an overflow. During STW we should not clear the overflow flag (in 2862 // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit 2863 // this method to abort the pause and restart concurrent marking. 2864 _cm->reset_marking_for_restart(); 2865 2866 log_info(gc, marking)("Concurrent Mark reset for overflow"); 2867 } 2868 2869 // ...and enter the second barrier. 2870 _cm->enter_second_sync_barrier(_worker_id); 2871 } 2872 // At this point, if we're in the concurrent phase of 2873 // marking, everything has been re-initialized and we're 2874 // ready to restart.
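// (Editorial summary of the parallel overflow protocol above:
//
//   _cm->enter_first_sync_barrier(_worker_id);   // all tasks stop marking
//   clear_region_fields();                       // per-task re-initialization
//   flush_mark_stats_cache();
//   if (_cm->concurrent() && _worker_id == 0) {
//     _cm->reset_marking_for_restart();          // worker 0 resets global state
//   }
//   _cm->enter_second_sync_barrier(_worker_id);  // all tasks resume together
// )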
2875 } 2876 } 2877 } 2878 2879 G1CMTask::G1CMTask(uint worker_id, 2880 G1ConcurrentMark* cm, 2881 G1CMTaskQueue* task_queue, 2882 G1RegionMarkStats* mark_stats, 2883 uint max_regions) : 2884 _objArray_processor(this), 2885 _worker_id(worker_id), 2886 _g1h(G1CollectedHeap::heap()), 2887 _cm(cm), 2888 _next_mark_bitmap(NULL), 2889 _task_queue(task_queue), 2890 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize), 2891 _calls(0), 2892 _time_target_ms(0.0), 2893 _start_time_ms(0.0), 2894 _cm_oop_closure(NULL), 2895 _curr_region(NULL), 2896 _finger(NULL), 2897 _region_limit(NULL), 2898 _words_scanned(0), 2899 _words_scanned_limit(0), 2900 _real_words_scanned_limit(0), 2901 _refs_reached(0), 2902 _refs_reached_limit(0), 2903 _real_refs_reached_limit(0), 2904 _has_aborted(false), 2905 _has_timed_out(false), 2906 _draining_satb_buffers(false), 2907 _step_times_ms(), 2908 _elapsed_time_ms(0.0), 2909 _termination_time_ms(0.0), 2910 _termination_start_time_ms(0.0), 2911 _marking_step_diff_ms() 2912 { 2913 guarantee(task_queue != NULL, "invariant"); 2914 2915 _marking_step_diff_ms.add(0.5); 2916 } 2917 2918 // These are formatting macros that are used below to ensure 2919 // consistent formatting. The *_H_* versions are used to format the 2920 // header for a particular value and they should be kept consistent 2921 // with the corresponding macro. Also note that most of the macros add 2922 // the necessary white space (as a prefix) which makes them a bit 2923 // easier to compose. 2924 2925 // All the output lines are prefixed with this string to be able to 2926 // identify them easily in a large log file. 2927 #define G1PPRL_LINE_PREFIX "###" 2928 2929 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2930 #ifdef _LP64 2931 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2932 #else // _LP64 2933 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2934 #endif // _LP64 2935 2936 // For per-region info 2937 #define G1PPRL_TYPE_FORMAT " %-4s" 2938 #define G1PPRL_TYPE_H_FORMAT " %4s" 2939 #define G1PPRL_STATE_FORMAT " %-5s" 2940 #define G1PPRL_STATE_H_FORMAT " %5s" 2941 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2942 #define G1PPRL_BYTE_H_FORMAT " %9s" 2943 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2944 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2945 2946 // For summary info 2947 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2948 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2949 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2950 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2951 2952 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2953 _total_used_bytes(0), _total_capacity_bytes(0), 2954 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2955 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2956 { 2957 if (!log_is_enabled(Trace, gc, liveness)) { 2958 return; 2959 } 2960 2961 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2962 MemRegion g1_reserved = g1h->g1_reserved(); 2963 double now = os::elapsedTime(); 2964 2965 // Print the header of the output. 
2966 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2967 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2968 G1PPRL_SUM_ADDR_FORMAT("reserved") 2969 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2970 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2971 HeapRegion::GrainBytes); 2972 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2973 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2974 G1PPRL_TYPE_H_FORMAT 2975 G1PPRL_ADDR_BASE_H_FORMAT 2976 G1PPRL_BYTE_H_FORMAT 2977 G1PPRL_BYTE_H_FORMAT 2978 G1PPRL_BYTE_H_FORMAT 2979 G1PPRL_DOUBLE_H_FORMAT 2980 G1PPRL_BYTE_H_FORMAT 2981 G1PPRL_STATE_H_FORMAT 2982 G1PPRL_BYTE_H_FORMAT, 2983 "type", "address-range", 2984 "used", "prev-live", "next-live", "gc-eff", 2985 "remset", "state", "code-roots"); 2986 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2987 G1PPRL_TYPE_H_FORMAT 2988 G1PPRL_ADDR_BASE_H_FORMAT 2989 G1PPRL_BYTE_H_FORMAT 2990 G1PPRL_BYTE_H_FORMAT 2991 G1PPRL_BYTE_H_FORMAT 2992 G1PPRL_DOUBLE_H_FORMAT 2993 G1PPRL_BYTE_H_FORMAT 2994 G1PPRL_STATE_H_FORMAT 2995 G1PPRL_BYTE_H_FORMAT, 2996 "", "", 2997 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 2998 "(bytes)", "", "(bytes)"); 2999 } 3000 3001 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { 3002 if (!log_is_enabled(Trace, gc, liveness)) { 3003 return false; 3004 } 3005 3006 const char* type = r->get_type_str(); 3007 HeapWord* bottom = r->bottom(); 3008 HeapWord* end = r->end(); 3009 size_t capacity_bytes = r->capacity(); 3010 size_t used_bytes = r->used(); 3011 size_t prev_live_bytes = r->live_bytes(); 3012 size_t next_live_bytes = r->next_live_bytes(); 3013 double gc_eff = r->gc_efficiency(); 3014 size_t remset_bytes = r->rem_set()->mem_size(); 3015 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3016 const char* remset_type = r->rem_set()->get_short_state_str(); 3017 3018 _total_used_bytes += used_bytes; 3019 _total_capacity_bytes += capacity_bytes; 3020 _total_prev_live_bytes += prev_live_bytes; 3021 _total_next_live_bytes += next_live_bytes; 3022 _total_remset_bytes += remset_bytes; 3023 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3024 3025 // Print a line for this particular region. 3026 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3027 G1PPRL_TYPE_FORMAT 3028 G1PPRL_ADDR_BASE_FORMAT 3029 G1PPRL_BYTE_FORMAT 3030 G1PPRL_BYTE_FORMAT 3031 G1PPRL_BYTE_FORMAT 3032 G1PPRL_DOUBLE_FORMAT 3033 G1PPRL_BYTE_FORMAT 3034 G1PPRL_STATE_FORMAT 3035 G1PPRL_BYTE_FORMAT, 3036 type, p2i(bottom), p2i(end), 3037 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3038 remset_bytes, remset_type, strong_code_roots_bytes); 3039 3040 return false; 3041 } 3042 3043 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3044 if (!log_is_enabled(Trace, gc, liveness)) { 3045 return; 3046 } 3047 3048 // add static memory usages to remembered set sizes 3049 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3050 // Print the footer of the output. 
3051 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3052 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3053 " SUMMARY" 3054 G1PPRL_SUM_MB_FORMAT("capacity") 3055 G1PPRL_SUM_MB_PERC_FORMAT("used") 3056 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3057 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3058 G1PPRL_SUM_MB_FORMAT("remset") 3059 G1PPRL_SUM_MB_FORMAT("code-roots"), 3060 bytes_to_mb(_total_capacity_bytes), 3061 bytes_to_mb(_total_used_bytes), 3062 percent_of(_total_used_bytes, _total_capacity_bytes), 3063 bytes_to_mb(_total_prev_live_bytes), 3064 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 3065 bytes_to_mb(_total_next_live_bytes), 3066 percent_of(_total_next_live_bytes, _total_capacity_bytes), 3067 bytes_to_mb(_total_remset_bytes), 3068 bytes_to_mb(_total_strong_code_roots_bytes)); 3069 }
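// Editorial note: a schematic example of the -Xlog:gc+liveness=trace output
// assembled from the G1PPRL_* formats above. Values, widths and the phase
// name are made up for illustration; the phase name is whatever the caller
// passes to this closure's constructor.
//
//   ### PHASE Post-Marking @ 1.234
//   ### HEAP  reserved: 0x00000000c0000000-0x0000000100000000  region-size: 1048576
//   ###
//   ###   type      address-range      used  prev-live  next-live  gc-eff  remset  state  code-roots
//   ###   OLD   0x...0000-0x...0000  1048576    1048576     917504     ...     ...    ...         ...
//   ###
//   ### SUMMARY capacity: 1024.00 MB used: 800.00 MB / 78.12 % ...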