/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void
G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _survivors(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
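// For example, with integer division and a minimum of one, 1-5 STW GC
// workers yield 1 concurrent worker, 8 yield 2, and 16 yield 4.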
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  _root_regions.init(_g1h->survivor(), this);

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use different numbers of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got humongous continues region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ?
                cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_initial_mark() {
  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);
}

void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on.
  // So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that it gets suspended for a Full GC or that an
 * evacuation pause occurs. This is actually safe, since entering the
 * sync barrier is one of the last things do_marking_step() does, and
 * it doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial*/);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                      1, /* Minimum workers */
                                                      _num_concurrent_workers,
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_worker_threads() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}

class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }

    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous starts
      // region. We need to distribute this value across the entire set of regions a
      // humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which
  // roughly one thread should be spawned for this work.
  static const uint RegionsPerThread = 384;
};

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However, we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    _g1h->resize_heap_if_necessary();

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
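    // (The concurrent mark thread checks this flag after the Remark pause
    // and, if it is set, starts another round of concurrent marking.)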
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;
    HRRSCleanupTask* _hrrs_cleanup_task;

  public:
    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                 FreeRegionList* local_cleanup_list,
                                 HRRSCleanupTask* hrrs_cleanup_task) :
      _g1h(g1h),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0),
      _hrrs_cleanup_task(hrrs_cleanup_task) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      } else {
        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {

    HeapRegionRemSet::reset_for_cleanup_tasks();
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1ReclaimEmptyRegionsClosure cl(_g1h,
                                    &local_cleanup_list,
                                    &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock,
                      Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}

void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
  }
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack.
// This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure : public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  uint _ref_counter_limit;
  uint _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
    _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (_cm->has_overflown()) {
      return;
    }
    if (!_task->deal_with_reference(p)) {
      // We did not add anything to the mark bitmap (or mark stack), so there is
      // no point trying to drain it.
      return;
    }
    _ref_counter--;

    if (_ref_counter == 0) {
      // We have dealt with _ref_counter_limit references, pushing them
      // and objects reachable from them on to the local stack (and
      // possibly the global stack). Call G1CMTask::do_marking_step() to
      // process these entries.
      //
      // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
      // there's nothing more to do (i.e. we're done with the entries that
      // were pushed as a result of the G1CMTask::deal_with_reference() calls
      // above) or we overflow.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.
      do {
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
        _task->do_marking_step(mark_step_duration_ms,
                               false /* do_termination */,
                               _is_serial);
      } while (_task->has_aborted() && !_cm->has_overflown());
      _ref_counter = _ref_counter_limit;
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.
class G1CMDrainMarkingStackClosure : public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  virtual void execute(ProcessTask& task, uint ergo_workers);
};

class G1CMRefProcTaskProxy : public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
assert(_workers->active_workers() >= ergo_workers, 1537 "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)", 1538 ergo_workers, _workers->active_workers()); 1539 1540 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 1541 1542 // We need to reset the concurrency level before each 1543 // proxy task execution, so that the termination protocol 1544 // and overflow handling in G1CMTask::do_marking_step() knows 1545 // how many workers to wait for. 1546 _cm->set_concurrency(ergo_workers); 1547 _workers->run_task(&proc_task_proxy, ergo_workers); 1548 } 1549 1550 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) { 1551 ResourceMark rm; 1552 HandleMark hm; 1553 1554 // Is alive closure. 1555 G1CMIsAliveClosure g1_is_alive(_g1h); 1556 1557 // Inner scope to exclude the cleaning of the string table 1558 // from the displayed time. 1559 { 1560 GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm); 1561 1562 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1563 1564 // See the comment in G1CollectedHeap::ref_processing_init() 1565 // about how reference processing currently works in G1. 1566 1567 // Set the soft reference policy 1568 rp->setup_policy(clear_all_soft_refs); 1569 assert(_global_mark_stack.is_empty(), "mark stack should be empty"); 1570 1571 // Instances of the 'Keep Alive' and 'Complete GC' closures used 1572 // in serial reference processing. Note these closures are also 1573 // used for serially processing (by the the current thread) the 1574 // JNI references during parallel reference processing. 1575 // 1576 // These closures do not need to synchronize with the worker 1577 // threads involved in parallel reference processing as these 1578 // instances are executed serially by the current thread (e.g. 1579 // reference processing is not multi-threaded and is thus 1580 // performed by the current thread instead of a gang worker). 1581 // 1582 // The gang tasks involved in parallel reference processing create 1583 // their own instances of these closures, which do their own 1584 // synchronization among themselves. 1585 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 1586 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 1587 1588 // We need at least one active thread. If reference processing 1589 // is not multi-threaded we use the current (VMThread) thread, 1590 // otherwise we use the work gang from the G1CollectedHeap and 1591 // we utilize all the worker threads we can. 1592 bool processing_is_mt = rp->processing_is_mt(); 1593 uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U); 1594 active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U); 1595 1596 // Parallel processing task executor. 1597 G1CMRefProcTaskExecutor par_task_executor(_g1h, this, 1598 _g1h->workers(), active_workers); 1599 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 1600 1601 // Set the concurrency level. The phase was already set prior to 1602 // executing the remark task. 1603 set_concurrency(active_workers); 1604 1605 // Set the degree of MT processing here. If the discovery was done MT, 1606 // the number of threads involved during discovery could differ from 1607 // the number of active workers. This is OK as long as the discovered 1608 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 
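// Illustrative example (values made up, not taken from any particular run):
// with an 8-thread work gang and _max_num_tasks == 4, the clamp above gives
// active_workers == MAX2(MIN2(8, 4), 1U) == 4, so four parallel tasks are
// used and the reference processor's MT degree below is set to 4 as well.
// When processing_is_mt is false, the clamp yields 1 and the serial closures
// created above do all the work on the current thread.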
1609 rp->set_active_mt_degree(active_workers); 1610 1611 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues()); 1612 1613 // Process the weak references. 1614 const ReferenceProcessorStats& stats = 1615 rp->process_discovered_references(&g1_is_alive, 1616 &g1_keep_alive, 1617 &g1_drain_mark_stack, 1618 executor, 1619 &pt); 1620 _gc_tracer_cm->report_gc_reference_stats(stats); 1621 pt.print_all_references(); 1622 1623 // The do_oop work routines of the keep_alive and drain_marking_stack 1624 // oop closures will set the has_overflown flag if we overflow the 1625 // global marking stack. 1626 1627 assert(has_overflown() || _global_mark_stack.is_empty(), 1628 "Mark stack should be empty (unless it has overflown)"); 1629 1630 assert(rp->num_queues() == active_workers, "why not"); 1631 1632 rp->verify_no_references_recorded(); 1633 assert(!rp->discovery_enabled(), "Post condition"); 1634 } 1635 1636 if (has_overflown()) { 1637 // We can not trust g1_is_alive and the contents of the heap if the marking stack 1638 // overflowed while processing references. Exit the VM. 1639 fatal("Overflow during reference processing, can not continue. Please " 1640 "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and " 1641 "restart.", MarkStackSizeMax); 1642 return; 1643 } 1644 1645 assert(_global_mark_stack.is_empty(), "Marking should have completed"); 1646 1647 { 1648 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm); 1649 WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1); 1650 } 1651 1652 // Unload Klasses, String, Code Cache, etc. 1653 if (ClassUnloadingWithConcurrentMark) { 1654 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); 1655 bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm); 1656 _g1h->complete_cleaning(&g1_is_alive, purged_classes); 1657 } else { 1658 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm); 1659 // No need to clean string table as it is treated as strong roots when 1660 // class unloading is disabled. 1661 _g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled()); 1662 } 1663 } 1664 1665 class G1PrecleanYieldClosure : public YieldClosure { 1666 G1ConcurrentMark* _cm; 1667 1668 public: 1669 G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { } 1670 1671 virtual bool should_return() { 1672 return _cm->has_aborted(); 1673 } 1674 1675 virtual bool should_return_fine_grain() { 1676 _cm->do_yield_check(); 1677 return _cm->has_aborted(); 1678 } 1679 }; 1680 1681 void G1ConcurrentMark::preclean() { 1682 assert(G1UseReferencePrecleaning, "Precleaning must be enabled."); 1683 1684 SuspendibleThreadSetJoiner joiner; 1685 1686 G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */); 1687 G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */); 1688 1689 set_concurrency_and_phase(1, true); 1690 1691 G1PrecleanYieldClosure yield_cl(this); 1692 1693 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1694 // Precleaning is single threaded. Temporarily disable MT discovery. 1695 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); 1696 rp->preclean_discovered_references(rp->is_alive_non_header(), 1697 &keep_alive, 1698 &drain_mark_stack, 1699 &yield_cl, 1700 _gc_timer_cm); 1701 } 1702 1703 // When sampling object counts, we already swapped the mark bitmaps, so we need to use 1704 // the prev bitmap determining liveness. 
1705 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1706   G1CollectedHeap* _g1h;
1707 public:
1708   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1709
1710   bool do_object_b(oop obj) {
1711     HeapWord* addr = (HeapWord*)obj;
1712     return addr != NULL &&
1713            (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1714   }
1715 };
1716
1717 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1718   // Depending on whether marking completed, liveness needs to be determined
1719   // using either the next or the prev bitmap.
1720   if (mark_completed) {
1721     G1ObjectCountIsAliveClosure is_alive(_g1h);
1722     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1723   } else {
1724     G1CMIsAliveClosure is_alive(_g1h);
1725     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1726   }
1727 }
1728
1729
1730 void G1ConcurrentMark::swap_mark_bitmaps() {
1731   G1CMBitMap* temp = _prev_mark_bitmap;
1732   _prev_mark_bitmap = _next_mark_bitmap;
1733   _next_mark_bitmap = temp;
1734   _g1h->collector_state()->set_clearing_next_bitmap(true);
1735 }
1736
1737 // Closure for marking entries in SATB buffers.
1738 class G1CMSATBBufferClosure : public SATBBufferClosure {
1739 private:
1740   G1CMTask* _task;
1741   G1CollectedHeap* _g1h;
1742
1743   // This is very similar to G1CMTask::deal_with_reference, but with
1744   // more relaxed requirements for the argument, so this must be more
1745   // circumspect about treating the argument as an object.
1746   void do_entry(void* entry) const {
1747     _task->increment_refs_reached();
1748     oop const obj = static_cast<oop>(entry);
1749     _task->make_reference_grey(obj);
1750   }
1751
1752 public:
1753   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1754     : _task(task), _g1h(g1h) { }
1755
1756   virtual void do_buffer(void** buffer, size_t size) {
1757     for (size_t i = 0; i < size; ++i) {
1758       do_entry(buffer[i]);
1759     }
1760   }
1761 };
1762
1763 class G1RemarkThreadsClosure : public ThreadClosure {
1764   G1CMSATBBufferClosure _cm_satb_cl;
1765   G1CMOopClosure _cm_cl;
1766   MarkingCodeBlobClosure _code_cl;
1767   int _thread_parity;
1768
1769 public:
1770   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1771     _cm_satb_cl(task, g1h),
1772     _cm_cl(g1h, task),
1773     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1774     _thread_parity(Threads::thread_claim_parity()) {}
1775
1776   void do_thread(Thread* thread) {
1777     if (thread->is_Java_thread()) {
1778       if (thread->claim_oops_do(true, _thread_parity)) {
1779         JavaThread* jt = (JavaThread*)thread;
1780
1781         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking,
1782         // however oops reachable from nmethods have very complex lifecycles:
1783         // * Alive if on the stack of an executing method
1784         // * Weakly reachable otherwise
1785         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1786         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1787 jt->nmethods_do(&_code_cl); 1788 1789 G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl); 1790 } 1791 } else if (thread->is_VM_thread()) { 1792 if (thread->claim_oops_do(true, _thread_parity)) { 1793 G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl); 1794 } 1795 } 1796 } 1797 }; 1798 1799 class G1CMRemarkTask : public AbstractGangTask { 1800 G1ConcurrentMark* _cm; 1801 public: 1802 void work(uint worker_id) { 1803 G1CMTask* task = _cm->task(worker_id); 1804 task->record_start_time(); 1805 { 1806 ResourceMark rm; 1807 HandleMark hm; 1808 1809 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task); 1810 Threads::threads_do(&threads_f); 1811 } 1812 1813 do { 1814 task->do_marking_step(1000000000.0 /* something very large */, 1815 true /* do_termination */, 1816 false /* is_serial */); 1817 } while (task->has_aborted() && !_cm->has_overflown()); 1818 // If we overflow, then we do not want to restart. We instead 1819 // want to abort remark and do concurrent marking again. 1820 task->record_end_time(); 1821 } 1822 1823 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) : 1824 AbstractGangTask("Par Remark"), _cm(cm) { 1825 _cm->terminator()->reset_for_reuse(active_workers); 1826 } 1827 }; 1828 1829 void G1ConcurrentMark::finalize_marking() { 1830 ResourceMark rm; 1831 HandleMark hm; 1832 1833 _g1h->ensure_parsability(false); 1834 1835 // this is remark, so we'll use up all active threads 1836 uint active_workers = _g1h->workers()->active_workers(); 1837 set_concurrency_and_phase(active_workers, false /* concurrent */); 1838 // Leave _parallel_marking_threads at it's 1839 // value originally calculated in the G1ConcurrentMark 1840 // constructor and pass values of the active workers 1841 // through the gang in the task. 1842 1843 { 1844 StrongRootsScope srs(active_workers); 1845 1846 G1CMRemarkTask remarkTask(this, active_workers); 1847 // We will start all available threads, even if we decide that the 1848 // active_workers will be fewer. The extra ones will just bail out 1849 // immediately. 1850 _g1h->workers()->run_task(&remarkTask); 1851 } 1852 1853 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 1854 guarantee(has_overflown() || 1855 satb_mq_set.completed_buffers_num() == 0, 1856 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT, 1857 BOOL_TO_STR(has_overflown()), 1858 satb_mq_set.completed_buffers_num()); 1859 1860 print_stats(); 1861 } 1862 1863 void G1ConcurrentMark::flush_all_task_caches() { 1864 size_t hits = 0; 1865 size_t misses = 0; 1866 for (uint i = 0; i < _max_num_tasks; i++) { 1867 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache(); 1868 hits += stats.first; 1869 misses += stats.second; 1870 } 1871 size_t sum = hits + misses; 1872 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf", 1873 hits, misses, percent_of(hits, sum)); 1874 } 1875 1876 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) { 1877 _prev_mark_bitmap->clear_range(mr); 1878 } 1879 1880 HeapRegion* 1881 G1ConcurrentMark::claim_region(uint worker_id) { 1882 // "checkpoint" the finger 1883 HeapWord* finger = _finger; 1884 1885 while (finger < _heap.end()) { 1886 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 1887 1888 HeapRegion* curr_region = _g1h->heap_region_containing(finger); 1889 // Make sure that the reads below do not float before loading curr_region. 
1890 OrderAccess::loadload(); 1891 // Above heap_region_containing may return NULL as we always scan claim 1892 // until the end of the heap. In this case, just jump to the next region. 1893 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; 1894 1895 // Is the gap between reading the finger and doing the CAS too long? 1896 HeapWord* res = Atomic::cmpxchg(end, &_finger, finger); 1897 if (res == finger && curr_region != NULL) { 1898 // we succeeded 1899 HeapWord* bottom = curr_region->bottom(); 1900 HeapWord* limit = curr_region->next_top_at_mark_start(); 1901 1902 // notice that _finger == end cannot be guaranteed here since, 1903 // someone else might have moved the finger even further 1904 assert(_finger >= end, "the finger should have moved forward"); 1905 1906 if (limit > bottom) { 1907 return curr_region; 1908 } else { 1909 assert(limit == bottom, 1910 "the region limit should be at bottom"); 1911 // we return NULL and the caller should try calling 1912 // claim_region() again. 1913 return NULL; 1914 } 1915 } else { 1916 assert(_finger > finger, "the finger should have moved forward"); 1917 // read it again 1918 finger = _finger; 1919 } 1920 } 1921 1922 return NULL; 1923 } 1924 1925 #ifndef PRODUCT 1926 class VerifyNoCSetOops { 1927 G1CollectedHeap* _g1h; 1928 const char* _phase; 1929 int _info; 1930 1931 public: 1932 VerifyNoCSetOops(const char* phase, int info = -1) : 1933 _g1h(G1CollectedHeap::heap()), 1934 _phase(phase), 1935 _info(info) 1936 { } 1937 1938 void operator()(G1TaskQueueEntry task_entry) const { 1939 if (task_entry.is_array_slice()) { 1940 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1941 return; 1942 } 1943 guarantee(oopDesc::is_oop(task_entry.obj()), 1944 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1945 p2i(task_entry.obj()), _phase, _info); 1946 guarantee(!_g1h->is_in_cset(task_entry.obj()), 1947 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1948 p2i(task_entry.obj()), _phase, _info); 1949 } 1950 }; 1951 1952 void G1ConcurrentMark::verify_no_cset_oops() { 1953 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1954 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { 1955 return; 1956 } 1957 1958 // Verify entries on the global mark stack 1959 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1960 1961 // Verify entries on the task queues 1962 for (uint i = 0; i < _max_num_tasks; ++i) { 1963 G1CMTaskQueue* queue = _task_queues->queue(i); 1964 queue->iterate(VerifyNoCSetOops("Queue", i)); 1965 } 1966 1967 // Verify the global finger 1968 HeapWord* global_finger = finger(); 1969 if (global_finger != NULL && global_finger < _heap.end()) { 1970 // Since we always iterate over all regions, we might get a NULL HeapRegion 1971 // here. 1972 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1973 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1974 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1975 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1976 } 1977 1978 // Verify the task fingers 1979 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 1980 for (uint i = 0; i < _num_concurrent_workers; ++i) { 1981 G1CMTask* task = _tasks[i]; 1982 HeapWord* task_finger = task->finger(); 1983 if (task_finger != NULL && task_finger < _heap.end()) { 1984 // See above note on the global finger verification. 
1985 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 1986 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 1987 !task_hr->in_collection_set(), 1988 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1989 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 1990 } 1991 } 1992 } 1993 #endif // PRODUCT 1994 1995 void G1ConcurrentMark::rebuild_rem_set_concurrently() { 1996 _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset); 1997 } 1998 1999 void G1ConcurrentMark::print_stats() { 2000 if (!log_is_enabled(Debug, gc, stats)) { 2001 return; 2002 } 2003 log_debug(gc, stats)("---------------------------------------------------------------------"); 2004 for (size_t i = 0; i < _num_active_tasks; ++i) { 2005 _tasks[i]->print_stats(); 2006 log_debug(gc, stats)("---------------------------------------------------------------------"); 2007 } 2008 } 2009 2010 void G1ConcurrentMark::concurrent_cycle_abort() { 2011 if (!cm_thread()->during_cycle() || _has_aborted) { 2012 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2013 return; 2014 } 2015 2016 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2017 // concurrent bitmap clearing. 2018 { 2019 GCTraceTime(Debug, gc) debug("Clear Next Bitmap"); 2020 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false); 2021 } 2022 // Note we cannot clear the previous marking bitmap here 2023 // since VerifyDuringGC verifies the objects marked during 2024 // a full GC against the previous bitmap. 2025 2026 // Empty mark stack 2027 reset_marking_for_restart(); 2028 for (uint i = 0; i < _max_num_tasks; ++i) { 2029 _tasks[i]->clear_region_fields(); 2030 } 2031 _first_overflow_barrier_sync.abort(); 2032 _second_overflow_barrier_sync.abort(); 2033 _has_aborted = true; 2034 2035 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2036 satb_mq_set.abandon_partial_marking(); 2037 // This can be called either during or outside marking, we'll read 2038 // the expected_active value from the SATB queue set. 2039 satb_mq_set.set_active_all_threads( 2040 false, /* new active value */ 2041 satb_mq_set.is_active() /* expected_active */); 2042 } 2043 2044 static void print_ms_time_info(const char* prefix, const char* name, 2045 NumberSeq& ns) { 2046 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2047 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2048 if (ns.num() > 0) { 2049 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", 2050 prefix, ns.sd(), ns.maximum()); 2051 } 2052 } 2053 2054 void G1ConcurrentMark::print_summary_info() { 2055 Log(gc, marking) log; 2056 if (!log.is_trace()) { 2057 return; 2058 } 2059 2060 log.trace(" Concurrent marking:"); 2061 print_ms_time_info(" ", "init marks", _init_times); 2062 print_ms_time_info(" ", "remarks", _remark_times); 2063 { 2064 print_ms_time_info(" ", "final marks", _remark_mark_times); 2065 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2066 2067 } 2068 print_ms_time_info(" ", "cleanups", _cleanup_times); 2069 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2070 _total_cleanup_time, (_cleanup_times.num() > 0 ? 
_total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2071 log.trace(" Total stop_world time = %8.2f s.", 2072 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2073 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2074 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum()); 2075 } 2076 2077 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2078 _concurrent_workers->print_worker_threads_on(st); 2079 } 2080 2081 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2082 _concurrent_workers->threads_do(tc); 2083 } 2084 2085 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2086 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2087 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap)); 2088 _prev_mark_bitmap->print_on_error(st, " Prev Bits: "); 2089 _next_mark_bitmap->print_on_error(st, " Next Bits: "); 2090 } 2091 2092 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2093 ReferenceProcessor* result = g1h->ref_processor_cm(); 2094 assert(result != NULL, "CM reference processor should not be NULL"); 2095 return result; 2096 } 2097 2098 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2099 G1CMTask* task) 2100 : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)), 2101 _g1h(g1h), _task(task) 2102 { } 2103 2104 void G1CMTask::setup_for_region(HeapRegion* hr) { 2105 assert(hr != NULL, 2106 "claim_region() should have filtered out NULL regions"); 2107 _curr_region = hr; 2108 _finger = hr->bottom(); 2109 update_region_limit(); 2110 } 2111 2112 void G1CMTask::update_region_limit() { 2113 HeapRegion* hr = _curr_region; 2114 HeapWord* bottom = hr->bottom(); 2115 HeapWord* limit = hr->next_top_at_mark_start(); 2116 2117 if (limit == bottom) { 2118 // The region was collected underneath our feet. 2119 // We set the finger to bottom to ensure that the bitmap 2120 // iteration that will follow this will not do anything. 2121 // (this is not a condition that holds when we set the region up, 2122 // as the region is not supposed to be empty in the first place) 2123 _finger = bottom; 2124 } else if (limit >= _region_limit) { 2125 assert(limit >= _finger, "peace of mind"); 2126 } else { 2127 assert(limit < _region_limit, "only way to get here"); 2128 // This can happen under some pretty unusual circumstances. An 2129 // evacuation pause empties the region underneath our feet (NTAMS 2130 // at bottom). We then do some allocation in the region (NTAMS 2131 // stays at bottom), followed by the region being used as a GC 2132 // alloc region (NTAMS will move to top() and the objects 2133 // originally below it will be grayed). All objects now marked in 2134 // the region are explicitly grayed, if below the global finger, 2135 // and we do not need in fact to scan anything else. So, we simply 2136 // set _finger to be limit to ensure that the bitmap iteration 2137 // doesn't do anything. 2138 _finger = limit; 2139 } 2140 2141 _region_limit = limit; 2142 } 2143 2144 void G1CMTask::giveup_current_region() { 2145 assert(_curr_region != NULL, "invariant"); 2146 clear_region_fields(); 2147 } 2148 2149 void G1CMTask::clear_region_fields() { 2150 // Values for these three fields that indicate that we're not 2151 // holding on to a region. 
2152   _curr_region  = NULL;
2153   _finger       = NULL;
2154   _region_limit = NULL;
2155 }
2156
2157 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2158   if (cm_oop_closure == NULL) {
2159     assert(_cm_oop_closure != NULL, "invariant");
2160   } else {
2161     assert(_cm_oop_closure == NULL, "invariant");
2162   }
2163   _cm_oop_closure = cm_oop_closure;
2164 }
2165
2166 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2167   guarantee(next_mark_bitmap != NULL, "invariant");
2168   _next_mark_bitmap = next_mark_bitmap;
2169   clear_region_fields();
2170
2171   _calls = 0;
2172   _elapsed_time_ms = 0.0;
2173   _termination_time_ms = 0.0;
2174   _termination_start_time_ms = 0.0;
2175
2176   _mark_stats_cache.reset();
2177 }
2178
2179 bool G1CMTask::should_exit_termination() {
2180   regular_clock_call();
2181   // This is called when we are in the termination protocol. We should
2182   // quit if, for some reason, this task wants to abort or the global
2183   // stack is not empty (this means that we can get work from it).
2184   return !_cm->mark_stack_empty() || has_aborted();
2185 }
2186
2187 void G1CMTask::reached_limit() {
2188   assert(_words_scanned >= _words_scanned_limit ||
2189          _refs_reached >= _refs_reached_limit,
2190          "shouldn't have been called otherwise");
2191   regular_clock_call();
2192 }
2193
2194 void G1CMTask::regular_clock_call() {
2195   if (has_aborted()) {
2196     return;
2197   }
2198
2199   // First, we need to recalculate the words scanned and refs reached
2200   // limits for the next clock call.
2201   recalculate_limits();
2202
2203   // During the regular clock call we do the following
2204
2205   // (1) If an overflow has been flagged, then we abort.
2206   if (_cm->has_overflown()) {
2207     set_has_aborted();
2208     return;
2209   }
2210
2211   // If we are not concurrent (i.e. we're doing remark) we don't need
2212   // to check anything else. The other steps are only needed during
2213   // the concurrent marking phase.
2214   if (!_cm->concurrent()) {
2215     return;
2216   }
2217
2218   // (2) If marking has been aborted for Full GC, then we also abort.
2219   if (_cm->has_aborted()) {
2220     set_has_aborted();
2221     return;
2222   }
2223
2224   double curr_time_ms = os::elapsedVTime() * 1000.0;
2225
2226   // (3) We check whether we should yield. If we have to, then we abort.
2227   if (SuspendibleThreadSet::should_yield()) {
2228     // We should yield. To do this we abort the task. The caller is
2229     // responsible for yielding.
2230     set_has_aborted();
2231     return;
2232   }
2233
2234   // (4) We check whether we've reached our time quota. If we have,
2235   // then we abort.
2236   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2237   if (elapsed_time_ms > _time_target_ms) {
2238     set_has_aborted();
2239     _has_timed_out = true;
2240     return;
2241   }
2242
2243   // (5) Finally, we check whether there are enough completed SATB
2244   // buffers available for processing. If there are, we abort.
2245 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2246 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 2247 // we do need to process SATB buffers, we'll abort and restart 2248 // the marking task to do so 2249 set_has_aborted(); 2250 return; 2251 } 2252 } 2253 2254 void G1CMTask::recalculate_limits() { 2255 _real_words_scanned_limit = _words_scanned + words_scanned_period; 2256 _words_scanned_limit = _real_words_scanned_limit; 2257 2258 _real_refs_reached_limit = _refs_reached + refs_reached_period; 2259 _refs_reached_limit = _real_refs_reached_limit; 2260 } 2261 2262 void G1CMTask::decrease_limits() { 2263 // This is called when we believe that we're going to do an infrequent 2264 // operation which will increase the per byte scanned cost (i.e. move 2265 // entries to/from the global stack). It basically tries to decrease the 2266 // scanning limit so that the clock is called earlier. 2267 2268 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4; 2269 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4; 2270 } 2271 2272 void G1CMTask::move_entries_to_global_stack() { 2273 // Local array where we'll store the entries that will be popped 2274 // from the local queue. 2275 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2276 2277 size_t n = 0; 2278 G1TaskQueueEntry task_entry; 2279 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) { 2280 buffer[n] = task_entry; 2281 ++n; 2282 } 2283 if (n < G1CMMarkStack::EntriesPerChunk) { 2284 buffer[n] = G1TaskQueueEntry(); 2285 } 2286 2287 if (n > 0) { 2288 if (!_cm->mark_stack_push(buffer)) { 2289 set_has_aborted(); 2290 } 2291 } 2292 2293 // This operation was quite expensive, so decrease the limits. 2294 decrease_limits(); 2295 } 2296 2297 bool G1CMTask::get_entries_from_global_stack() { 2298 // Local array where we'll store the entries that will be popped 2299 // from the global stack. 2300 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2301 2302 if (!_cm->mark_stack_pop(buffer)) { 2303 return false; 2304 } 2305 2306 // We did actually pop at least one entry. 2307 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) { 2308 G1TaskQueueEntry task_entry = buffer[i]; 2309 if (task_entry.is_null()) { 2310 break; 2311 } 2312 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj())); 2313 bool success = _task_queue->push(task_entry); 2314 // We only call this when the local queue is empty or under a 2315 // given target limit. So, we do not expect this push to fail. 2316 assert(success, "invariant"); 2317 } 2318 2319 // This operation was quite expensive, so decrease the limits 2320 decrease_limits(); 2321 return true; 2322 } 2323 2324 void G1CMTask::drain_local_queue(bool partially) { 2325 if (has_aborted()) { 2326 return; 2327 } 2328 2329 // Decide what the target size is, depending whether we're going to 2330 // drain it partially (so that other tasks can steal if they run out 2331 // of things to do) or totally (at the very end). 
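// Illustrative example (flag values assumed, not measured): with the default
// GCDrainStackTargetSize of 64 and a task queue whose max_elems() is in the
// tens of thousands, a partial drain uses MIN2(max_elems()/3, 64) == 64 as
// the target, i.e. it stops once the queue is back down to roughly 64 entries
// so that other tasks still have something to steal; a total drain uses a
// target of 0 and empties the queue completely.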
2332 size_t target_size; 2333 if (partially) { 2334 target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize); 2335 } else { 2336 target_size = 0; 2337 } 2338 2339 if (_task_queue->size() > target_size) { 2340 G1TaskQueueEntry entry; 2341 bool ret = _task_queue->pop_local(entry); 2342 while (ret) { 2343 scan_task_entry(entry); 2344 if (_task_queue->size() <= target_size || has_aborted()) { 2345 ret = false; 2346 } else { 2347 ret = _task_queue->pop_local(entry); 2348 } 2349 } 2350 } 2351 } 2352 2353 void G1CMTask::drain_global_stack(bool partially) { 2354 if (has_aborted()) { 2355 return; 2356 } 2357 2358 // We have a policy to drain the local queue before we attempt to 2359 // drain the global stack. 2360 assert(partially || _task_queue->size() == 0, "invariant"); 2361 2362 // Decide what the target size is, depending whether we're going to 2363 // drain it partially (so that other tasks can steal if they run out 2364 // of things to do) or totally (at the very end). 2365 // Notice that when draining the global mark stack partially, due to the racyness 2366 // of the mark stack size update we might in fact drop below the target. But, 2367 // this is not a problem. 2368 // In case of total draining, we simply process until the global mark stack is 2369 // totally empty, disregarding the size counter. 2370 if (partially) { 2371 size_t const target_size = _cm->partial_mark_stack_size_target(); 2372 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 2373 if (get_entries_from_global_stack()) { 2374 drain_local_queue(partially); 2375 } 2376 } 2377 } else { 2378 while (!has_aborted() && get_entries_from_global_stack()) { 2379 drain_local_queue(partially); 2380 } 2381 } 2382 } 2383 2384 // SATB Queue has several assumptions on whether to call the par or 2385 // non-par versions of the methods. this is why some of the code is 2386 // replicated. We should really get rid of the single-threaded version 2387 // of the code to simplify things. 2388 void G1CMTask::drain_satb_buffers() { 2389 if (has_aborted()) { 2390 return; 2391 } 2392 2393 // We set this so that the regular clock knows that we're in the 2394 // middle of draining buffers and doesn't set the abort flag when it 2395 // notices that SATB buffers are available for draining. It'd be 2396 // very counter productive if it did that. :-) 2397 _draining_satb_buffers = true; 2398 2399 G1CMSATBBufferClosure satb_cl(this, _g1h); 2400 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2401 2402 // This keeps claiming and applying the closure to completed buffers 2403 // until we run out of buffers or we need to abort. 
2404 while (!has_aborted() && 2405 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 2406 regular_clock_call(); 2407 } 2408 2409 _draining_satb_buffers = false; 2410 2411 assert(has_aborted() || 2412 _cm->concurrent() || 2413 satb_mq_set.completed_buffers_num() == 0, "invariant"); 2414 2415 // again, this was a potentially expensive operation, decrease the 2416 // limits to get the regular clock call early 2417 decrease_limits(); 2418 } 2419 2420 void G1CMTask::clear_mark_stats_cache(uint region_idx) { 2421 _mark_stats_cache.reset(region_idx); 2422 } 2423 2424 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() { 2425 return _mark_stats_cache.evict_all(); 2426 } 2427 2428 void G1CMTask::print_stats() { 2429 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls); 2430 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 2431 _elapsed_time_ms, _termination_time_ms); 2432 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms", 2433 _step_times_ms.num(), 2434 _step_times_ms.avg(), 2435 _step_times_ms.sd(), 2436 _step_times_ms.maximum(), 2437 _step_times_ms.sum()); 2438 size_t const hits = _mark_stats_cache.hits(); 2439 size_t const misses = _mark_stats_cache.misses(); 2440 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f", 2441 hits, misses, percent_of(hits, hits + misses)); 2442 } 2443 2444 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) { 2445 return _task_queues->steal(worker_id, task_entry); 2446 } 2447 2448 /***************************************************************************** 2449 2450 The do_marking_step(time_target_ms, ...) method is the building 2451 block of the parallel marking framework. It can be called in parallel 2452 with other invocations of do_marking_step() on different tasks 2453 (but only one per task, obviously) and concurrently with the 2454 mutator threads, or during remark, hence it eliminates the need 2455 for two versions of the code. When called during remark, it will 2456 pick up from where the task left off during the concurrent marking 2457 phase. Interestingly, tasks are also claimable during evacuation 2458 pauses too, since do_marking_step() ensures that it aborts before 2459 it needs to yield. 2460 2461 The data structures that it uses to do marking work are the 2462 following: 2463 2464 (1) Marking Bitmap. If there are gray objects that appear only 2465 on the bitmap (this happens either when dealing with an overflow 2466 or when the initial marking phase has simply marked the roots 2467 and didn't push them on the stack), then tasks claim heap 2468 regions whose bitmap they then scan to find gray objects. A 2469 global finger indicates where the end of the last claimed region 2470 is. A local finger indicates how far into the region a task has 2471 scanned. The two fingers are used to determine how to gray an 2472 object (i.e. whether simply marking it is OK, as it will be 2473 visited by a task in the future, or whether it needs to be also 2474 pushed on a stack). 2475 2476 (2) Local Queue. The local queue of the task which is accessed 2477 reasonably efficiently by the task. Other tasks can steal from 2478 it when they run out of work. Throughout the marking phase, a 2479 task attempts to keep its local queue short but not totally 2480 empty, so that entries are available for stealing by other 2481 tasks. 
Only when there is no more work will a task totally
2482 drain its local queue.
2483
2484 (3) Global Mark Stack. This handles local queue overflow. During
2485 marking only sets of entries are moved between it and the local
2486 queues, as access to it requires a mutex and more fine-grained
2487 interaction with it, which might cause contention. If it
2488 overflows, then the marking phase should restart and iterate
2489 over the bitmap to identify gray objects. Throughout the marking
2490 phase, tasks attempt to keep the global mark stack at a small
2491 length but not totally empty, so that entries are available for
2492 popping by other tasks. Only when there is no more work will tasks
2493 totally drain the global mark stack.
2494
2495 (4) SATB Buffer Queue. This is where completed SATB buffers are
2496 made available. Buffers are regularly removed from this queue
2497 and scanned for roots, so that the queue doesn't get too
2498 long. During remark, all completed buffers are processed, as
2499 well as the filled-in parts of any uncompleted buffers.
2500
2501 The do_marking_step() method tries to abort when the time target
2502 has been reached. There are a few other cases when the
2503 do_marking_step() method also aborts:
2504
2505 (1) When the marking phase has been aborted (after a Full GC).
2506
2507 (2) When a global overflow (on the global stack) has been
2508 triggered. Before the task aborts, it will actually sync up with
2509 the other tasks to ensure that all the marking data structures
2510 (local queues, stacks, fingers etc.) are re-initialized so that
2511 when do_marking_step() completes, the marking phase can
2512 immediately restart.
2513
2514 (3) When enough completed SATB buffers are available. The
2515 do_marking_step() method only tries to drain SATB buffers right
2516 at the beginning. So, if enough buffers are available, the
2517 marking step aborts and the SATB buffers are processed at
2518 the beginning of the next invocation.
2519
2520 (4) To yield. When we have to yield, we abort and yield
2521 right at the end of do_marking_step(). This saves us a lot
2522 of hassle as, by yielding, we might allow a Full GC. If this
2523 happens then objects will be compacted underneath our feet, the
2524 heap might shrink, etc. We save checking for this by just
2525 aborting and doing the yield right at the end.
2526
2527 From the above it follows that the do_marking_step() method should
2528 be called in a loop (or, otherwise, regularly) until it completes.
2529
2530 If a marking step completes without its has_aborted() flag being
2531 true, it means it has completed the current marking phase (and
2532 also all other marking tasks have done so and have all synced up).
2533
2534 A method called regular_clock_call() is invoked "regularly" (in
2535 sub-ms intervals) throughout marking. It is this clock method that
2536 checks all the abort conditions which were mentioned above and
2537 decides when the task should abort. A work-based scheme is used to
2538 trigger this clock method: when the number of object words the
2539 marking phase has scanned or the number of references the marking
2540 phase has visited reaches a given limit. Additional invocations of
2541 the clock method have been planted in a few other strategic places
2542 too. The initial reason for the clock method was to avoid calling
2543 vtime too regularly, as it is quite expensive.
So, once it was in 2544 place, it was natural to piggy-back all the other conditions on it 2545 too and not constantly check them throughout the code. 2546 2547 If do_termination is true then do_marking_step will enter its 2548 termination protocol. 2549 2550 The value of is_serial must be true when do_marking_step is being 2551 called serially (i.e. by the VMThread) and do_marking_step should 2552 skip any synchronization in the termination and overflow code. 2553 Examples include the serial remark code and the serial reference 2554 processing closures. 2555 2556 The value of is_serial must be false when do_marking_step is 2557 being called by any of the worker threads in a work gang. 2558 Examples include the concurrent marking code (CMMarkingTask), 2559 the MT remark code, and the MT reference processing closures. 2560 2561 *****************************************************************************/ 2562 2563 void G1CMTask::do_marking_step(double time_target_ms, 2564 bool do_termination, 2565 bool is_serial) { 2566 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2567 2568 _start_time_ms = os::elapsedVTime() * 1000.0; 2569 2570 // If do_stealing is true then do_marking_step will attempt to 2571 // steal work from the other G1CMTasks. It only makes sense to 2572 // enable stealing when the termination protocol is enabled 2573 // and do_marking_step() is not being called serially. 2574 bool do_stealing = do_termination && !is_serial; 2575 2576 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 2577 _time_target_ms = time_target_ms - diff_prediction_ms; 2578 2579 // set up the variables that are used in the work-based scheme to 2580 // call the regular clock method 2581 _words_scanned = 0; 2582 _refs_reached = 0; 2583 recalculate_limits(); 2584 2585 // clear all flags 2586 clear_has_aborted(); 2587 _has_timed_out = false; 2588 _draining_satb_buffers = false; 2589 2590 ++_calls; 2591 2592 // Set up the bitmap and oop closures. Anything that uses them is 2593 // eventually called from this method, so it is OK to allocate these 2594 // statically. 2595 G1CMBitMapClosure bitmap_closure(this, _cm); 2596 G1CMOopClosure cm_oop_closure(_g1h, this); 2597 set_cm_oop_closure(&cm_oop_closure); 2598 2599 if (_cm->has_overflown()) { 2600 // This can happen if the mark stack overflows during a GC pause 2601 // and this task, after a yield point, restarts. We have to abort 2602 // as we need to get into the overflow protocol which happens 2603 // right at the end of this task. 2604 set_has_aborted(); 2605 } 2606 2607 // First drain any available SATB buffers. After this, we will not 2608 // look at SATB buffers before the next invocation of this method. 2609 // If enough completed SATB buffers are queued up, the regular clock 2610 // will abort this task so that it restarts. 2611 drain_satb_buffers(); 2612 // ...then partially drain the local queue and the global stack 2613 drain_local_queue(true); 2614 drain_global_stack(true); 2615 2616 do { 2617 if (!has_aborted() && _curr_region != NULL) { 2618 // This means that we're already holding on to a region. 2619 assert(_finger != NULL, "if region is not NULL, then the finger " 2620 "should not be NULL either"); 2621 2622 // We might have restarted this task after an evacuation pause 2623 // which might have evacuated the region we're holding on to 2624 // underneath our feet. 
Let's read its limit again to make sure 2625 // that we do not iterate over a region of the heap that 2626 // contains garbage (update_region_limit() will also move 2627 // _finger to the start of the region if it is found empty). 2628 update_region_limit(); 2629 // We will start from _finger not from the start of the region, 2630 // as we might be restarting this task after aborting half-way 2631 // through scanning this region. In this case, _finger points to 2632 // the address where we last found a marked object. If this is a 2633 // fresh region, _finger points to start(). 2634 MemRegion mr = MemRegion(_finger, _region_limit); 2635 2636 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2637 "humongous regions should go around loop once only"); 2638 2639 // Some special cases: 2640 // If the memory region is empty, we can just give up the region. 2641 // If the current region is humongous then we only need to check 2642 // the bitmap for the bit associated with the start of the object, 2643 // scan the object if it's live, and give up the region. 2644 // Otherwise, let's iterate over the bitmap of the part of the region 2645 // that is left. 2646 // If the iteration is successful, give up the region. 2647 if (mr.is_empty()) { 2648 giveup_current_region(); 2649 regular_clock_call(); 2650 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2651 if (_next_mark_bitmap->is_marked(mr.start())) { 2652 // The object is marked - apply the closure 2653 bitmap_closure.do_addr(mr.start()); 2654 } 2655 // Even if this task aborted while scanning the humongous object 2656 // we can (and should) give up the current region. 2657 giveup_current_region(); 2658 regular_clock_call(); 2659 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) { 2660 giveup_current_region(); 2661 regular_clock_call(); 2662 } else { 2663 assert(has_aborted(), "currently the only way to do so"); 2664 // The only way to abort the bitmap iteration is to return 2665 // false from the do_bit() method. However, inside the 2666 // do_bit() method we move the _finger to point to the 2667 // object currently being looked at. So, if we bail out, we 2668 // have definitely set _finger to something non-null. 2669 assert(_finger != NULL, "invariant"); 2670 2671 // Region iteration was actually aborted. So now _finger 2672 // points to the address of the object we last scanned. If we 2673 // leave it there, when we restart this task, we will rescan 2674 // the object. It is easy to avoid this. We move the finger by 2675 // enough to point to the next possible object header. 2676 assert(_finger < _region_limit, "invariant"); 2677 HeapWord* const new_finger = _finger + ((oop)_finger)->size(); 2678 // Check if bitmap iteration was aborted while scanning the last object 2679 if (new_finger >= _region_limit) { 2680 giveup_current_region(); 2681 } else { 2682 move_finger_to(new_finger); 2683 } 2684 } 2685 } 2686 // At this point we have either completed iterating over the 2687 // region we were holding on to, or we have aborted. 2688 2689 // We then partially drain the local queue and the global stack. 2690 // (Do we really need this?) 2691 drain_local_queue(true); 2692 drain_global_stack(true); 2693 2694 // Read the note on the claim_region() method on why it might 2695 // return NULL with potentially more regions available for 2696 // claiming and why we have to check out_of_regions() to determine 2697 // whether we're done or not. 
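//
// Illustrative walk-through of the claim protocol used by claim_region()
// (the scenario is made up): a task reads the global finger, which points at
// the bottom of some region R, computes end = R->end(), and attempts
// Atomic::cmpxchg(end, &_finger, finger). If the CAS returns the old finger
// value, this task has claimed R (or gets NULL back if R turns out to be
// empty, in which case it simply retries). If the CAS fails, another task
// has already bumped the finger, so we re-read it and try the next region.
//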
2698 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 2699 // We are going to try to claim a new region. We should have 2700 // given up on the previous one. 2701 // Separated the asserts so that we know which one fires. 2702 assert(_curr_region == NULL, "invariant"); 2703 assert(_finger == NULL, "invariant"); 2704 assert(_region_limit == NULL, "invariant"); 2705 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 2706 if (claimed_region != NULL) { 2707 // Yes, we managed to claim one 2708 setup_for_region(claimed_region); 2709 assert(_curr_region == claimed_region, "invariant"); 2710 } 2711 // It is important to call the regular clock here. It might take 2712 // a while to claim a region if, for example, we hit a large 2713 // block of empty regions. So we need to call the regular clock 2714 // method once round the loop to make sure it's called 2715 // frequently enough. 2716 regular_clock_call(); 2717 } 2718 2719 if (!has_aborted() && _curr_region == NULL) { 2720 assert(_cm->out_of_regions(), 2721 "at this point we should be out of regions"); 2722 } 2723 } while ( _curr_region != NULL && !has_aborted()); 2724 2725 if (!has_aborted()) { 2726 // We cannot check whether the global stack is empty, since other 2727 // tasks might be pushing objects to it concurrently. 2728 assert(_cm->out_of_regions(), 2729 "at this point we should be out of regions"); 2730 // Try to reduce the number of available SATB buffers so that 2731 // remark has less work to do. 2732 drain_satb_buffers(); 2733 } 2734 2735 // Since we've done everything else, we can now totally drain the 2736 // local queue and global stack. 2737 drain_local_queue(false); 2738 drain_global_stack(false); 2739 2740 // Attempt at work stealing from other task's queues. 2741 if (do_stealing && !has_aborted()) { 2742 // We have not aborted. This means that we have finished all that 2743 // we could. Let's try to do some stealing... 2744 2745 // We cannot check whether the global stack is empty, since other 2746 // tasks might be pushing objects to it concurrently. 2747 assert(_cm->out_of_regions() && _task_queue->size() == 0, 2748 "only way to reach here"); 2749 while (!has_aborted()) { 2750 G1TaskQueueEntry entry; 2751 if (_cm->try_stealing(_worker_id, entry)) { 2752 scan_task_entry(entry); 2753 2754 // And since we're towards the end, let's totally drain the 2755 // local queue and global stack. 2756 drain_local_queue(false); 2757 drain_global_stack(false); 2758 } else { 2759 break; 2760 } 2761 } 2762 } 2763 2764 // We still haven't aborted. Now, let's try to get into the 2765 // termination protocol. 2766 if (do_termination && !has_aborted()) { 2767 // We cannot check whether the global stack is empty, since other 2768 // tasks might be concurrently pushing objects on it. 2769 // Separated the asserts so that we know which one fires. 2770 assert(_cm->out_of_regions(), "only way to reach here"); 2771 assert(_task_queue->size() == 0, "only way to reach here"); 2772 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 2773 2774 // The G1CMTask class also extends the TerminatorTerminator class, 2775 // hence its should_exit_termination() method will also decide 2776 // whether to exit the termination protocol or not. 2777 bool finished = (is_serial || 2778 _cm->terminator()->offer_termination(this)); 2779 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 2780 _termination_time_ms += 2781 termination_end_time_ms - _termination_start_time_ms; 2782 2783 if (finished) { 2784 // We're all done. 
2785 2786 // We can now guarantee that the global stack is empty, since 2787 // all other tasks have finished. We separated the guarantees so 2788 // that, if a condition is false, we can immediately find out 2789 // which one. 2790 guarantee(_cm->out_of_regions(), "only way to reach here"); 2791 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 2792 guarantee(_task_queue->size() == 0, "only way to reach here"); 2793 guarantee(!_cm->has_overflown(), "only way to reach here"); 2794 } else { 2795 // Apparently there's more work to do. Let's abort this task. It 2796 // will restart it and we can hopefully find more things to do. 2797 set_has_aborted(); 2798 } 2799 } 2800 2801 // Mainly for debugging purposes to make sure that a pointer to the 2802 // closure which was statically allocated in this frame doesn't 2803 // escape it by accident. 2804 set_cm_oop_closure(NULL); 2805 double end_time_ms = os::elapsedVTime() * 1000.0; 2806 double elapsed_time_ms = end_time_ms - _start_time_ms; 2807 // Update the step history. 2808 _step_times_ms.add(elapsed_time_ms); 2809 2810 if (has_aborted()) { 2811 // The task was aborted for some reason. 2812 if (_has_timed_out) { 2813 double diff_ms = elapsed_time_ms - _time_target_ms; 2814 // Keep statistics of how well we did with respect to hitting 2815 // our target only if we actually timed out (if we aborted for 2816 // other reasons, then the results might get skewed). 2817 _marking_step_diffs_ms.add(diff_ms); 2818 } 2819 2820 if (_cm->has_overflown()) { 2821 // This is the interesting one. We aborted because a global 2822 // overflow was raised. This means we have to restart the 2823 // marking phase and start iterating over regions. However, in 2824 // order to do this we have to make sure that all tasks stop 2825 // what they are doing and re-initialize in a safe manner. We 2826 // will achieve this with the use of two barrier sync points. 2827 2828 if (!is_serial) { 2829 // We only need to enter the sync barrier if being called 2830 // from a parallel context 2831 _cm->enter_first_sync_barrier(_worker_id); 2832 2833 // When we exit this sync barrier we know that all tasks have 2834 // stopped doing marking work. So, it's now safe to 2835 // re-initialize our data structures. 2836 } 2837 2838 clear_region_fields(); 2839 flush_mark_stats_cache(); 2840 2841 if (!is_serial) { 2842 // If we're executing the concurrent phase of marking, reset the marking 2843 // state; otherwise the marking state is reset after reference processing, 2844 // during the remark pause. 2845 // If we reset here as a result of an overflow during the remark we will 2846 // see assertion failures from any subsequent set_concurrency_and_phase() 2847 // calls. 2848 if (_cm->concurrent() && _worker_id == 0) { 2849 // Worker 0 is responsible for clearing the global data structures because 2850 // of an overflow. During STW we should not clear the overflow flag (in 2851 // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit 2852 // method to abort the pause and restart concurrent marking. 2853 _cm->reset_marking_for_restart(); 2854 2855 log_info(gc, marking)("Concurrent Mark reset for overflow"); 2856 } 2857 2858 // ...and enter the second barrier. 2859 _cm->enter_second_sync_barrier(_worker_id); 2860 } 2861 // At this point, if we're during the concurrent phase of 2862 // marking, everything has been re-initialized and we're 2863 // ready to restart. 
2864 } 2865 } 2866 } 2867 2868 G1CMTask::G1CMTask(uint worker_id, 2869 G1ConcurrentMark* cm, 2870 G1CMTaskQueue* task_queue, 2871 G1RegionMarkStats* mark_stats, 2872 uint max_regions) : 2873 _objArray_processor(this), 2874 _worker_id(worker_id), 2875 _g1h(G1CollectedHeap::heap()), 2876 _cm(cm), 2877 _next_mark_bitmap(NULL), 2878 _task_queue(task_queue), 2879 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize), 2880 _calls(0), 2881 _time_target_ms(0.0), 2882 _start_time_ms(0.0), 2883 _cm_oop_closure(NULL), 2884 _curr_region(NULL), 2885 _finger(NULL), 2886 _region_limit(NULL), 2887 _words_scanned(0), 2888 _words_scanned_limit(0), 2889 _real_words_scanned_limit(0), 2890 _refs_reached(0), 2891 _refs_reached_limit(0), 2892 _real_refs_reached_limit(0), 2893 _has_aborted(false), 2894 _has_timed_out(false), 2895 _draining_satb_buffers(false), 2896 _step_times_ms(), 2897 _elapsed_time_ms(0.0), 2898 _termination_time_ms(0.0), 2899 _termination_start_time_ms(0.0), 2900 _marking_step_diffs_ms() 2901 { 2902 guarantee(task_queue != NULL, "invariant"); 2903 2904 _marking_step_diffs_ms.add(0.5); 2905 } 2906 2907 // These are formatting macros that are used below to ensure 2908 // consistent formatting. The *_H_* versions are used to format the 2909 // header for a particular value and they should be kept consistent 2910 // with the corresponding macro. Also note that most of the macros add 2911 // the necessary white space (as a prefix) which makes them a bit 2912 // easier to compose. 2913 2914 // All the output lines are prefixed with this string to be able to 2915 // identify them easily in a large log file. 2916 #define G1PPRL_LINE_PREFIX "###" 2917 2918 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2919 #ifdef _LP64 2920 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2921 #else // _LP64 2922 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2923 #endif // _LP64 2924 2925 // For per-region info 2926 #define G1PPRL_TYPE_FORMAT " %-4s" 2927 #define G1PPRL_TYPE_H_FORMAT " %4s" 2928 #define G1PPRL_STATE_FORMAT " %-5s" 2929 #define G1PPRL_STATE_H_FORMAT " %5s" 2930 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2931 #define G1PPRL_BYTE_H_FORMAT " %9s" 2932 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2933 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2934 2935 // For summary info 2936 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2937 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2938 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2939 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2940 2941 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2942 _total_used_bytes(0), _total_capacity_bytes(0), 2943 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2944 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2945 { 2946 if (!log_is_enabled(Trace, gc, liveness)) { 2947 return; 2948 } 2949 2950 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2951 MemRegion g1_reserved = g1h->g1_reserved(); 2952 double now = os::elapsedTime(); 2953 2954 // Print the header of the output. 
2955 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2956 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2957 G1PPRL_SUM_ADDR_FORMAT("reserved") 2958 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2959 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2960 HeapRegion::GrainBytes); 2961 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2962 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2963 G1PPRL_TYPE_H_FORMAT 2964 G1PPRL_ADDR_BASE_H_FORMAT 2965 G1PPRL_BYTE_H_FORMAT 2966 G1PPRL_BYTE_H_FORMAT 2967 G1PPRL_BYTE_H_FORMAT 2968 G1PPRL_DOUBLE_H_FORMAT 2969 G1PPRL_BYTE_H_FORMAT 2970 G1PPRL_STATE_H_FORMAT 2971 G1PPRL_BYTE_H_FORMAT, 2972 "type", "address-range", 2973 "used", "prev-live", "next-live", "gc-eff", 2974 "remset", "state", "code-roots"); 2975 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2976 G1PPRL_TYPE_H_FORMAT 2977 G1PPRL_ADDR_BASE_H_FORMAT 2978 G1PPRL_BYTE_H_FORMAT 2979 G1PPRL_BYTE_H_FORMAT 2980 G1PPRL_BYTE_H_FORMAT 2981 G1PPRL_DOUBLE_H_FORMAT 2982 G1PPRL_BYTE_H_FORMAT 2983 G1PPRL_STATE_H_FORMAT 2984 G1PPRL_BYTE_H_FORMAT, 2985 "", "", 2986 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 2987 "(bytes)", "", "(bytes)"); 2988 } 2989 2990 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { 2991 if (!log_is_enabled(Trace, gc, liveness)) { 2992 return false; 2993 } 2994 2995 const char* type = r->get_type_str(); 2996 HeapWord* bottom = r->bottom(); 2997 HeapWord* end = r->end(); 2998 size_t capacity_bytes = r->capacity(); 2999 size_t used_bytes = r->used(); 3000 size_t prev_live_bytes = r->live_bytes(); 3001 size_t next_live_bytes = r->next_live_bytes(); 3002 double gc_eff = r->gc_efficiency(); 3003 size_t remset_bytes = r->rem_set()->mem_size(); 3004 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3005 const char* remset_type = r->rem_set()->get_short_state_str(); 3006 3007 _total_used_bytes += used_bytes; 3008 _total_capacity_bytes += capacity_bytes; 3009 _total_prev_live_bytes += prev_live_bytes; 3010 _total_next_live_bytes += next_live_bytes; 3011 _total_remset_bytes += remset_bytes; 3012 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3013 3014 // Print a line for this particular region. 3015 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3016 G1PPRL_TYPE_FORMAT 3017 G1PPRL_ADDR_BASE_FORMAT 3018 G1PPRL_BYTE_FORMAT 3019 G1PPRL_BYTE_FORMAT 3020 G1PPRL_BYTE_FORMAT 3021 G1PPRL_DOUBLE_FORMAT 3022 G1PPRL_BYTE_FORMAT 3023 G1PPRL_STATE_FORMAT 3024 G1PPRL_BYTE_FORMAT, 3025 type, p2i(bottom), p2i(end), 3026 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3027 remset_bytes, remset_type, strong_code_roots_bytes); 3028 3029 return false; 3030 } 3031 3032 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3033 if (!log_is_enabled(Trace, gc, liveness)) { 3034 return; 3035 } 3036 3037 // add static memory usages to remembered set sizes 3038 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3039 // Print the footer of the output. 
3040 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3041 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3042 " SUMMARY" 3043 G1PPRL_SUM_MB_FORMAT("capacity") 3044 G1PPRL_SUM_MB_PERC_FORMAT("used") 3045 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3046 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3047 G1PPRL_SUM_MB_FORMAT("remset") 3048 G1PPRL_SUM_MB_FORMAT("code-roots"), 3049 bytes_to_mb(_total_capacity_bytes), 3050 bytes_to_mb(_total_used_bytes), 3051 percent_of(_total_used_bytes, _total_capacity_bytes), 3052 bytes_to_mb(_total_prev_live_bytes), 3053 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 3054 bytes_to_mb(_total_next_live_bytes), 3055 percent_of(_total_next_live_bytes, _total_capacity_bytes), 3056 bytes_to_mb(_total_remset_bytes), 3057 bytes_to_mb(_total_strong_code_roots_bytes)); 3058 }
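
// A minimal sketch (not part of the build) of how G1PrintRegionLivenessInfoClosure
// is typically driven; the phase label "Post-Cleanup" is only an example:
//
//   if (log_is_enabled(Trace, gc, liveness)) {
//     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
//     G1CollectedHeap::heap()->heap_region_iterate(&cl);
//   }
//
// The constructor prints the header, do_heap_region() prints one line per
// region (returning false so that iteration continues), and the destructor
// prints the summary footer, all on the gc+liveness=trace log stream.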