1 /* 2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/classLoaderDataGraph.hpp" 27 #include "code/codeCache.hpp" 28 #include "gc/g1/g1BarrierSet.hpp" 29 #include "gc/g1/g1CollectedHeap.inline.hpp" 30 #include "gc/g1/g1CollectorState.hpp" 31 #include "gc/g1/g1ConcurrentMark.inline.hpp" 32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" 33 #include "gc/g1/g1HeapVerifier.hpp" 34 #include "gc/g1/g1OopClosures.inline.hpp" 35 #include "gc/g1/g1Policy.hpp" 36 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp" 37 #include "gc/g1/g1StringDedup.hpp" 38 #include "gc/g1/g1ThreadLocalData.hpp" 39 #include "gc/g1/heapRegion.inline.hpp" 40 #include "gc/g1/heapRegionRemSet.hpp" 41 #include "gc/g1/heapRegionSet.inline.hpp" 42 #include "gc/shared/adaptiveSizePolicy.hpp" 43 #include "gc/shared/gcId.hpp" 44 #include "gc/shared/gcTimer.hpp" 45 #include "gc/shared/gcTrace.hpp" 46 #include "gc/shared/gcTraceTime.inline.hpp" 47 #include "gc/shared/genOopClosures.inline.hpp" 48 #include "gc/shared/referencePolicy.hpp" 49 #include "gc/shared/strongRootsScope.hpp" 50 #include "gc/shared/suspendibleThreadSet.hpp" 51 #include "gc/shared/taskqueue.inline.hpp" 52 #include "gc/shared/vmGCOperations.hpp" 53 #include "gc/shared/weakProcessor.inline.hpp" 54 #include "include/jvm.h" 55 #include "logging/log.hpp" 56 #include "memory/allocation.hpp" 57 #include "memory/resourceArea.hpp" 58 #include "oops/access.inline.hpp" 59 #include "oops/oop.inline.hpp" 60 #include "runtime/atomic.hpp" 61 #include "runtime/handles.inline.hpp" 62 #include "runtime/java.hpp" 63 #include "runtime/prefetch.inline.hpp" 64 #include "services/memTracker.hpp" 65 #include "utilities/align.hpp" 66 #include "utilities/growableArray.hpp" 67 68 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) { 69 assert(addr < _cm->finger(), "invariant"); 70 assert(addr >= _task->finger(), "invariant"); 71 72 // We move that task's local finger along. 
73 _task->move_finger_to(addr); 74 75 _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr))); 76 // we only partially drain the local queue and global stack 77 _task->drain_local_queue(true); 78 _task->drain_global_stack(true); 79 80 // if the has_aborted flag has been raised, we need to bail out of 81 // the iteration 82 return !_task->has_aborted(); 83 } 84 85 G1CMMarkStack::G1CMMarkStack() : 86 _max_chunk_capacity(0), 87 _base(NULL), 88 _chunk_capacity(0) { 89 set_empty(); 90 } 91 92 bool G1CMMarkStack::resize(size_t new_capacity) { 93 assert(is_empty(), "Only resize when stack is empty."); 94 assert(new_capacity <= _max_chunk_capacity, 95 "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity); 96 97 TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC); 98 99 if (new_base == NULL) { 100 log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk)); 101 return false; 102 } 103 // Release old mapping. 104 if (_base != NULL) { 105 MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity); 106 } 107 108 _base = new_base; 109 _chunk_capacity = new_capacity; 110 set_empty(); 111 112 return true; 113 } 114 115 size_t G1CMMarkStack::capacity_alignment() { 116 return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry); 117 } 118 119 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) { 120 guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized."); 121 122 size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry); 123 124 _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar; 125 size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar; 126 127 guarantee(initial_chunk_capacity <= _max_chunk_capacity, 128 "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT, 129 _max_chunk_capacity, 130 initial_chunk_capacity); 131 132 log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT, 133 initial_chunk_capacity, _max_chunk_capacity); 134 135 return resize(initial_chunk_capacity); 136 } 137 138 void G1CMMarkStack::expand() { 139 if (_chunk_capacity == _max_chunk_capacity) { 140 log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity); 141 return; 142 } 143 size_t old_capacity = _chunk_capacity; 144 // Double capacity if possible 145 size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity); 146 147 if (resize(new_capacity)) { 148 log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks", 149 old_capacity, new_capacity); 150 } else { 151 log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks", 152 old_capacity, new_capacity); 153 } 154 } 155 156 G1CMMarkStack::~G1CMMarkStack() { 157 if (_base != NULL) { 158 MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity); 159 } 160 } 161 162 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) { 163 elem->next = *list; 164 *list = elem; 165 } 166 167 void 
G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) { 168 MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag); 169 add_chunk_to_list(&_chunk_list, elem); 170 _chunks_in_chunk_list++; 171 } 172 173 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) { 174 MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag); 175 add_chunk_to_list(&_free_list, elem); 176 } 177 178 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) { 179 TaskQueueEntryChunk* result = *list; 180 if (result != NULL) { 181 *list = (*list)->next; 182 } 183 return result; 184 } 185 186 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() { 187 MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag); 188 TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list); 189 if (result != NULL) { 190 _chunks_in_chunk_list--; 191 } 192 return result; 193 } 194 195 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() { 196 MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag); 197 return remove_chunk_from_list(&_free_list); 198 } 199 200 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() { 201 // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code. 202 // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding 203 // wraparound of _hwm. 204 if (_hwm >= _chunk_capacity) { 205 return NULL; 206 } 207 208 size_t cur_idx = Atomic::add(1u, &_hwm) - 1; 209 if (cur_idx >= _chunk_capacity) { 210 return NULL; 211 } 212 213 TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk; 214 result->next = NULL; 215 return result; 216 } 217 218 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) { 219 // Get a new chunk. 220 TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list(); 221 222 if (new_chunk == NULL) { 223 // Did not get a chunk from the free list. Allocate from backing memory. 224 new_chunk = allocate_new_chunk(); 225 226 if (new_chunk == NULL) { 227 return false; 228 } 229 } 230 231 Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry)); 232 233 add_chunk_to_chunk_list(new_chunk); 234 235 return true; 236 } 237 238 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) { 239 TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list(); 240 241 if (cur == NULL) { 242 return false; 243 } 244 245 Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry)); 246 247 add_chunk_to_free_list(cur); 248 return true; 249 } 250 251 void G1CMMarkStack::set_empty() { 252 _chunks_in_chunk_list = 0; 253 _hwm = 0; 254 _chunk_list = NULL; 255 _free_list = NULL; 256 } 257 258 G1CMRootRegions::G1CMRootRegions() : 259 _survivors(NULL), _cm(NULL), _scan_in_progress(false), 260 _should_abort(false), _claimed_survivor_index(0) { } 261 262 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) { 263 _survivors = survivors; 264 _cm = cm; 265 } 266 267 void G1CMRootRegions::prepare_for_scan() { 268 assert(!scan_in_progress(), "pre-condition"); 269 270 // Currently, only survivors can be root regions. 
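  // Reset the claim counter so that claim_next() hands out survivor regions
  // from index 0 again for this marking cycle.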
271 _claimed_survivor_index = 0; 272 _scan_in_progress = _survivors->regions()->is_nonempty(); 273 _should_abort = false; 274 } 275 276 HeapRegion* G1CMRootRegions::claim_next() { 277 if (_should_abort) { 278 // If someone has set the should_abort flag, we return NULL to 279 // force the caller to bail out of their loop. 280 return NULL; 281 } 282 283 // Currently, only survivors can be root regions. 284 const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions(); 285 286 int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1; 287 if (claimed_index < survivor_regions->length()) { 288 return survivor_regions->at(claimed_index); 289 } 290 return NULL; 291 } 292 293 uint G1CMRootRegions::num_root_regions() const { 294 return (uint)_survivors->regions()->length(); 295 } 296 297 void G1CMRootRegions::notify_scan_done() { 298 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); 299 _scan_in_progress = false; 300 RootRegionScan_lock->notify_all(); 301 } 302 303 void G1CMRootRegions::cancel_scan() { 304 notify_scan_done(); 305 } 306 307 void G1CMRootRegions::scan_finished() { 308 assert(scan_in_progress(), "pre-condition"); 309 310 // Currently, only survivors can be root regions. 311 if (!_should_abort) { 312 assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index); 313 assert((uint)_claimed_survivor_index >= _survivors->length(), 314 "we should have claimed all survivors, claimed index = %u, length = %u", 315 (uint)_claimed_survivor_index, _survivors->length()); 316 } 317 318 notify_scan_done(); 319 } 320 321 bool G1CMRootRegions::wait_until_scan_finished() { 322 if (!scan_in_progress()) { 323 return false; 324 } 325 326 { 327 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag); 328 while (scan_in_progress()) { 329 RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag); 330 } 331 } 332 return true; 333 } 334 335 // Returns the maximum number of workers to be used in a concurrent 336 // phase based on the number of GC workers being used in a STW 337 // phase. 
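// For example, with ParallelGCThreads == 8 this returns MAX2((8 + 2) / 4, 1U) == 2,
// and the MAX2 with 1U guarantees at least one concurrent worker even for very
// small STW worker counts.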
338 static uint scale_concurrent_worker_threads(uint num_gc_workers) { 339 return MAX2((num_gc_workers + 2) / 4, 1U); 340 } 341 342 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, 343 G1RegionToSpaceMapper* prev_bitmap_storage, 344 G1RegionToSpaceMapper* next_bitmap_storage) : 345 // _cm_thread set inside the constructor 346 _g1h(g1h), 347 _completed_initialization(false), 348 349 _mark_bitmap_1(), 350 _mark_bitmap_2(), 351 _prev_mark_bitmap(&_mark_bitmap_1), 352 _next_mark_bitmap(&_mark_bitmap_2), 353 354 _heap(_g1h->reserved_region()), 355 356 _root_regions(), 357 358 _global_mark_stack(), 359 360 // _finger set in set_non_marking_state 361 362 _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads), 363 _max_num_tasks(ParallelGCThreads), 364 // _num_active_tasks set in set_non_marking_state() 365 // _tasks set inside the constructor 366 367 _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)), 368 _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)), 369 370 _first_overflow_barrier_sync(), 371 _second_overflow_barrier_sync(), 372 373 _has_overflown(false), 374 _concurrent(false), 375 _has_aborted(false), 376 _restart_for_overflow(false), 377 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), 378 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()), 379 380 // _verbose_level set below 381 382 _init_times(), 383 _remark_times(), 384 _remark_mark_times(), 385 _remark_weak_ref_times(), 386 _cleanup_times(), 387 _total_cleanup_time(0.0), 388 389 _accum_task_vtime(NULL), 390 391 _concurrent_workers(NULL), 392 _num_concurrent_workers(0), 393 _max_concurrent_workers(0), 394 395 _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)), 396 _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC)) 397 { 398 _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage); 399 _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage); 400 401 // Create & start ConcurrentMark thread. 402 _cm_thread = new G1ConcurrentMarkThread(this); 403 if (_cm_thread->osthread() == NULL) { 404 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread"); 405 } 406 407 assert(CGC_lock != NULL, "CGC_lock must be initialized"); 408 409 SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set(); 410 satb_qs.set_buffer_size(G1SATBBufferSize); 411 412 _root_regions.init(_g1h->survivor(), this); 413 414 if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) { 415 // Calculate the number of concurrent worker threads by scaling 416 // the number of parallel GC threads. 
417 uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads); 418 FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num); 419 } 420 421 assert(ConcGCThreads > 0, "ConcGCThreads have been set."); 422 if (ConcGCThreads > ParallelGCThreads) { 423 log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).", 424 ConcGCThreads, ParallelGCThreads); 425 return; 426 } 427 428 log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset); 429 log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads); 430 431 _num_concurrent_workers = ConcGCThreads; 432 _max_concurrent_workers = _num_concurrent_workers; 433 434 _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true); 435 _concurrent_workers->initialize_workers(); 436 437 if (FLAG_IS_DEFAULT(MarkStackSize)) { 438 size_t mark_stack_size = 439 MIN2(MarkStackSizeMax, 440 MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE))); 441 // Verify that the calculated value for MarkStackSize is in range. 442 // It would be nice to use the private utility routine from Arguments. 443 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) { 444 log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): " 445 "must be between 1 and " SIZE_FORMAT, 446 mark_stack_size, MarkStackSizeMax); 447 return; 448 } 449 FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size); 450 } else { 451 // Verify MarkStackSize is in range. 452 if (FLAG_IS_CMDLINE(MarkStackSize)) { 453 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) { 454 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { 455 log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): " 456 "must be between 1 and " SIZE_FORMAT, 457 MarkStackSize, MarkStackSizeMax); 458 return; 459 } 460 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) { 461 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) { 462 log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")" 463 " or for MarkStackSizeMax (" SIZE_FORMAT ")", 464 MarkStackSize, MarkStackSizeMax); 465 return; 466 } 467 } 468 } 469 } 470 471 if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) { 472 vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack."); 473 } 474 475 _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC); 476 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC); 477 478 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail 479 _num_active_tasks = _max_num_tasks; 480 481 for (uint i = 0; i < _max_num_tasks; ++i) { 482 G1CMTaskQueue* task_queue = new G1CMTaskQueue(); 483 task_queue->initialize(); 484 _task_queues->register_queue(i, task_queue); 485 486 _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions()); 487 488 _accum_task_vtime[i] = 0.0; 489 } 490 491 reset_at_marking_complete(); 492 _completed_initialization = true; 493 } 494 495 void G1ConcurrentMark::reset() { 496 _has_aborted = false; 497 498 reset_marking_for_restart(); 499 500 // Reset all tasks, since different phases will use different number of active 501 // threads. So, it's easiest to have all of them ready. 
502 for (uint i = 0; i < _max_num_tasks; ++i) { 503 _tasks[i]->reset(_next_mark_bitmap); 504 } 505 506 uint max_regions = _g1h->max_regions(); 507 for (uint i = 0; i < max_regions; i++) { 508 _top_at_rebuild_starts[i] = NULL; 509 _region_mark_stats[i].clear(); 510 } 511 } 512 513 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) { 514 for (uint j = 0; j < _max_num_tasks; ++j) { 515 _tasks[j]->clear_mark_stats_cache(region_idx); 516 } 517 _top_at_rebuild_starts[region_idx] = NULL; 518 _region_mark_stats[region_idx].clear(); 519 } 520 521 void G1ConcurrentMark::clear_statistics(HeapRegion* r) { 522 uint const region_idx = r->hrm_index(); 523 if (r->is_humongous()) { 524 assert(r->is_starts_humongous(), "Got humongous continues region here"); 525 uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size()); 526 for (uint j = region_idx; j < (region_idx + size_in_regions); j++) { 527 clear_statistics_in_region(j); 528 } 529 } else { 530 clear_statistics_in_region(region_idx); 531 } 532 } 533 534 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) { 535 if (bitmap->is_marked(addr)) { 536 bitmap->clear(addr); 537 } 538 } 539 540 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) { 541 assert_at_safepoint_on_vm_thread(); 542 543 // Need to clear all mark bits of the humongous object. 544 clear_mark_if_set(_prev_mark_bitmap, r->bottom()); 545 clear_mark_if_set(_next_mark_bitmap, r->bottom()); 546 547 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { 548 return; 549 } 550 551 // Clear any statistics about the region gathered so far. 552 clear_statistics(r); 553 } 554 555 void G1ConcurrentMark::reset_marking_for_restart() { 556 _global_mark_stack.set_empty(); 557 558 // Expand the marking stack, if we have to and if we can. 559 if (has_overflown()) { 560 _global_mark_stack.expand(); 561 562 uint max_regions = _g1h->max_regions(); 563 for (uint i = 0; i < max_regions; i++) { 564 _region_mark_stats[i].clear_during_overflow(); 565 } 566 } 567 568 clear_has_overflown(); 569 _finger = _heap.start(); 570 571 for (uint i = 0; i < _max_num_tasks; ++i) { 572 G1CMTaskQueue* queue = _task_queues->queue(i); 573 queue->set_empty(); 574 } 575 } 576 577 void G1ConcurrentMark::set_concurrency(uint active_tasks) { 578 assert(active_tasks <= _max_num_tasks, "we should not have more"); 579 580 _num_active_tasks = active_tasks; 581 // Need to update the three data structures below according to the 582 // number of active threads for this phase. 583 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues); 584 _first_overflow_barrier_sync.set_n_workers((int) active_tasks); 585 _second_overflow_barrier_sync.set_n_workers((int) active_tasks); 586 } 587 588 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) { 589 set_concurrency(active_tasks); 590 591 _concurrent = concurrent; 592 593 if (!concurrent) { 594 // At this point we should be in a STW phase, and completed marking. 595 assert_at_safepoint_on_vm_thread(); 596 assert(out_of_regions(), 597 "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT, 598 p2i(_finger), p2i(_heap.end())); 599 } 600 } 601 602 void G1ConcurrentMark::reset_at_marking_complete() { 603 // We set the global marking state to some default values when we're 604 // not doing marking. 
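  // reset_marking_for_restart() below also resets the global finger and empties
  // the global mark stack and all task queues, which is the "default" state
  // referred to above.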
605 reset_marking_for_restart(); 606 _num_active_tasks = 0; 607 } 608 609 G1ConcurrentMark::~G1ConcurrentMark() { 610 FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts); 611 FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats); 612 // The G1ConcurrentMark instance is never freed. 613 ShouldNotReachHere(); 614 } 615 616 class G1ClearBitMapTask : public AbstractGangTask { 617 public: 618 static size_t chunk_size() { return M; } 619 620 private: 621 // Heap region closure used for clearing the given mark bitmap. 622 class G1ClearBitmapHRClosure : public HeapRegionClosure { 623 private: 624 G1CMBitMap* _bitmap; 625 G1ConcurrentMark* _cm; 626 public: 627 G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) { 628 } 629 630 virtual bool do_heap_region(HeapRegion* r) { 631 size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize; 632 633 HeapWord* cur = r->bottom(); 634 HeapWord* const end = r->end(); 635 636 while (cur < end) { 637 MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end)); 638 _bitmap->clear_range(mr); 639 640 cur += chunk_size_in_words; 641 642 // Abort iteration if after yielding the marking has been aborted. 643 if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) { 644 return true; 645 } 646 // Repeat the asserts from before the start of the closure. We will do them 647 // as asserts here to minimize their overhead on the product. However, we 648 // will have them as guarantees at the beginning / end of the bitmap 649 // clearing to get some checking in the product. 650 assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant"); 651 assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant"); 652 } 653 assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index()); 654 655 return false; 656 } 657 }; 658 659 G1ClearBitmapHRClosure _cl; 660 HeapRegionClaimer _hr_claimer; 661 bool _suspendible; // If the task is suspendible, workers must join the STS. 662 663 public: 664 G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) : 665 AbstractGangTask("G1 Clear Bitmap"), 666 _cl(bitmap, suspendible ? 
cm : NULL), 667 _hr_claimer(n_workers), 668 _suspendible(suspendible) 669 { } 670 671 void work(uint worker_id) { 672 SuspendibleThreadSetJoiner sts_join(_suspendible); 673 G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id); 674 } 675 676 bool is_complete() { 677 return _cl.is_complete(); 678 } 679 }; 680 681 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) { 682 assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint."); 683 684 size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor(); 685 size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size(); 686 687 uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers()); 688 689 G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield); 690 691 log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks); 692 workers->run_task(&cl, num_workers); 693 guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding."); 694 } 695 696 void G1ConcurrentMark::cleanup_for_next_mark() { 697 // Make sure that the concurrent mark thread looks to still be in 698 // the current cycle. 699 guarantee(cm_thread()->during_cycle(), "invariant"); 700 701 // We are finishing up the current cycle by clearing the next 702 // marking bitmap and getting it ready for the next cycle. During 703 // this time no other cycle can start. So, let's make sure that this 704 // is the case. 705 guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant"); 706 707 clear_bitmap(_next_mark_bitmap, _concurrent_workers, true); 708 709 // Repeat the asserts from above. 710 guarantee(cm_thread()->during_cycle(), "invariant"); 711 guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant"); 712 } 713 714 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) { 715 assert_at_safepoint_on_vm_thread(); 716 clear_bitmap(_prev_mark_bitmap, workers, false); 717 } 718 719 class NoteStartOfMarkHRClosure : public HeapRegionClosure { 720 public: 721 bool do_heap_region(HeapRegion* r) { 722 r->note_start_of_marking(); 723 return false; 724 } 725 }; 726 727 void G1ConcurrentMark::pre_initial_mark() { 728 // Initialize marking structures. This has to be done in a STW phase. 729 reset(); 730 731 // For each region note start of marking. 732 NoteStartOfMarkHRClosure startcl; 733 _g1h->heap_region_iterate(&startcl); 734 } 735 736 737 void G1ConcurrentMark::post_initial_mark() { 738 // Start Concurrent Marking weak-reference discovery. 739 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 740 // enable ("weak") refs discovery 741 rp->enable_discovery(); 742 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle 743 744 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 745 // This is the start of the marking cycle, we're expected all 746 // threads to have SATB queues with active set to false. 747 satb_mq_set.set_active_all_threads(true, /* new active value */ 748 false /* expected_active */); 749 750 _root_regions.prepare_for_scan(); 751 752 // update_g1_committed() will be called at the end of an evac pause 753 // when marking is on. 
So, it's also called at the end of the 754 // initial-mark pause to update the heap end, if the heap expands 755 // during it. No need to call it here. 756 } 757 758 /* 759 * Notice that in the next two methods, we actually leave the STS 760 * during the barrier sync and join it immediately afterwards. If we 761 * do not do this, the following deadlock can occur: one thread could 762 * be in the barrier sync code, waiting for the other thread to also 763 * sync up, whereas another one could be trying to yield, while also 764 * waiting for the other threads to sync up too. 765 * 766 * Note, however, that this code is also used during remark and in 767 * this case we should not attempt to leave / enter the STS, otherwise 768 * we'll either hit an assert (debug / fastdebug) or deadlock 769 * (product). So we should only leave / enter the STS if we are 770 * operating concurrently. 771 * 772 * Because the thread that does the sync barrier has left the STS, it 773 * is possible to be suspended for a Full GC or an evacuation pause 774 * could occur. This is actually safe, since the entering the sync 775 * barrier is one of the last things do_marking_step() does, and it 776 * doesn't manipulate any data structures afterwards. 777 */ 778 779 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) { 780 bool barrier_aborted; 781 { 782 SuspendibleThreadSetLeaver sts_leave(concurrent()); 783 barrier_aborted = !_first_overflow_barrier_sync.enter(); 784 } 785 786 // at this point everyone should have synced up and not be doing any 787 // more work 788 789 if (barrier_aborted) { 790 // If the barrier aborted we ignore the overflow condition and 791 // just abort the whole marking phase as quickly as possible. 792 return; 793 } 794 } 795 796 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) { 797 SuspendibleThreadSetLeaver sts_leave(concurrent()); 798 _second_overflow_barrier_sync.enter(); 799 800 // at this point everything should be re-initialized and ready to go 801 } 802 803 class G1CMConcurrentMarkingTask : public AbstractGangTask { 804 G1ConcurrentMark* _cm; 805 806 public: 807 void work(uint worker_id) { 808 assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread"); 809 ResourceMark rm; 810 811 double start_vtime = os::elapsedVTime(); 812 813 { 814 SuspendibleThreadSetJoiner sts_join; 815 816 assert(worker_id < _cm->active_tasks(), "invariant"); 817 818 G1CMTask* task = _cm->task(worker_id); 819 task->record_start_time(); 820 if (!_cm->has_aborted()) { 821 do { 822 task->do_marking_step(G1ConcMarkStepDurationMillis, 823 true /* do_termination */, 824 false /* is_serial*/); 825 826 _cm->do_yield_check(); 827 } while (!_cm->has_aborted() && task->has_aborted()); 828 } 829 task->record_end_time(); 830 guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant"); 831 } 832 833 double end_vtime = os::elapsedVTime(); 834 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime); 835 } 836 837 G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) : 838 AbstractGangTask("Concurrent Mark"), _cm(cm) { } 839 840 ~G1CMConcurrentMarkingTask() { } 841 }; 842 843 uint G1ConcurrentMark::calc_active_marking_workers() { 844 uint result = 0; 845 if (!UseDynamicNumberOfGCThreads || 846 (!FLAG_IS_DEFAULT(ConcGCThreads) && 847 !ForceDynamicNumberOfGCThreads)) { 848 result = _max_concurrent_workers; 849 } else { 850 result = 851 AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers, 852 1, /* Minimum workers */ 853 _num_concurrent_workers, 854 
Threads::number_of_non_daemon_threads()); 855 // Don't scale the result down by scale_concurrent_workers() because 856 // that scaling has already gone into "_max_concurrent_workers". 857 } 858 assert(result > 0 && result <= _max_concurrent_workers, 859 "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u", 860 _max_concurrent_workers, result); 861 return result; 862 } 863 864 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) { 865 // Currently, only survivors can be root regions. 866 assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant"); 867 G1RootRegionScanClosure cl(_g1h, this, worker_id); 868 869 const uintx interval = PrefetchScanIntervalInBytes; 870 HeapWord* curr = hr->bottom(); 871 const HeapWord* end = hr->top(); 872 while (curr < end) { 873 Prefetch::read(curr, interval); 874 oop obj = oop(curr); 875 int size = obj->oop_iterate_size(&cl); 876 assert(size == obj->size(), "sanity"); 877 curr += size; 878 } 879 } 880 881 class G1CMRootRegionScanTask : public AbstractGangTask { 882 G1ConcurrentMark* _cm; 883 public: 884 G1CMRootRegionScanTask(G1ConcurrentMark* cm) : 885 AbstractGangTask("G1 Root Region Scan"), _cm(cm) { } 886 887 void work(uint worker_id) { 888 assert(Thread::current()->is_ConcurrentGC_thread(), 889 "this should only be done by a conc GC thread"); 890 891 G1CMRootRegions* root_regions = _cm->root_regions(); 892 HeapRegion* hr = root_regions->claim_next(); 893 while (hr != NULL) { 894 _cm->scan_root_region(hr, worker_id); 895 hr = root_regions->claim_next(); 896 } 897 } 898 }; 899 900 void G1ConcurrentMark::scan_root_regions() { 901 // scan_in_progress() will have been set to true only if there was 902 // at least one root region to scan. So, if it's false, we 903 // should not attempt to do any further work. 904 if (root_regions()->scan_in_progress()) { 905 assert(!has_aborted(), "Aborting before root region scanning is finished not supported."); 906 907 _num_concurrent_workers = MIN2(calc_active_marking_workers(), 908 // We distribute work on a per-region basis, so starting 909 // more threads than that is useless. 910 root_regions()->num_root_regions()); 911 assert(_num_concurrent_workers <= _max_concurrent_workers, 912 "Maximum number of marking threads exceeded"); 913 914 G1CMRootRegionScanTask task(this); 915 log_debug(gc, ergo)("Running %s using %u workers for %u work units.", 916 task.name(), _num_concurrent_workers, root_regions()->num_root_regions()); 917 _concurrent_workers->run_task(&task, _num_concurrent_workers); 918 919 // It's possible that has_aborted() is true here without actually 920 // aborting the survivor scan earlier. This is OK as it's 921 // mainly used for sanity checking. 
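    // scan_finished() also notifies RootRegionScan_lock, so any thread blocked in
    // wait_until_scan_finished() (typically an evacuation pause that must wait for
    // root region scanning to complete) can continue.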
922 root_regions()->scan_finished(); 923 } 924 } 925 926 void G1ConcurrentMark::concurrent_cycle_start() { 927 _gc_timer_cm->register_gc_start(); 928 929 _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start()); 930 931 _g1h->trace_heap_before_gc(_gc_tracer_cm); 932 // Record start, but take no time 933 TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::CycleStart, _g1h->gc_cause()); 934 } 935 936 void G1ConcurrentMark::concurrent_cycle_end() { 937 _g1h->collector_state()->set_clearing_next_bitmap(false); 938 939 _g1h->trace_heap_after_gc(_gc_tracer_cm); 940 941 if (has_aborted()) { 942 log_info(gc, marking)("Concurrent Mark Abort"); 943 _gc_tracer_cm->report_concurrent_mode_failure(); 944 } 945 946 _gc_timer_cm->register_gc_end(); 947 948 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); 949 // Record end, but take no time 950 TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::CycleEnd, _g1h->gc_cause()); 951 } 952 953 void G1ConcurrentMark::mark_from_roots() { 954 _restart_for_overflow = false; 955 956 _num_concurrent_workers = calc_active_marking_workers(); 957 958 uint active_workers = MAX2(1U, _num_concurrent_workers); 959 960 // Setting active workers is not guaranteed since fewer 961 // worker threads may currently exist and more may not be 962 // available. 963 active_workers = _concurrent_workers->update_active_workers(active_workers); 964 log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers()); 965 966 // Parallel task terminator is set in "set_concurrency_and_phase()" 967 set_concurrency_and_phase(active_workers, true /* concurrent */); 968 969 G1CMConcurrentMarkingTask marking_task(this); 970 _concurrent_workers->run_task(&marking_task); 971 print_stats(); 972 } 973 974 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) { 975 G1HeapVerifier* verifier = _g1h->verifier(); 976 977 verifier->verify_region_sets_optional(); 978 979 if (VerifyDuringGC) { 980 GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm); 981 982 size_t const BufLen = 512; 983 char buffer[BufLen]; 984 985 jio_snprintf(buffer, BufLen, "During GC (%s)", caller); 986 verifier->verify(type, vo, buffer); 987 } 988 989 verifier->check_bitmaps(caller); 990 } 991 992 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask { 993 G1CollectedHeap* _g1h; 994 G1ConcurrentMark* _cm; 995 HeapRegionClaimer _hrclaimer; 996 uint volatile _total_selected_for_rebuild; 997 998 G1PrintRegionLivenessInfoClosure _cl; 999 1000 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure { 1001 G1CollectedHeap* _g1h; 1002 G1ConcurrentMark* _cm; 1003 1004 G1PrintRegionLivenessInfoClosure* _cl; 1005 1006 uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild. 
1007 1008 void update_remset_before_rebuild(HeapRegion* hr) { 1009 G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker(); 1010 1011 bool selected_for_rebuild; 1012 if (hr->is_humongous()) { 1013 bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0; 1014 selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live); 1015 } else { 1016 size_t const live_bytes = _cm->liveness(hr->hrm_index()); 1017 selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes); 1018 } 1019 if (selected_for_rebuild) { 1020 _num_regions_selected_for_rebuild++; 1021 } 1022 _cm->update_top_at_rebuild_start(hr); 1023 } 1024 1025 // Distribute the given words across the humongous object starting with hr and 1026 // note end of marking. 1027 void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) { 1028 uint const region_idx = hr->hrm_index(); 1029 size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size(); 1030 uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words); 1031 1032 // "Distributing" zero words means that we only note end of marking for these 1033 // regions. 1034 assert(marked_words == 0 || obj_size_in_words == marked_words, 1035 "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT, 1036 obj_size_in_words, marked_words); 1037 1038 for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) { 1039 HeapRegion* const r = _g1h->region_at(i); 1040 size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words); 1041 1042 log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)", 1043 words_to_add, i, r->get_type_str()); 1044 add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize); 1045 marked_words -= words_to_add; 1046 } 1047 assert(marked_words == 0, 1048 SIZE_FORMAT " words left after distributing space across %u regions", 1049 marked_words, num_regions_in_humongous); 1050 } 1051 1052 void update_marked_bytes(HeapRegion* hr) { 1053 uint const region_idx = hr->hrm_index(); 1054 size_t const marked_words = _cm->liveness(region_idx); 1055 // The marking attributes the object's size completely to the humongous starts 1056 // region. We need to distribute this value across the entire set of regions a 1057 // humongous object spans. 
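    // For example, for a live humongous object spanning three regions,
    // distribute_marked_bytes() attributes HeapRegion::GrainWords to each of the
    // first two regions and the remaining words of the object to the last one.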
1058 if (hr->is_humongous()) { 1059 assert(hr->is_starts_humongous() || marked_words == 0, 1060 "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)", 1061 marked_words, region_idx, hr->get_type_str()); 1062 if (hr->is_starts_humongous()) { 1063 distribute_marked_bytes(hr, marked_words); 1064 } 1065 } else { 1066 log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str()); 1067 add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize); 1068 } 1069 } 1070 1071 void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) { 1072 hr->add_to_marked_bytes(marked_bytes); 1073 _cl->do_heap_region(hr); 1074 hr->note_end_of_marking(); 1075 } 1076 1077 public: 1078 G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) : 1079 _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { } 1080 1081 virtual bool do_heap_region(HeapRegion* r) { 1082 update_remset_before_rebuild(r); 1083 update_marked_bytes(r); 1084 1085 return false; 1086 } 1087 1088 uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; } 1089 }; 1090 1091 public: 1092 G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) : 1093 AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"), 1094 _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { } 1095 1096 virtual void work(uint worker_id) { 1097 G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl); 1098 _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id); 1099 Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild); 1100 } 1101 1102 uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; } 1103 1104 // Number of regions for which roughly one thread should be spawned for this work. 1105 static const uint RegionsPerThread = 384; 1106 }; 1107 1108 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure { 1109 G1CollectedHeap* _g1h; 1110 public: 1111 G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { } 1112 1113 virtual bool do_heap_region(HeapRegion* r) { 1114 _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r); 1115 return false; 1116 } 1117 }; 1118 1119 void G1ConcurrentMark::remark() { 1120 assert_at_safepoint_on_vm_thread(); 1121 1122 // If a full collection has happened, we should not continue. However we might 1123 // have ended up here as the Remark VM operation has been scheduled already. 1124 if (has_aborted()) { 1125 return; 1126 } 1127 1128 TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::Remark, _g1h->gc_cause()); 1129 1130 G1Policy* g1p = _g1h->g1_policy(); 1131 g1p->record_concurrent_mark_remark_start(); 1132 1133 double start = os::elapsedTime(); 1134 1135 verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before"); 1136 1137 { 1138 GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm); 1139 finalize_marking(); 1140 } 1141 1142 double mark_work_end = os::elapsedTime(); 1143 1144 bool const mark_finished = !has_overflown(); 1145 if (mark_finished) { 1146 weak_refs_work(false /* clear_all_soft_refs */); 1147 1148 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 1149 // We're done with marking. 
1150     // This is the end of the marking cycle, we expect all
1151     // threads to have SATB queues with active set to true.
1152     satb_mq_set.set_active_all_threads(false, /* new active value */
1153                                        true /* expected_active */);
1154
1155     {
1156       GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1157       flush_all_task_caches();
1158     }
1159
1160     // Install newly created mark bitmap as "prev".
1161     swap_mark_bitmaps();
1162     {
1163       GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1164
1165       uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1166                                        G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1167       uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1168
1169       G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1170       log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1171       _g1h->workers()->run_task(&cl, num_workers);
1172
1173       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1174                                       _g1h->num_regions(), cl.total_selected_for_rebuild());
1175     }
1176     {
1177       GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1178       reclaim_empty_regions();
1179     }
1180
1181     // Clean out dead classes
1182     if (ClassUnloadingWithConcurrentMark) {
1183       GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1184       ClassLoaderDataGraph::purge();
1185     }
1186
1187     compute_new_sizes();
1188
1189     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1190
1191     assert(!restart_for_overflow(), "sanity");
1192     // Completely reset the marking state since marking completed
1193     reset_at_marking_complete();
1194   } else {
1195     // We overflowed. Restart concurrent marking.
1196     _restart_for_overflow = true;
1197
1198     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1199
1200     // Clear the marking state because we will be restarting
1201     // marking due to overflowing the global mark stack.
1202     reset_marking_for_restart();
1203   }
1204
1205   {
1206     GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1207     report_object_count(mark_finished);
1208   }
1209
1210   // Statistics
1211   double now = os::elapsedTime();
1212   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1213   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1214   _remark_times.add((now - start) * 1000.0);
1215
1216   g1p->record_concurrent_mark_remark_end();
1217 }
1218
1219 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1220   // Per-region work during the Cleanup pause.
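  // Regions that contain data but no live objects after marking (and are neither
  // young nor archive) are freed outright and their mark statistics cleared; all
  // other regions only have their remembered sets cleaned up.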
1221 class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure { 1222 G1CollectedHeap* _g1h; 1223 size_t _freed_bytes; 1224 FreeRegionList* _local_cleanup_list; 1225 uint _old_regions_removed; 1226 uint _humongous_regions_removed; 1227 HRRSCleanupTask* _hrrs_cleanup_task; 1228 1229 public: 1230 G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h, 1231 FreeRegionList* local_cleanup_list, 1232 HRRSCleanupTask* hrrs_cleanup_task) : 1233 _g1h(g1h), 1234 _freed_bytes(0), 1235 _local_cleanup_list(local_cleanup_list), 1236 _old_regions_removed(0), 1237 _humongous_regions_removed(0), 1238 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1239 1240 size_t freed_bytes() { return _freed_bytes; } 1241 const uint old_regions_removed() { return _old_regions_removed; } 1242 const uint humongous_regions_removed() { return _humongous_regions_removed; } 1243 1244 bool do_heap_region(HeapRegion *hr) { 1245 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) { 1246 _freed_bytes += hr->used(); 1247 hr->set_containing_set(NULL); 1248 if (hr->is_humongous()) { 1249 _humongous_regions_removed++; 1250 _g1h->free_humongous_region(hr, _local_cleanup_list); 1251 } else { 1252 _old_regions_removed++; 1253 _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */); 1254 } 1255 hr->clear_cardtable(); 1256 _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index()); 1257 log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom())); 1258 } else { 1259 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); 1260 } 1261 1262 return false; 1263 } 1264 }; 1265 1266 G1CollectedHeap* _g1h; 1267 FreeRegionList* _cleanup_list; 1268 HeapRegionClaimer _hrclaimer; 1269 1270 public: 1271 G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : 1272 AbstractGangTask("G1 Cleanup"), 1273 _g1h(g1h), 1274 _cleanup_list(cleanup_list), 1275 _hrclaimer(n_workers) { 1276 1277 HeapRegionRemSet::reset_for_cleanup_tasks(); 1278 } 1279 1280 void work(uint worker_id) { 1281 FreeRegionList local_cleanup_list("Local Cleanup List"); 1282 HRRSCleanupTask hrrs_cleanup_task; 1283 G1ReclaimEmptyRegionsClosure cl(_g1h, 1284 &local_cleanup_list, 1285 &hrrs_cleanup_task); 1286 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id); 1287 assert(cl.is_complete(), "Shouldn't have aborted!"); 1288 1289 // Now update the old/humongous region sets 1290 _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed()); 1291 { 1292 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 1293 _g1h->decrement_summary_bytes(cl.freed_bytes()); 1294 1295 _cleanup_list->add_ordered(&local_cleanup_list); 1296 assert(local_cleanup_list.is_empty(), "post-condition"); 1297 1298 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); 1299 } 1300 } 1301 }; 1302 1303 void G1ConcurrentMark::reclaim_empty_regions() { 1304 WorkGang* workers = _g1h->workers(); 1305 FreeRegionList empty_regions_list("Empty Regions After Mark List"); 1306 1307 G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers()); 1308 workers->run_task(&cl); 1309 1310 if (!empty_regions_list.is_empty()) { 1311 log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length()); 1312 // Now print the empty regions list. 
1313     G1HRPrinter* hrp = _g1h->hr_printer();
1314     if (hrp->is_active()) {
1315       FreeRegionListIterator iter(&empty_regions_list);
1316       while (iter.more_available()) {
1317         HeapRegion* hr = iter.get_next();
1318         hrp->cleanup(hr);
1319       }
1320     }
1321     // And actually make them available.
1322     _g1h->prepend_to_freelist(&empty_regions_list);
1323   }
1324 }
1325
1326 void G1ConcurrentMark::compute_new_sizes() {
1327   MetaspaceGC::compute_new_size();
1328
1329   // Cleanup will have freed any regions completely full of garbage.
1330   // Update the soft reference policy with the new heap occupancy.
1331   Universe::update_heap_info_at_gc();
1332
1333   // We reclaimed old regions so we should calculate the sizes to make
1334   // sure we update the old gen/space data.
1335   _g1h->g1mm()->update_sizes();
1336 }
1337
1338 void G1ConcurrentMark::cleanup() {
1339   assert_at_safepoint_on_vm_thread();
1340
1341   // If a full collection has happened, we shouldn't do this.
1342   if (has_aborted()) {
1343     return;
1344   }
1345
1346   TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::Cleanup, _g1h->gc_cause());
1347
1348   G1Policy* g1p = _g1h->g1_policy();
1349   g1p->record_concurrent_mark_cleanup_start();
1350
1351   double start = os::elapsedTime();
1352
1353   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1354
1355   {
1356     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1357     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1358     _g1h->heap_region_iterate(&cl);
1359   }
1360
1361   if (log_is_enabled(Trace, gc, liveness)) {
1362     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1363     _g1h->heap_region_iterate(&cl);
1364   }
1365
1366   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1367
1368   // We need to make this be a "collection" so any collection pause that
1369   // races with it goes around and waits for Cleanup to finish.
1370   _g1h->increment_total_collections();
1371
1372   // Local statistics
1373   double recent_cleanup_time = (os::elapsedTime() - start);
1374   _total_cleanup_time += recent_cleanup_time;
1375   _cleanup_times.add(recent_cleanup_time);
1376
1377   {
1378     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1379     _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1380   }
1381 }
1382
1383 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1384 // Uses the G1CMTask associated with a worker thread (for serial reference
1385 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1386 // trace referent objects.
1387 //
1388 // Using the G1CMTask and embedded local queues avoids having the worker
1389 // threads operating on the global mark stack. This reduces the risk
1390 // of overflowing the stack - which we would rather avoid at this late
1391 // stage. Also using the tasks' local queues removes the potential
1392 // of the workers interfering with each other that could occur if
1393 // operating on the global stack.
1394 1395 class G1CMKeepAliveAndDrainClosure : public OopClosure { 1396 G1ConcurrentMark* _cm; 1397 G1CMTask* _task; 1398 uint _ref_counter_limit; 1399 uint _ref_counter; 1400 bool _is_serial; 1401 public: 1402 G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) : 1403 _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval), 1404 _ref_counter(_ref_counter_limit), _is_serial(is_serial) { 1405 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 1406 } 1407 1408 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 1409 virtual void do_oop( oop* p) { do_oop_work(p); } 1410 1411 template <class T> void do_oop_work(T* p) { 1412 if (_cm->has_overflown()) { 1413 return; 1414 } 1415 if (!_task->deal_with_reference(p)) { 1416 // We did not add anything to the mark bitmap (or mark stack), so there is 1417 // no point trying to drain it. 1418 return; 1419 } 1420 _ref_counter--; 1421 1422 if (_ref_counter == 0) { 1423 // We have dealt with _ref_counter_limit references, pushing them 1424 // and objects reachable from them on to the local stack (and 1425 // possibly the global stack). Call G1CMTask::do_marking_step() to 1426 // process these entries. 1427 // 1428 // We call G1CMTask::do_marking_step() in a loop, which we'll exit if 1429 // there's nothing more to do (i.e. we're done with the entries that 1430 // were pushed as a result of the G1CMTask::deal_with_reference() calls 1431 // above) or we overflow. 1432 // 1433 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted() 1434 // flag while there may still be some work to do. (See the comment at 1435 // the beginning of G1CMTask::do_marking_step() for those conditions - 1436 // one of which is reaching the specified time target.) It is only 1437 // when G1CMTask::do_marking_step() returns without setting the 1438 // has_aborted() flag that the marking step has completed. 1439 do { 1440 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 1441 _task->do_marking_step(mark_step_duration_ms, 1442 false /* do_termination */, 1443 _is_serial); 1444 } while (_task->has_aborted() && !_cm->has_overflown()); 1445 _ref_counter = _ref_counter_limit; 1446 } 1447 } 1448 }; 1449 1450 // 'Drain' oop closure used by both serial and parallel reference processing. 1451 // Uses the G1CMTask associated with a given worker thread (for serial 1452 // reference processing the G1CMtask for worker 0 is used). Calls the 1453 // do_marking_step routine, with an unbelievably large timeout value, 1454 // to drain the marking data structures of the remaining entries 1455 // added by the 'keep alive' oop closure above. 1456 1457 class G1CMDrainMarkingStackClosure : public VoidClosure { 1458 G1ConcurrentMark* _cm; 1459 G1CMTask* _task; 1460 bool _is_serial; 1461 public: 1462 G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) : 1463 _cm(cm), _task(task), _is_serial(is_serial) { 1464 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); 1465 } 1466 1467 void do_void() { 1468 do { 1469 // We call G1CMTask::do_marking_step() to completely drain the local 1470 // and global marking stacks of entries pushed by the 'keep alive' 1471 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). 1472 // 1473 // G1CMTask::do_marking_step() is called in a loop, which we'll exit 1474 // if there's nothing more to do (i.e. 
we've completely drained the 1475 // entries that were pushed as a a result of applying the 'keep alive' 1476 // closure to the entries on the discovered ref lists) or we overflow 1477 // the global marking stack. 1478 // 1479 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted() 1480 // flag while there may still be some work to do. (See the comment at 1481 // the beginning of G1CMTask::do_marking_step() for those conditions - 1482 // one of which is reaching the specified time target.) It is only 1483 // when G1CMTask::do_marking_step() returns without setting the 1484 // has_aborted() flag that the marking step has completed. 1485 1486 _task->do_marking_step(1000000000.0 /* something very large */, 1487 true /* do_termination */, 1488 _is_serial); 1489 } while (_task->has_aborted() && !_cm->has_overflown()); 1490 } 1491 }; 1492 1493 // Implementation of AbstractRefProcTaskExecutor for parallel 1494 // reference processing at the end of G1 concurrent marking 1495 1496 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor { 1497 private: 1498 G1CollectedHeap* _g1h; 1499 G1ConcurrentMark* _cm; 1500 WorkGang* _workers; 1501 uint _active_workers; 1502 1503 public: 1504 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, 1505 G1ConcurrentMark* cm, 1506 WorkGang* workers, 1507 uint n_workers) : 1508 _g1h(g1h), _cm(cm), 1509 _workers(workers), _active_workers(n_workers) { } 1510 1511 virtual void execute(ProcessTask& task, uint ergo_workers); 1512 }; 1513 1514 class G1CMRefProcTaskProxy : public AbstractGangTask { 1515 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 1516 ProcessTask& _proc_task; 1517 G1CollectedHeap* _g1h; 1518 G1ConcurrentMark* _cm; 1519 1520 public: 1521 G1CMRefProcTaskProxy(ProcessTask& proc_task, 1522 G1CollectedHeap* g1h, 1523 G1ConcurrentMark* cm) : 1524 AbstractGangTask("Process reference objects in parallel"), 1525 _proc_task(proc_task), _g1h(g1h), _cm(cm) { 1526 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1527 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); 1528 } 1529 1530 virtual void work(uint worker_id) { 1531 ResourceMark rm; 1532 HandleMark hm; 1533 G1CMTask* task = _cm->task(worker_id); 1534 G1CMIsAliveClosure g1_is_alive(_g1h); 1535 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); 1536 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); 1537 1538 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 1539 } 1540 }; 1541 1542 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) { 1543 assert(_workers != NULL, "Need parallel worker threads."); 1544 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT"); 1545 assert(_workers->active_workers() >= ergo_workers, 1546 "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)", 1547 ergo_workers, _workers->active_workers()); 1548 1549 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 1550 1551 // We need to reset the concurrency level before each 1552 // proxy task execution, so that the termination protocol 1553 // and overflow handling in G1CMTask::do_marking_step() knows 1554 // how many workers to wait for. 1555 _cm->set_concurrency(ergo_workers); 1556 _workers->run_task(&proc_task_proxy, ergo_workers); 1557 } 1558 1559 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) { 1560 ResourceMark rm; 1561 HandleMark hm; 1562 1563 // Is alive closure. 
1564 G1CMIsAliveClosure g1_is_alive(_g1h); 1565 1566 // Inner scope to exclude the cleaning of the string table 1567 // from the displayed time. 1568 { 1569 GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm); 1570 1571 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1572 1573 // See the comment in G1CollectedHeap::ref_processing_init() 1574 // about how reference processing currently works in G1. 1575 1576 // Set the soft reference policy 1577 rp->setup_policy(clear_all_soft_refs); 1578 assert(_global_mark_stack.is_empty(), "mark stack should be empty"); 1579 1580 // Instances of the 'Keep Alive' and 'Complete GC' closures used 1581 // in serial reference processing. Note these closures are also 1582 // used for serially processing (by the current thread) the 1583 // JNI references during parallel reference processing. 1584 // 1585 // These closures do not need to synchronize with the worker 1586 // threads involved in parallel reference processing as these 1587 // instances are executed serially by the current thread (e.g. 1588 // reference processing is not multi-threaded and is thus 1589 // performed by the current thread instead of a gang worker). 1590 // 1591 // The gang tasks involved in parallel reference processing create 1592 // their own instances of these closures, which do their own 1593 // synchronization among themselves. 1594 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 1595 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 1596 1597 // We need at least one active thread. If reference processing 1598 // is not multi-threaded we use the current (VMThread) thread, 1599 // otherwise we use the work gang from the G1CollectedHeap and 1600 // we utilize all the worker threads we can. 1601 bool processing_is_mt = rp->processing_is_mt(); 1602 uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U); 1603 active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U); 1604 1605 // Parallel processing task executor. 1606 G1CMRefProcTaskExecutor par_task_executor(_g1h, this, 1607 _g1h->workers(), active_workers); 1608 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 1609 1610 // Set the concurrency level. The phase was already set prior to 1611 // executing the remark task. 1612 set_concurrency(active_workers); 1613 1614 // Set the degree of MT processing here. If the discovery was done MT, 1615 // the number of threads involved during discovery could differ from 1616 // the number of active workers. This is OK as long as the discovered 1617 // Reference lists are balanced (see balance_all_queues() and balance_queues()). 1618 rp->set_active_mt_degree(active_workers); 1619 1620 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues()); 1621 1622 // Process the weak references. 1623 const ReferenceProcessorStats& stats = 1624 rp->process_discovered_references(&g1_is_alive, 1625 &g1_keep_alive, 1626 &g1_drain_mark_stack, 1627 executor, 1628 &pt); 1629 _gc_tracer_cm->report_gc_reference_stats(stats); 1630 pt.print_all_references(); 1631 1632 // The do_oop work routines of the keep_alive and drain_marking_stack 1633 // oop closures will set the has_overflown flag if we overflow the 1634 // global marking stack.
1635 1636 assert(has_overflown() || _global_mark_stack.is_empty(), 1637 "Mark stack should be empty (unless it has overflown)"); 1638 1639 assert(rp->num_queues() == active_workers, "why not"); 1640 1641 rp->verify_no_references_recorded(); 1642 assert(!rp->discovery_enabled(), "Post condition"); 1643 } 1644 1645 if (has_overflown()) { 1646 // We can not trust g1_is_alive and the contents of the heap if the marking stack 1647 // overflowed while processing references. Exit the VM. 1648 fatal("Overflow during reference processing, can not continue. Please " 1649 "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and " 1650 "restart.", MarkStackSizeMax); 1651 return; 1652 } 1653 1654 assert(_global_mark_stack.is_empty(), "Marking should have completed"); 1655 1656 { 1657 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm); 1658 WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1); 1659 } 1660 1661 // Unload Klasses, String, Code Cache, etc. 1662 if (ClassUnloadingWithConcurrentMark) { 1663 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); 1664 bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm, false /* Defer cleaning */); 1665 _g1h->complete_cleaning(&g1_is_alive, purged_classes); 1666 } else { 1667 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm); 1668 // No need to clean string table as it is treated as strong roots when 1669 // class unloading is disabled. 1670 _g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled()); 1671 } 1672 } 1673 1674 class G1PrecleanYieldClosure : public YieldClosure { 1675 G1ConcurrentMark* _cm; 1676 1677 public: 1678 G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { } 1679 1680 virtual bool should_return() { 1681 return _cm->has_aborted(); 1682 } 1683 1684 virtual bool should_return_fine_grain() { 1685 _cm->do_yield_check(); 1686 return _cm->has_aborted(); 1687 } 1688 }; 1689 1690 void G1ConcurrentMark::preclean() { 1691 assert(G1UseReferencePrecleaning, "Precleaning must be enabled."); 1692 1693 SuspendibleThreadSetJoiner joiner; 1694 1695 G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */); 1696 G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */); 1697 1698 set_concurrency_and_phase(1, true); 1699 1700 G1PrecleanYieldClosure yield_cl(this); 1701 1702 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1703 // Precleaning is single threaded. Temporarily disable MT discovery. 1704 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); 1705 rp->preclean_discovered_references(rp->is_alive_non_header(), 1706 &keep_alive, 1707 &drain_mark_stack, 1708 &yield_cl, 1709 _gc_timer_cm); 1710 } 1711 1712 // When sampling object counts, we already swapped the mark bitmaps, so we need to use 1713 // the prev bitmap determining liveness. 1714 class G1ObjectCountIsAliveClosure: public BoolObjectClosure { 1715 G1CollectedHeap* _g1h; 1716 public: 1717 G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { } 1718 1719 bool do_object_b(oop obj) { 1720 HeapWord* addr = (HeapWord*)obj; 1721 return addr != NULL && 1722 (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj)); 1723 } 1724 }; 1725 1726 void G1ConcurrentMark::report_object_count(bool mark_completed) { 1727 // Depending on the completion of the marking liveness needs to be determined 1728 // using either the next or prev bitmap. 
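// When marking completed, the bitmaps have already been swapped (see swap_mark_bitmaps() below), so the prev-bitmap based G1ObjectCountIsAliveClosure is used; otherwise liveness is determined with the regular concurrent-mark is-alive closure.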
1729 if (mark_completed) { 1730 G1ObjectCountIsAliveClosure is_alive(_g1h); 1731 _gc_tracer_cm->report_object_count_after_gc(&is_alive); 1732 } else { 1733 G1CMIsAliveClosure is_alive(_g1h); 1734 _gc_tracer_cm->report_object_count_after_gc(&is_alive); 1735 } 1736 } 1737 1738 1739 void G1ConcurrentMark::swap_mark_bitmaps() { 1740 G1CMBitMap* temp = _prev_mark_bitmap; 1741 _prev_mark_bitmap = _next_mark_bitmap; 1742 _next_mark_bitmap = temp; 1743 _g1h->collector_state()->set_clearing_next_bitmap(true); 1744 } 1745 1746 // Closure for marking entries in SATB buffers. 1747 class G1CMSATBBufferClosure : public SATBBufferClosure { 1748 private: 1749 G1CMTask* _task; 1750 G1CollectedHeap* _g1h; 1751 1752 // This is very similar to G1CMTask::deal_with_reference, but with 1753 // more relaxed requirements for the argument, so this must be more 1754 // circumspect about treating the argument as an object. 1755 void do_entry(void* entry) const { 1756 _task->increment_refs_reached(); 1757 oop const obj = static_cast<oop>(entry); 1758 _task->make_reference_grey(obj); 1759 } 1760 1761 public: 1762 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h) 1763 : _task(task), _g1h(g1h) { } 1764 1765 virtual void do_buffer(void** buffer, size_t size) { 1766 for (size_t i = 0; i < size; ++i) { 1767 do_entry(buffer[i]); 1768 } 1769 } 1770 }; 1771 1772 class G1RemarkThreadsClosure : public ThreadClosure { 1773 G1CMSATBBufferClosure _cm_satb_cl; 1774 G1CMOopClosure _cm_cl; 1775 MarkingCodeBlobClosure _code_cl; 1776 int _thread_parity; 1777 1778 public: 1779 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) : 1780 _cm_satb_cl(task, g1h), 1781 _cm_cl(g1h, task), 1782 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), 1783 _thread_parity(Threads::thread_claim_parity()) {} 1784 1785 void do_thread(Thread* thread) { 1786 if (thread->is_Java_thread()) { 1787 if (thread->claim_oops_do(true, _thread_parity)) { 1788 JavaThread* jt = (JavaThread*)thread; 1789 1790 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking, 1791 // however oops reachable from nmethods have very complex lifecycles: 1792 // * Alive if on the stack of an executing method 1793 // * Weakly reachable otherwise 1794 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be 1795 // live by the SATB invariant, but other oops recorded in nmethods may behave differently. 1796 jt->nmethods_do(&_code_cl); 1797 1798 G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl); 1799 } 1800 } else if (thread->is_VM_thread()) { 1801 if (thread->claim_oops_do(true, _thread_parity)) { 1802 G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl); 1803 } 1804 } 1805 } 1806 }; 1807 1808 class G1CMRemarkTask : public AbstractGangTask { 1809 G1ConcurrentMark* _cm; 1810 public: 1811 void work(uint worker_id) { 1812 G1CMTask* task = _cm->task(worker_id); 1813 task->record_start_time(); 1814 { 1815 ResourceMark rm; 1816 HandleMark hm; 1817 1818 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task); 1819 Threads::threads_do(&threads_f); 1820 } 1821 1822 do { 1823 task->do_marking_step(1000000000.0 /* something very large */, 1824 true /* do_termination */, 1825 false /* is_serial */); 1826 } while (task->has_aborted() && !_cm->has_overflown()); 1827 // If we overflow, then we do not want to restart.
We instead 1828 // want to abort remark and do concurrent marking again. 1829 task->record_end_time(); 1830 } 1831 1832 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) : 1833 AbstractGangTask("Par Remark"), _cm(cm) { 1834 _cm->terminator()->reset_for_reuse(active_workers); 1835 } 1836 }; 1837 1838 void G1ConcurrentMark::finalize_marking() { 1839 ResourceMark rm; 1840 HandleMark hm; 1841 1842 _g1h->ensure_parsability(false); 1843 1844 // this is remark, so we'll use up all active threads 1845 uint active_workers = _g1h->workers()->active_workers(); 1846 set_concurrency_and_phase(active_workers, false /* concurrent */); 1847 // Leave _parallel_marking_threads at its 1848 // value originally calculated in the G1ConcurrentMark 1849 // constructor and pass values of the active workers 1850 // through the gang in the task. 1851 1852 { 1853 StrongRootsScope srs(active_workers); 1854 1855 G1CMRemarkTask remarkTask(this, active_workers); 1856 // We will start all available threads, even if we decide that the 1857 // active_workers will be fewer. The extra ones will just bail out 1858 // immediately. 1859 _g1h->workers()->run_task(&remarkTask); 1860 } 1861 1862 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 1863 guarantee(has_overflown() || 1864 satb_mq_set.completed_buffers_num() == 0, 1865 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT, 1866 BOOL_TO_STR(has_overflown()), 1867 satb_mq_set.completed_buffers_num()); 1868 1869 print_stats(); 1870 } 1871 1872 void G1ConcurrentMark::flush_all_task_caches() { 1873 size_t hits = 0; 1874 size_t misses = 0; 1875 for (uint i = 0; i < _max_num_tasks; i++) { 1876 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache(); 1877 hits += stats.first; 1878 misses += stats.second; 1879 } 1880 size_t sum = hits + misses; 1881 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf", 1882 hits, misses, percent_of(hits, sum)); 1883 } 1884 1885 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) { 1886 _prev_mark_bitmap->clear_range(mr); 1887 } 1888 1889 HeapRegion* 1890 G1ConcurrentMark::claim_region(uint worker_id) { 1891 // "checkpoint" the finger 1892 HeapWord* finger = _finger; 1893 1894 while (finger < _heap.end()) { 1895 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 1896 1897 HeapRegion* curr_region = _g1h->heap_region_containing(finger); 1898 // Make sure that the reads below do not float before loading curr_region. 1899 OrderAccess::loadload(); 1900 // Above heap_region_containing may return NULL as we always scan and claim 1901 // until the end of the heap. In this case, just jump to the next region. 1902 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; 1903 1904 // Is the gap between reading the finger and doing the CAS too long? 1905 HeapWord* res = Atomic::cmpxchg(end, &_finger, finger); 1906 if (res == finger && curr_region != NULL) { 1907 // we succeeded 1908 HeapWord* bottom = curr_region->bottom(); 1909 HeapWord* limit = curr_region->next_top_at_mark_start(); 1910 1911 // notice that _finger == end cannot be guaranteed here since 1912 // someone else might have moved the finger even further 1913 assert(_finger >= end, "the finger should have moved forward"); 1914 1915 if (limit > bottom) { 1916 return curr_region; 1917 } else { 1918 assert(limit == bottom, 1919 "the region limit should be at bottom"); 1920 // we return NULL and the caller should try calling 1921 // claim_region() again.
1922 return NULL; 1923 } 1924 } else { 1925 assert(_finger > finger, "the finger should have moved forward"); 1926 // read it again 1927 finger = _finger; 1928 } 1929 } 1930 1931 return NULL; 1932 } 1933 1934 #ifndef PRODUCT 1935 class VerifyNoCSetOops { 1936 G1CollectedHeap* _g1h; 1937 const char* _phase; 1938 int _info; 1939 1940 public: 1941 VerifyNoCSetOops(const char* phase, int info = -1) : 1942 _g1h(G1CollectedHeap::heap()), 1943 _phase(phase), 1944 _info(info) 1945 { } 1946 1947 void operator()(G1TaskQueueEntry task_entry) const { 1948 if (task_entry.is_array_slice()) { 1949 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1950 return; 1951 } 1952 guarantee(oopDesc::is_oop(task_entry.obj()), 1953 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1954 p2i(task_entry.obj()), _phase, _info); 1955 guarantee(!_g1h->is_in_cset(task_entry.obj()), 1956 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1957 p2i(task_entry.obj()), _phase, _info); 1958 } 1959 }; 1960 1961 void G1ConcurrentMark::verify_no_cset_oops() { 1962 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1963 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { 1964 return; 1965 } 1966 1967 // Verify entries on the global mark stack 1968 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1969 1970 // Verify entries on the task queues 1971 for (uint i = 0; i < _max_num_tasks; ++i) { 1972 G1CMTaskQueue* queue = _task_queues->queue(i); 1973 queue->iterate(VerifyNoCSetOops("Queue", i)); 1974 } 1975 1976 // Verify the global finger 1977 HeapWord* global_finger = finger(); 1978 if (global_finger != NULL && global_finger < _heap.end()) { 1979 // Since we always iterate over all regions, we might get a NULL HeapRegion 1980 // here. 1981 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1982 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1983 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1984 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1985 } 1986 1987 // Verify the task fingers 1988 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 1989 for (uint i = 0; i < _num_concurrent_workers; ++i) { 1990 G1CMTask* task = _tasks[i]; 1991 HeapWord* task_finger = task->finger(); 1992 if (task_finger != NULL && task_finger < _heap.end()) { 1993 // See above note on the global finger verification. 1994 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 1995 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 1996 !task_hr->in_collection_set(), 1997 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1998 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 1999 } 2000 } 2001 } 2002 #endif // PRODUCT 2003 2004 void G1ConcurrentMark::rebuild_rem_set_concurrently() { 2005 _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset); 2006 } 2007 2008 void G1ConcurrentMark::print_stats() { 2009 if (!log_is_enabled(Debug, gc, stats)) { 2010 return; 2011 } 2012 log_debug(gc, stats)("---------------------------------------------------------------------"); 2013 for (size_t i = 0; i < _num_active_tasks; ++i) { 2014 _tasks[i]->print_stats(); 2015 log_debug(gc, stats)("---------------------------------------------------------------------"); 2016 } 2017 } 2018 2019 void G1ConcurrentMark::concurrent_cycle_abort() { 2020 if (!cm_thread()->during_cycle() || _has_aborted) { 2021 // We haven't started a concurrent cycle or we have already aborted it. 
No need to do anything. 2022 return; 2023 } 2024 2025 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2026 // concurrent bitmap clearing. 2027 { 2028 GCTraceTime(Debug, gc) debug("Clear Next Bitmap"); 2029 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false); 2030 } 2031 // Note we cannot clear the previous marking bitmap here 2032 // since VerifyDuringGC verifies the objects marked during 2033 // a full GC against the previous bitmap. 2034 2035 // Empty mark stack 2036 reset_marking_for_restart(); 2037 for (uint i = 0; i < _max_num_tasks; ++i) { 2038 _tasks[i]->clear_region_fields(); 2039 } 2040 _first_overflow_barrier_sync.abort(); 2041 _second_overflow_barrier_sync.abort(); 2042 _has_aborted = true; 2043 2044 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2045 satb_mq_set.abandon_partial_marking(); 2046 // This can be called either during or outside marking, we'll read 2047 // the expected_active value from the SATB queue set. 2048 satb_mq_set.set_active_all_threads( 2049 false, /* new active value */ 2050 satb_mq_set.is_active() /* expected_active */); 2051 } 2052 2053 static void print_ms_time_info(const char* prefix, const char* name, 2054 NumberSeq& ns) { 2055 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2056 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2057 if (ns.num() > 0) { 2058 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", 2059 prefix, ns.sd(), ns.maximum()); 2060 } 2061 } 2062 2063 void G1ConcurrentMark::print_summary_info() { 2064 Log(gc, marking) log; 2065 if (!log.is_trace()) { 2066 return; 2067 } 2068 2069 log.trace(" Concurrent marking:"); 2070 print_ms_time_info(" ", "init marks", _init_times); 2071 print_ms_time_info(" ", "remarks", _remark_times); 2072 { 2073 print_ms_time_info(" ", "final marks", _remark_mark_times); 2074 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2075 2076 } 2077 print_ms_time_info(" ", "cleanups", _cleanup_times); 2078 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2079 _total_cleanup_time, (_cleanup_times.num() > 0 ? 
_total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2080 log.trace(" Total stop_world time = %8.2f s.", 2081 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2082 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2083 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum()); 2084 } 2085 2086 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2087 _concurrent_workers->print_worker_threads_on(st); 2088 } 2089 2090 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2091 _concurrent_workers->threads_do(tc); 2092 } 2093 2094 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2095 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2096 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap)); 2097 _prev_mark_bitmap->print_on_error(st, " Prev Bits: "); 2098 _next_mark_bitmap->print_on_error(st, " Next Bits: "); 2099 } 2100 2101 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2102 ReferenceProcessor* result = g1h->ref_processor_cm(); 2103 assert(result != NULL, "CM reference processor should not be NULL"); 2104 return result; 2105 } 2106 2107 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2108 G1CMTask* task) 2109 : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)), 2110 _g1h(g1h), _task(task) 2111 { } 2112 2113 void G1CMTask::setup_for_region(HeapRegion* hr) { 2114 assert(hr != NULL, 2115 "claim_region() should have filtered out NULL regions"); 2116 _curr_region = hr; 2117 _finger = hr->bottom(); 2118 update_region_limit(); 2119 } 2120 2121 void G1CMTask::update_region_limit() { 2122 HeapRegion* hr = _curr_region; 2123 HeapWord* bottom = hr->bottom(); 2124 HeapWord* limit = hr->next_top_at_mark_start(); 2125 2126 if (limit == bottom) { 2127 // The region was collected underneath our feet. 2128 // We set the finger to bottom to ensure that the bitmap 2129 // iteration that will follow this will not do anything. 2130 // (this is not a condition that holds when we set the region up, 2131 // as the region is not supposed to be empty in the first place) 2132 _finger = bottom; 2133 } else if (limit >= _region_limit) { 2134 assert(limit >= _finger, "peace of mind"); 2135 } else { 2136 assert(limit < _region_limit, "only way to get here"); 2137 // This can happen under some pretty unusual circumstances. An 2138 // evacuation pause empties the region underneath our feet (NTAMS 2139 // at bottom). We then do some allocation in the region (NTAMS 2140 // stays at bottom), followed by the region being used as a GC 2141 // alloc region (NTAMS will move to top() and the objects 2142 // originally below it will be grayed). All objects now marked in 2143 // the region are explicitly grayed, if below the global finger, 2144 // and we do not need in fact to scan anything else. So, we simply 2145 // set _finger to be limit to ensure that the bitmap iteration 2146 // doesn't do anything. 2147 _finger = limit; 2148 } 2149 2150 _region_limit = limit; 2151 } 2152 2153 void G1CMTask::giveup_current_region() { 2154 assert(_curr_region != NULL, "invariant"); 2155 clear_region_fields(); 2156 } 2157 2158 void G1CMTask::clear_region_fields() { 2159 // Values for these three fields that indicate that we're not 2160 // holding on to a region. 
2161 _curr_region = NULL; 2162 _finger = NULL; 2163 _region_limit = NULL; 2164 } 2165 2166 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 2167 if (cm_oop_closure == NULL) { 2168 assert(_cm_oop_closure != NULL, "invariant"); 2169 } else { 2170 assert(_cm_oop_closure == NULL, "invariant"); 2171 } 2172 _cm_oop_closure = cm_oop_closure; 2173 } 2174 2175 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) { 2176 guarantee(next_mark_bitmap != NULL, "invariant"); 2177 _next_mark_bitmap = next_mark_bitmap; 2178 clear_region_fields(); 2179 2180 _calls = 0; 2181 _elapsed_time_ms = 0.0; 2182 _termination_time_ms = 0.0; 2183 _termination_start_time_ms = 0.0; 2184 2185 _mark_stats_cache.reset(); 2186 } 2187 2188 bool G1CMTask::should_exit_termination() { 2189 regular_clock_call(); 2190 // This is called when we are in the termination protocol. We should 2191 // quit if, for some reason, this task wants to abort or the global 2192 // stack is not empty (this means that we can get work from it). 2193 return !_cm->mark_stack_empty() || has_aborted(); 2194 } 2195 2196 void G1CMTask::reached_limit() { 2197 assert(_words_scanned >= _words_scanned_limit || 2198 _refs_reached >= _refs_reached_limit , 2199 "shouldn't have been called otherwise"); 2200 regular_clock_call(); 2201 } 2202 2203 void G1CMTask::regular_clock_call() { 2204 if (has_aborted()) { 2205 return; 2206 } 2207 2208 // First, we need to recalculate the words scanned and refs reached 2209 // limits for the next clock call. 2210 recalculate_limits(); 2211 2212 // During the regular clock call we do the following: 2213 2214 // (1) If an overflow has been flagged, then we abort. 2215 if (_cm->has_overflown()) { 2216 set_has_aborted(); 2217 return; 2218 } 2219 2220 // If we are not concurrent (i.e. we're doing remark) we don't need 2221 // to check anything else. The other steps are only needed during 2222 // the concurrent marking phase. 2223 if (!_cm->concurrent()) { 2224 return; 2225 } 2226 2227 // (2) If marking has been aborted for Full GC, then we also abort. 2228 if (_cm->has_aborted()) { 2229 set_has_aborted(); 2230 return; 2231 } 2232 2233 double curr_time_ms = os::elapsedVTime() * 1000.0; 2234 2235 // (3) We check whether we should yield. If we have to, then we abort. 2236 if (SuspendibleThreadSet::should_yield()) { 2237 // We should yield. To do this we abort the task. The caller is 2238 // responsible for yielding. 2239 set_has_aborted(); 2240 return; 2241 } 2242 2243 // (4) We check whether we've reached our time quota. If we have, 2244 // then we abort. 2245 double elapsed_time_ms = curr_time_ms - _start_time_ms; 2246 if (elapsed_time_ms > _time_target_ms) { 2247 set_has_aborted(); 2248 _has_timed_out = true; 2249 return; 2250 } 2251 2252 // (5) Finally, we check whether there are enough completed SATB 2253 // buffers available for processing. If there are, we abort.
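// Aborting here makes do_marking_step() return to its caller; the queued buffers are then picked up by drain_satb_buffers() at the start of the next invocation.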
2254 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2255 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 2256 // we do need to process SATB buffers, we'll abort and restart 2257 // the marking task to do so 2258 set_has_aborted(); 2259 return; 2260 } 2261 } 2262 2263 void G1CMTask::recalculate_limits() { 2264 _real_words_scanned_limit = _words_scanned + words_scanned_period; 2265 _words_scanned_limit = _real_words_scanned_limit; 2266 2267 _real_refs_reached_limit = _refs_reached + refs_reached_period; 2268 _refs_reached_limit = _real_refs_reached_limit; 2269 } 2270 2271 void G1CMTask::decrease_limits() { 2272 // This is called when we believe that we're going to do an infrequent 2273 // operation which will increase the per byte scanned cost (i.e. move 2274 // entries to/from the global stack). It basically tries to decrease the 2275 // scanning limit so that the clock is called earlier. 2276 2277 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4; 2278 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4; 2279 } 2280 2281 void G1CMTask::move_entries_to_global_stack() { 2282 // Local array where we'll store the entries that will be popped 2283 // from the local queue. 2284 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2285 2286 size_t n = 0; 2287 G1TaskQueueEntry task_entry; 2288 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) { 2289 buffer[n] = task_entry; 2290 ++n; 2291 } 2292 if (n < G1CMMarkStack::EntriesPerChunk) { 2293 buffer[n] = G1TaskQueueEntry(); 2294 } 2295 2296 if (n > 0) { 2297 if (!_cm->mark_stack_push(buffer)) { 2298 set_has_aborted(); 2299 } 2300 } 2301 2302 // This operation was quite expensive, so decrease the limits. 2303 decrease_limits(); 2304 } 2305 2306 bool G1CMTask::get_entries_from_global_stack() { 2307 // Local array where we'll store the entries that will be popped 2308 // from the global stack. 2309 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2310 2311 if (!_cm->mark_stack_pop(buffer)) { 2312 return false; 2313 } 2314 2315 // We did actually pop at least one entry. 2316 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) { 2317 G1TaskQueueEntry task_entry = buffer[i]; 2318 if (task_entry.is_null()) { 2319 break; 2320 } 2321 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj())); 2322 bool success = _task_queue->push(task_entry); 2323 // We only call this when the local queue is empty or under a 2324 // given target limit. So, we do not expect this push to fail. 2325 assert(success, "invariant"); 2326 } 2327 2328 // This operation was quite expensive, so decrease the limits 2329 decrease_limits(); 2330 return true; 2331 } 2332 2333 void G1CMTask::drain_local_queue(bool partially) { 2334 if (has_aborted()) { 2335 return; 2336 } 2337 2338 // Decide what the target size is, depending whether we're going to 2339 // drain it partially (so that other tasks can steal if they run out 2340 // of things to do) or totally (at the very end). 
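// For a partial drain the target is the smaller of a third of the queue's capacity and GCDrainStackTargetSize; a total drain processes entries until the queue is completely empty (target 0).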
2341 size_t target_size; 2342 if (partially) { 2343 target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize); 2344 } else { 2345 target_size = 0; 2346 } 2347 2348 if (_task_queue->size() > target_size) { 2349 G1TaskQueueEntry entry; 2350 bool ret = _task_queue->pop_local(entry); 2351 while (ret) { 2352 scan_task_entry(entry); 2353 if (_task_queue->size() <= target_size || has_aborted()) { 2354 ret = false; 2355 } else { 2356 ret = _task_queue->pop_local(entry); 2357 } 2358 } 2359 } 2360 } 2361 2362 void G1CMTask::drain_global_stack(bool partially) { 2363 if (has_aborted()) { 2364 return; 2365 } 2366 2367 // We have a policy to drain the local queue before we attempt to 2368 // drain the global stack. 2369 assert(partially || _task_queue->size() == 0, "invariant"); 2370 2371 // Decide what the target size is, depending whether we're going to 2372 // drain it partially (so that other tasks can steal if they run out 2373 // of things to do) or totally (at the very end). 2374 // Notice that when draining the global mark stack partially, due to the raciness 2375 // of the mark stack size update we might in fact drop below the target. But, 2376 // this is not a problem. 2377 // In case of total draining, we simply process until the global mark stack is 2378 // totally empty, disregarding the size counter. 2379 if (partially) { 2380 size_t const target_size = _cm->partial_mark_stack_size_target(); 2381 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 2382 if (get_entries_from_global_stack()) { 2383 drain_local_queue(partially); 2384 } 2385 } 2386 } else { 2387 while (!has_aborted() && get_entries_from_global_stack()) { 2388 drain_local_queue(partially); 2389 } 2390 } 2391 } 2392 2393 // SATB Queue has several assumptions on whether to call the par or 2394 // non-par versions of the methods. This is why some of the code is 2395 // replicated. We should really get rid of the single-threaded version 2396 // of the code to simplify things. 2397 void G1CMTask::drain_satb_buffers() { 2398 if (has_aborted()) { 2399 return; 2400 } 2401 2402 // We set this so that the regular clock knows that we're in the 2403 // middle of draining buffers and doesn't set the abort flag when it 2404 // notices that SATB buffers are available for draining. It'd be 2405 // very counterproductive if it did that. :-) 2406 _draining_satb_buffers = true; 2407 2408 G1CMSATBBufferClosure satb_cl(this, _g1h); 2409 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2410 2411 // This keeps claiming and applying the closure to completed buffers 2412 // until we run out of buffers or we need to abort.
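// regular_clock_call() runs after each buffer, so the time target, yield requests and overflow are still checked while draining; the SATB buffer check itself is suppressed via _draining_satb_buffers.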
2413 while (!has_aborted() && 2414 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 2415 regular_clock_call(); 2416 } 2417 2418 _draining_satb_buffers = false; 2419 2420 assert(has_aborted() || 2421 _cm->concurrent() || 2422 satb_mq_set.completed_buffers_num() == 0, "invariant"); 2423 2424 // again, this was a potentially expensive operation, decrease the 2425 // limits to get the regular clock call early 2426 decrease_limits(); 2427 } 2428 2429 void G1CMTask::clear_mark_stats_cache(uint region_idx) { 2430 _mark_stats_cache.reset(region_idx); 2431 } 2432 2433 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() { 2434 return _mark_stats_cache.evict_all(); 2435 } 2436 2437 void G1CMTask::print_stats() { 2438 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls); 2439 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 2440 _elapsed_time_ms, _termination_time_ms); 2441 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms", 2442 _step_times_ms.num(), 2443 _step_times_ms.avg(), 2444 _step_times_ms.sd(), 2445 _step_times_ms.maximum(), 2446 _step_times_ms.sum()); 2447 size_t const hits = _mark_stats_cache.hits(); 2448 size_t const misses = _mark_stats_cache.misses(); 2449 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f", 2450 hits, misses, percent_of(hits, hits + misses)); 2451 } 2452 2453 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) { 2454 return _task_queues->steal(worker_id, task_entry); 2455 } 2456 2457 /***************************************************************************** 2458 2459 The do_marking_step(time_target_ms, ...) method is the building 2460 block of the parallel marking framework. It can be called in parallel 2461 with other invocations of do_marking_step() on different tasks 2462 (but only one per task, obviously) and concurrently with the 2463 mutator threads, or during remark, hence it eliminates the need 2464 for two versions of the code. When called during remark, it will 2465 pick up from where the task left off during the concurrent marking 2466 phase. Interestingly, tasks are also claimable during evacuation 2467 pauses too, since do_marking_step() ensures that it aborts before 2468 it needs to yield. 2469 2470 The data structures that it uses to do marking work are the 2471 following: 2472 2473 (1) Marking Bitmap. If there are gray objects that appear only 2474 on the bitmap (this happens either when dealing with an overflow 2475 or when the initial marking phase has simply marked the roots 2476 and didn't push them on the stack), then tasks claim heap 2477 regions whose bitmap they then scan to find gray objects. A 2478 global finger indicates where the end of the last claimed region 2479 is. A local finger indicates how far into the region a task has 2480 scanned. The two fingers are used to determine how to gray an 2481 object (i.e. whether simply marking it is OK, as it will be 2482 visited by a task in the future, or whether it needs to be also 2483 pushed on a stack). 2484 2485 (2) Local Queue. The local queue of the task which is accessed 2486 reasonably efficiently by the task. Other tasks can steal from 2487 it when they run out of work. Throughout the marking phase, a 2488 task attempts to keep its local queue short but not totally 2489 empty, so that entries are available for stealing by other 2490 tasks. 
Only when there is no more work, a task will totally 2491 drain its local queue. 2492 2493 (3) Global Mark Stack. This handles local queue overflow. During 2494 marking only sets of entries are moved between it and the local 2495 queues, as access to it requires a mutex and more fine-grain 2496 interaction with it which might cause contention. If it 2497 overflows, then the marking phase should restart and iterate 2498 over the bitmap to identify gray objects. Throughout the marking 2499 phase, tasks attempt to keep the global mark stack at a small 2500 length but not totally empty, so that entries are available for 2501 popping by other tasks. Only when there is no more work, tasks 2502 will totally drain the global mark stack. 2503 2504 (4) SATB Buffer Queue. This is where completed SATB buffers are 2505 made available. Buffers are regularly removed from this queue 2506 and scanned for roots, so that the queue doesn't get too 2507 long. During remark, all completed buffers are processed, as 2508 well as the filled in parts of any uncompleted buffers. 2509 2510 The do_marking_step() method tries to abort when the time target 2511 has been reached. There are a few other cases when the 2512 do_marking_step() method also aborts: 2513 2514 (1) When the marking phase has been aborted (after a Full GC). 2515 2516 (2) When a global overflow (on the global stack) has been 2517 triggered. Before the task aborts, it will actually sync up with 2518 the other tasks to ensure that all the marking data structures 2519 (local queues, stacks, fingers etc.) are re-initialized so that 2520 when do_marking_step() completes, the marking phase can 2521 immediately restart. 2522 2523 (3) When enough completed SATB buffers are available. The 2524 do_marking_step() method only tries to drain SATB buffers right 2525 at the beginning. So, if enough buffers are available, the 2526 marking step aborts and the SATB buffers are processed at 2527 the beginning of the next invocation. 2528 2529 (4) To yield. when we have to yield then we abort and yield 2530 right at the end of do_marking_step(). This saves us from a lot 2531 of hassle as, by yielding we might allow a Full GC. If this 2532 happens then objects will be compacted underneath our feet, the 2533 heap might shrink, etc. We save checking for this by just 2534 aborting and doing the yield right at the end. 2535 2536 From the above it follows that the do_marking_step() method should 2537 be called in a loop (or, otherwise, regularly) until it completes. 2538 2539 If a marking step completes without its has_aborted() flag being 2540 true, it means it has completed the current marking phase (and 2541 also all other marking tasks have done so and have all synced up). 2542 2543 A method called regular_clock_call() is invoked "regularly" (in 2544 sub ms intervals) throughout marking. It is this clock method that 2545 checks all the abort conditions which were mentioned above and 2546 decides when the task should abort. A work-based scheme is used to 2547 trigger this clock method: when the number of object words the 2548 marking phase has scanned or the number of references the marking 2549 phase has visited reach a given limit. Additional invocations to 2550 the method clock have been planted in a few other strategic places 2551 too. The initial reason for the clock method was to avoid calling 2552 vtime too regularly, as it is quite expensive. 
So, once it was in 2553 place, it was natural to piggy-back all the other conditions on it 2554 too and not constantly check them throughout the code. 2555 2556 If do_termination is true then do_marking_step will enter its 2557 termination protocol. 2558 2559 The value of is_serial must be true when do_marking_step is being 2560 called serially (i.e. by the VMThread) and do_marking_step should 2561 skip any synchronization in the termination and overflow code. 2562 Examples include the serial remark code and the serial reference 2563 processing closures. 2564 2565 The value of is_serial must be false when do_marking_step is 2566 being called by any of the worker threads in a work gang. 2567 Examples include the concurrent marking code (CMMarkingTask), 2568 the MT remark code, and the MT reference processing closures. 2569 2570 *****************************************************************************/ 2571 2572 void G1CMTask::do_marking_step(double time_target_ms, 2573 bool do_termination, 2574 bool is_serial) { 2575 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2576 2577 _start_time_ms = os::elapsedVTime() * 1000.0; 2578 2579 // If do_stealing is true then do_marking_step will attempt to 2580 // steal work from the other G1CMTasks. It only makes sense to 2581 // enable stealing when the termination protocol is enabled 2582 // and do_marking_step() is not being called serially. 2583 bool do_stealing = do_termination && !is_serial; 2584 2585 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 2586 _time_target_ms = time_target_ms - diff_prediction_ms; 2587 2588 // set up the variables that are used in the work-based scheme to 2589 // call the regular clock method 2590 _words_scanned = 0; 2591 _refs_reached = 0; 2592 recalculate_limits(); 2593 2594 // clear all flags 2595 clear_has_aborted(); 2596 _has_timed_out = false; 2597 _draining_satb_buffers = false; 2598 2599 ++_calls; 2600 2601 // Set up the bitmap and oop closures. Anything that uses them is 2602 // eventually called from this method, so it is OK to allocate these 2603 // statically. 2604 G1CMBitMapClosure bitmap_closure(this, _cm); 2605 G1CMOopClosure cm_oop_closure(_g1h, this); 2606 set_cm_oop_closure(&cm_oop_closure); 2607 2608 if (_cm->has_overflown()) { 2609 // This can happen if the mark stack overflows during a GC pause 2610 // and this task, after a yield point, restarts. We have to abort 2611 // as we need to get into the overflow protocol which happens 2612 // right at the end of this task. 2613 set_has_aborted(); 2614 } 2615 2616 // First drain any available SATB buffers. After this, we will not 2617 // look at SATB buffers before the next invocation of this method. 2618 // If enough completed SATB buffers are queued up, the regular clock 2619 // will abort this task so that it restarts. 2620 drain_satb_buffers(); 2621 // ...then partially drain the local queue and the global stack 2622 drain_local_queue(true); 2623 drain_global_stack(true); 2624 2625 do { 2626 if (!has_aborted() && _curr_region != NULL) { 2627 // This means that we're already holding on to a region. 2628 assert(_finger != NULL, "if region is not NULL, then the finger " 2629 "should not be NULL either"); 2630 2631 // We might have restarted this task after an evacuation pause 2632 // which might have evacuated the region we're holding on to 2633 // underneath our feet. 
Let's read its limit again to make sure 2634 // that we do not iterate over a region of the heap that 2635 // contains garbage (update_region_limit() will also move 2636 // _finger to the start of the region if it is found empty). 2637 update_region_limit(); 2638 // We will start from _finger not from the start of the region, 2639 // as we might be restarting this task after aborting half-way 2640 // through scanning this region. In this case, _finger points to 2641 // the address where we last found a marked object. If this is a 2642 // fresh region, _finger points to start(). 2643 MemRegion mr = MemRegion(_finger, _region_limit); 2644 2645 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2646 "humongous regions should go around loop once only"); 2647 2648 // Some special cases: 2649 // If the memory region is empty, we can just give up the region. 2650 // If the current region is humongous then we only need to check 2651 // the bitmap for the bit associated with the start of the object, 2652 // scan the object if it's live, and give up the region. 2653 // Otherwise, let's iterate over the bitmap of the part of the region 2654 // that is left. 2655 // If the iteration is successful, give up the region. 2656 if (mr.is_empty()) { 2657 giveup_current_region(); 2658 regular_clock_call(); 2659 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2660 if (_next_mark_bitmap->is_marked(mr.start())) { 2661 // The object is marked - apply the closure 2662 bitmap_closure.do_addr(mr.start()); 2663 } 2664 // Even if this task aborted while scanning the humongous object 2665 // we can (and should) give up the current region. 2666 giveup_current_region(); 2667 regular_clock_call(); 2668 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) { 2669 giveup_current_region(); 2670 regular_clock_call(); 2671 } else { 2672 assert(has_aborted(), "currently the only way to do so"); 2673 // The only way to abort the bitmap iteration is to return 2674 // false from the do_bit() method. However, inside the 2675 // do_bit() method we move the _finger to point to the 2676 // object currently being looked at. So, if we bail out, we 2677 // have definitely set _finger to something non-null. 2678 assert(_finger != NULL, "invariant"); 2679 2680 // Region iteration was actually aborted. So now _finger 2681 // points to the address of the object we last scanned. If we 2682 // leave it there, when we restart this task, we will rescan 2683 // the object. It is easy to avoid this. We move the finger by 2684 // enough to point to the next possible object header. 2685 assert(_finger < _region_limit, "invariant"); 2686 HeapWord* const new_finger = _finger + ((oop)_finger)->size(); 2687 // Check if bitmap iteration was aborted while scanning the last object 2688 if (new_finger >= _region_limit) { 2689 giveup_current_region(); 2690 } else { 2691 move_finger_to(new_finger); 2692 } 2693 } 2694 } 2695 // At this point we have either completed iterating over the 2696 // region we were holding on to, or we have aborted. 2697 2698 // We then partially drain the local queue and the global stack. 2699 // (Do we really need this?) 2700 drain_local_queue(true); 2701 drain_global_stack(true); 2702 2703 // Read the note on the claim_region() method on why it might 2704 // return NULL with potentially more regions available for 2705 // claiming and why we have to check out_of_regions() to determine 2706 // whether we're done or not. 
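// Note that claim_region() returns NULL both when the finger has reached the end of the heap and when the claimed region turned out to be empty (limit == bottom), so out_of_regions() is the authoritative check for whether we are really done.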
2707 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 2708 // We are going to try to claim a new region. We should have 2709 // given up on the previous one. 2710 // Separated the asserts so that we know which one fires. 2711 assert(_curr_region == NULL, "invariant"); 2712 assert(_finger == NULL, "invariant"); 2713 assert(_region_limit == NULL, "invariant"); 2714 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 2715 if (claimed_region != NULL) { 2716 // Yes, we managed to claim one 2717 setup_for_region(claimed_region); 2718 assert(_curr_region == claimed_region, "invariant"); 2719 } 2720 // It is important to call the regular clock here. It might take 2721 // a while to claim a region if, for example, we hit a large 2722 // block of empty regions. So we need to call the regular clock 2723 // method once round the loop to make sure it's called 2724 // frequently enough. 2725 regular_clock_call(); 2726 } 2727 2728 if (!has_aborted() && _curr_region == NULL) { 2729 assert(_cm->out_of_regions(), 2730 "at this point we should be out of regions"); 2731 } 2732 } while ( _curr_region != NULL && !has_aborted()); 2733 2734 if (!has_aborted()) { 2735 // We cannot check whether the global stack is empty, since other 2736 // tasks might be pushing objects to it concurrently. 2737 assert(_cm->out_of_regions(), 2738 "at this point we should be out of regions"); 2739 // Try to reduce the number of available SATB buffers so that 2740 // remark has less work to do. 2741 drain_satb_buffers(); 2742 } 2743 2744 // Since we've done everything else, we can now totally drain the 2745 // local queue and global stack. 2746 drain_local_queue(false); 2747 drain_global_stack(false); 2748 2749 // Attempt at work stealing from other task's queues. 2750 if (do_stealing && !has_aborted()) { 2751 // We have not aborted. This means that we have finished all that 2752 // we could. Let's try to do some stealing... 2753 2754 // We cannot check whether the global stack is empty, since other 2755 // tasks might be pushing objects to it concurrently. 2756 assert(_cm->out_of_regions() && _task_queue->size() == 0, 2757 "only way to reach here"); 2758 while (!has_aborted()) { 2759 G1TaskQueueEntry entry; 2760 if (_cm->try_stealing(_worker_id, entry)) { 2761 scan_task_entry(entry); 2762 2763 // And since we're towards the end, let's totally drain the 2764 // local queue and global stack. 2765 drain_local_queue(false); 2766 drain_global_stack(false); 2767 } else { 2768 break; 2769 } 2770 } 2771 } 2772 2773 // We still haven't aborted. Now, let's try to get into the 2774 // termination protocol. 2775 if (do_termination && !has_aborted()) { 2776 // We cannot check whether the global stack is empty, since other 2777 // tasks might be concurrently pushing objects on it. 2778 // Separated the asserts so that we know which one fires. 2779 assert(_cm->out_of_regions(), "only way to reach here"); 2780 assert(_task_queue->size() == 0, "only way to reach here"); 2781 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 2782 2783 // The G1CMTask class also extends the TerminatorTerminator class, 2784 // hence its should_exit_termination() method will also decide 2785 // whether to exit the termination protocol or not. 2786 bool finished = (is_serial || 2787 _cm->terminator()->offer_termination(this)); 2788 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 2789 _termination_time_ms += 2790 termination_end_time_ms - _termination_start_time_ms; 2791 2792 if (finished) { 2793 // We're all done. 
2794 2795 // We can now guarantee that the global stack is empty, since 2796 // all other tasks have finished. We separated the guarantees so 2797 // that, if a condition is false, we can immediately find out 2798 // which one. 2799 guarantee(_cm->out_of_regions(), "only way to reach here"); 2800 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 2801 guarantee(_task_queue->size() == 0, "only way to reach here"); 2802 guarantee(!_cm->has_overflown(), "only way to reach here"); 2803 } else { 2804 // Apparently there's more work to do. Let's abort this task. The caller 2805 // will restart it and we can hopefully find more things to do. 2806 set_has_aborted(); 2807 } 2808 } 2809 2810 // Mainly for debugging purposes to make sure that a pointer to the 2811 // closure which was statically allocated in this frame doesn't 2812 // escape it by accident. 2813 set_cm_oop_closure(NULL); 2814 double end_time_ms = os::elapsedVTime() * 1000.0; 2815 double elapsed_time_ms = end_time_ms - _start_time_ms; 2816 // Update the step history. 2817 _step_times_ms.add(elapsed_time_ms); 2818 2819 if (has_aborted()) { 2820 // The task was aborted for some reason. 2821 if (_has_timed_out) { 2822 double diff_ms = elapsed_time_ms - _time_target_ms; 2823 // Keep statistics of how well we did with respect to hitting 2824 // our target only if we actually timed out (if we aborted for 2825 // other reasons, then the results might get skewed). 2826 _marking_step_diffs_ms.add(diff_ms); 2827 } 2828 2829 if (_cm->has_overflown()) { 2830 // This is the interesting one. We aborted because a global 2831 // overflow was raised. This means we have to restart the 2832 // marking phase and start iterating over regions. However, in 2833 // order to do this we have to make sure that all tasks stop 2834 // what they are doing and re-initialize in a safe manner. We 2835 // will achieve this with the use of two barrier sync points. 2836 2837 if (!is_serial) { 2838 // We only need to enter the sync barrier if being called 2839 // from a parallel context 2840 _cm->enter_first_sync_barrier(_worker_id); 2841 2842 // When we exit this sync barrier we know that all tasks have 2843 // stopped doing marking work. So, it's now safe to 2844 // re-initialize our data structures. 2845 } 2846 2847 clear_region_fields(); 2848 flush_mark_stats_cache(); 2849 2850 if (!is_serial) { 2851 // If we're executing the concurrent phase of marking, reset the marking 2852 // state; otherwise the marking state is reset after reference processing, 2853 // during the remark pause. 2854 // If we reset here as a result of an overflow during the remark we will 2855 // see assertion failures from any subsequent set_concurrency_and_phase() 2856 // calls. 2857 if (_cm->concurrent() && _worker_id == 0) { 2858 // Worker 0 is responsible for clearing the global data structures because 2859 // of an overflow. During STW we should not clear the overflow flag (in 2860 // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit 2861 // this method to abort the pause and restart concurrent marking. 2862 _cm->reset_marking_for_restart(); 2863 2864 log_info(gc, marking)("Concurrent Mark reset for overflow"); 2865 } 2866 2867 // ...and enter the second barrier. 2868 _cm->enter_second_sync_barrier(_worker_id); 2869 } 2870 // At this point, if we're in the concurrent phase of 2871 // marking, everything has been re-initialized and we're 2872 // ready to restart.
2873 } 2874 } 2875 } 2876 2877 G1CMTask::G1CMTask(uint worker_id, 2878 G1ConcurrentMark* cm, 2879 G1CMTaskQueue* task_queue, 2880 G1RegionMarkStats* mark_stats, 2881 uint max_regions) : 2882 _objArray_processor(this), 2883 _worker_id(worker_id), 2884 _g1h(G1CollectedHeap::heap()), 2885 _cm(cm), 2886 _next_mark_bitmap(NULL), 2887 _task_queue(task_queue), 2888 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize), 2889 _calls(0), 2890 _time_target_ms(0.0), 2891 _start_time_ms(0.0), 2892 _cm_oop_closure(NULL), 2893 _curr_region(NULL), 2894 _finger(NULL), 2895 _region_limit(NULL), 2896 _words_scanned(0), 2897 _words_scanned_limit(0), 2898 _real_words_scanned_limit(0), 2899 _refs_reached(0), 2900 _refs_reached_limit(0), 2901 _real_refs_reached_limit(0), 2902 _has_aborted(false), 2903 _has_timed_out(false), 2904 _draining_satb_buffers(false), 2905 _step_times_ms(), 2906 _elapsed_time_ms(0.0), 2907 _termination_time_ms(0.0), 2908 _termination_start_time_ms(0.0), 2909 _marking_step_diffs_ms() 2910 { 2911 guarantee(task_queue != NULL, "invariant"); 2912 2913 _marking_step_diffs_ms.add(0.5); 2914 } 2915 2916 // These are formatting macros that are used below to ensure 2917 // consistent formatting. The *_H_* versions are used to format the 2918 // header for a particular value and they should be kept consistent 2919 // with the corresponding macro. Also note that most of the macros add 2920 // the necessary white space (as a prefix) which makes them a bit 2921 // easier to compose. 2922 2923 // All the output lines are prefixed with this string to be able to 2924 // identify them easily in a large log file. 2925 #define G1PPRL_LINE_PREFIX "###" 2926 2927 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2928 #ifdef _LP64 2929 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2930 #else // _LP64 2931 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2932 #endif // _LP64 2933 2934 // For per-region info 2935 #define G1PPRL_TYPE_FORMAT " %-4s" 2936 #define G1PPRL_TYPE_H_FORMAT " %4s" 2937 #define G1PPRL_STATE_FORMAT " %-5s" 2938 #define G1PPRL_STATE_H_FORMAT " %5s" 2939 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2940 #define G1PPRL_BYTE_H_FORMAT " %9s" 2941 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2942 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2943 2944 // For summary info 2945 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2946 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2947 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2948 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2949 2950 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2951 _total_used_bytes(0), _total_capacity_bytes(0), 2952 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2953 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2954 { 2955 if (!log_is_enabled(Trace, gc, liveness)) { 2956 return; 2957 } 2958 2959 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2960 MemRegion g1_reserved = g1h->g1_reserved(); 2961 double now = os::elapsedTime(); 2962 2963 // Print the header of the output. 
2964 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2965 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2966 G1PPRL_SUM_ADDR_FORMAT("reserved") 2967 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2968 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2969 HeapRegion::GrainBytes); 2970 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2971 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2972 G1PPRL_TYPE_H_FORMAT 2973 G1PPRL_ADDR_BASE_H_FORMAT 2974 G1PPRL_BYTE_H_FORMAT 2975 G1PPRL_BYTE_H_FORMAT 2976 G1PPRL_BYTE_H_FORMAT 2977 G1PPRL_DOUBLE_H_FORMAT 2978 G1PPRL_BYTE_H_FORMAT 2979 G1PPRL_STATE_H_FORMAT 2980 G1PPRL_BYTE_H_FORMAT, 2981 "type", "address-range", 2982 "used", "prev-live", "next-live", "gc-eff", 2983 "remset", "state", "code-roots"); 2984 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2985 G1PPRL_TYPE_H_FORMAT 2986 G1PPRL_ADDR_BASE_H_FORMAT 2987 G1PPRL_BYTE_H_FORMAT 2988 G1PPRL_BYTE_H_FORMAT 2989 G1PPRL_BYTE_H_FORMAT 2990 G1PPRL_DOUBLE_H_FORMAT 2991 G1PPRL_BYTE_H_FORMAT 2992 G1PPRL_STATE_H_FORMAT 2993 G1PPRL_BYTE_H_FORMAT, 2994 "", "", 2995 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 2996 "(bytes)", "", "(bytes)"); 2997 } 2998 2999 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { 3000 if (!log_is_enabled(Trace, gc, liveness)) { 3001 return false; 3002 } 3003 3004 const char* type = r->get_type_str(); 3005 HeapWord* bottom = r->bottom(); 3006 HeapWord* end = r->end(); 3007 size_t capacity_bytes = r->capacity(); 3008 size_t used_bytes = r->used(); 3009 size_t prev_live_bytes = r->live_bytes(); 3010 size_t next_live_bytes = r->next_live_bytes(); 3011 double gc_eff = r->gc_efficiency(); 3012 size_t remset_bytes = r->rem_set()->mem_size(); 3013 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3014 const char* remset_type = r->rem_set()->get_short_state_str(); 3015 3016 _total_used_bytes += used_bytes; 3017 _total_capacity_bytes += capacity_bytes; 3018 _total_prev_live_bytes += prev_live_bytes; 3019 _total_next_live_bytes += next_live_bytes; 3020 _total_remset_bytes += remset_bytes; 3021 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3022 3023 // Print a line for this particular region. 3024 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3025 G1PPRL_TYPE_FORMAT 3026 G1PPRL_ADDR_BASE_FORMAT 3027 G1PPRL_BYTE_FORMAT 3028 G1PPRL_BYTE_FORMAT 3029 G1PPRL_BYTE_FORMAT 3030 G1PPRL_DOUBLE_FORMAT 3031 G1PPRL_BYTE_FORMAT 3032 G1PPRL_STATE_FORMAT 3033 G1PPRL_BYTE_FORMAT, 3034 type, p2i(bottom), p2i(end), 3035 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3036 remset_bytes, remset_type, strong_code_roots_bytes); 3037 3038 return false; 3039 } 3040 3041 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3042 if (!log_is_enabled(Trace, gc, liveness)) { 3043 return; 3044 } 3045 3046 // add static memory usages to remembered set sizes 3047 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3048 // Print the footer of the output. 
3049 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3050 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3051 " SUMMARY" 3052 G1PPRL_SUM_MB_FORMAT("capacity") 3053 G1PPRL_SUM_MB_PERC_FORMAT("used") 3054 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3055 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3056 G1PPRL_SUM_MB_FORMAT("remset") 3057 G1PPRL_SUM_MB_FORMAT("code-roots"), 3058 bytes_to_mb(_total_capacity_bytes), 3059 bytes_to_mb(_total_used_bytes), 3060 percent_of(_total_used_bytes, _total_capacity_bytes), 3061 bytes_to_mb(_total_prev_live_bytes), 3062 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 3063 bytes_to_mb(_total_next_live_bytes), 3064 percent_of(_total_next_live_bytes, _total_capacity_bytes), 3065 bytes_to_mb(_total_remset_bytes), 3066 bytes_to_mb(_total_strong_code_roots_bytes)); 3067 }