/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
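  // (Recording our progress through the bitmap first means that, should the
  // drains below abort this step, the next do_marking_step() invocation can
  // resume the iteration from the finger instead of rescanning.)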
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _survivors(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
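  // Reset the claim index so that claim_next() hands out survivor regions
  // from the beginning of the survivor list again.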
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
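// For example, ParallelGCThreads = 8 yields (8 + 2) / 4 = 2 concurrent
// workers; the MAX2 below guarantees at least one worker even when
// ParallelGCThreads is 1.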
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use different numbers of active
  // threads. So, it's easiest to have all of them ready.
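  // Every task is rebound to the (new) next marking bitmap here; how many of
  // them actually become active is decided later via set_concurrency().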
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got humongous continues region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
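  // Note that reusing reset_marking_for_restart() leaves the idle state
  // identical to the state after an overflow restart, just with no active tasks.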
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool do_heap_region(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
  CheckBitmapClearHRClosure cl(_next_mark_bitmap);
  _g1h->heap_region_iterate(&cl);
  return cl.is_complete();
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_initial_mark() {
  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
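  // note_start_of_marking() records each region's current top as the next
  // top-at-mark-start (NTAMS); objects allocated above NTAMS during the cycle
  // are treated as implicitly live.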
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);
}

void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC, or an evacuation
 * pause could occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial */);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                      1, /* Minimum workers */
                                                      _num_concurrent_workers,
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_worker_threads() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
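  // Survivor regions were filled during the pause that started this cycle, so
  // their NTAMS is at bottom() (asserted below) and the whole [bottom, top)
  // range has to be scanned.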
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
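  // update_active_workers() returns the number of workers the gang actually
  // provides, which may be lower than requested; use that value from here on.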
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}

class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
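      // For example, a fully live humongous object spanning two and a half
      // regions hands GrainWords to each of the first two regions and the
      // remaining half to the last one, leaving marked_words at zero.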
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }

    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous starts
      // region. We need to distribute this value across the entire set of regions a
      // humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which roughly one thread should be spawned for this work.
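  // For example, a 2048-region heap yields (2048 + 383) / 384 = 6 workers by
  // capacity in remark(), further capped there by the active worker count.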
  static const uint RegionsPerThread = 384;
};

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However, we might
  // have ended up here as the Remark VM operation has already been scheduled.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
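    // The concurrent mark thread checks this flag after the pause and re-runs
    // the concurrent marking phase instead of moving on to the rebuild phase.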
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;
    HRRSCleanupTask* _hrrs_cleanup_task;

  public:
    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                 FreeRegionList* local_cleanup_list,
                                 HRRSCleanupTask* hrrs_cleanup_task) :
      _g1h(g1h),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0),
      _hrrs_cleanup_task(hrrs_cleanup_task) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      } else {
        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {

    HeapRegionRemSet::reset_for_cleanup_tasks();
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1ReclaimEmptyRegionsClosure cl(_g1h,
                                    &local_cleanup_list,
                                    &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}

void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
  }
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure : public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  uint _ref_counter_limit;
  uint _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (_cm->has_overflown()) {
      return;
    }
    if (!_task->deal_with_reference(p)) {
      // We did not add anything to the mark bitmap (or mark stack), so there is
      // no point trying to drain it.
      return;
    }
    _ref_counter--;

    if (_ref_counter == 0) {
      // We have dealt with _ref_counter_limit references, pushing them
      // and objects reachable from them on to the local stack (and
      // possibly the global stack). Call G1CMTask::do_marking_step() to
      // process these entries.
      //
      // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
      // there's nothing more to do (i.e. we're done with the entries that
      // were pushed as a result of the G1CMTask::deal_with_reference() calls
      // above) or we overflow.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.
      do {
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
        _task->do_marking_step(mark_step_duration_ms,
                               false /* do_termination */,
                               _is_serial);
      } while (_task->has_aborted() && !_cm->has_overflown());
      _ref_counter = _ref_counter_limit;
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.
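// (A timeout of 1000000000.0 ms effectively disables the time-based abort, so
// a do_marking_step() round here only ends by draining everything, by
// termination, or by overflow.)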
class G1CMDrainMarkingStackClosure : public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  virtual void execute(ProcessTask& task, uint ergo_workers);
};

class G1CMRefProcTaskProxy : public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
assert(_workers->active_workers() >= ergo_workers, 1562 "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)", 1563 ergo_workers, _workers->active_workers()); 1564 1565 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 1566 1567 // We need to reset the concurrency level before each 1568 // proxy task execution, so that the termination protocol 1569 // and overflow handling in G1CMTask::do_marking_step() know 1570 // how many workers to wait for. 1571 _cm->set_concurrency(ergo_workers); 1572 _workers->run_task(&proc_task_proxy, ergo_workers); 1573 } 1574 1575 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) { 1576 ResourceMark rm; 1577 HandleMark hm; 1578 1579 // Is alive closure. 1580 G1CMIsAliveClosure g1_is_alive(_g1h); 1581 1582 // Inner scope to exclude the cleaning of the string and symbol 1583 // tables from the displayed time. 1584 { 1585 GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm); 1586 1587 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1588 1589 // See the comment in G1CollectedHeap::ref_processing_init() 1590 // about how reference processing currently works in G1. 1591 1592 // Set the soft reference policy 1593 rp->setup_policy(clear_all_soft_refs); 1594 assert(_global_mark_stack.is_empty(), "mark stack should be empty"); 1595 1596 // Instances of the 'Keep Alive' and 'Complete GC' closures used 1597 // in serial reference processing. Note these closures are also 1598 // used for serially processing (by the current thread) the 1599 // JNI references during parallel reference processing. 1600 // 1601 // These closures do not need to synchronize with the worker 1602 // threads involved in parallel reference processing as these 1603 // instances are executed serially by the current thread (i.e. 1604 // reference processing is not multi-threaded and is thus 1605 // performed by the current thread instead of a gang worker). 1606 // 1607 // The gang tasks involved in parallel reference processing create 1608 // their own instances of these closures, which do their own 1609 // synchronization among themselves. 1610 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 1611 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 1612 1613 // We need at least one active thread. If reference processing 1614 // is not multi-threaded, we use the current (VMThread) thread, 1615 // otherwise we use the work gang from the G1CollectedHeap and 1616 // we utilize all the worker threads we can. 1617 bool processing_is_mt = rp->processing_is_mt(); 1618 uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U); 1619 active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U); 1620 1621 // Parallel processing task executor. 1622 G1CMRefProcTaskExecutor par_task_executor(_g1h, this, 1623 _g1h->workers(), active_workers); 1624 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); 1625 1626 // Set the concurrency level. The phase was already set prior to 1627 // executing the remark task. 1628 set_concurrency(active_workers); 1629 1630 // Set the degree of MT processing here. If the discovery was done MT, 1631 // the number of threads involved during discovery could differ from 1632 // the number of active workers. This is OK as long as the discovered 1633 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1634 rp->set_active_mt_degree(active_workers); 1635 1636 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues()); 1637 1638 // Process the weak references. 1639 const ReferenceProcessorStats& stats = 1640 rp->process_discovered_references(&g1_is_alive, 1641 &g1_keep_alive, 1642 &g1_drain_mark_stack, 1643 executor, 1644 &pt); 1645 _gc_tracer_cm->report_gc_reference_stats(stats); 1646 pt.print_all_references(); 1647 1648 // The do_oop work routines of the keep_alive and drain_marking_stack 1649 // oop closures will set the has_overflown flag if we overflow the 1650 // global marking stack. 1651 1652 assert(has_overflown() || _global_mark_stack.is_empty(), 1653 "Mark stack should be empty (unless it has overflown)"); 1654 1655 assert(rp->num_queues() == active_workers, "why not"); 1656 1657 rp->verify_no_references_recorded(); 1658 assert(!rp->discovery_enabled(), "Post condition"); 1659 } 1660 1661 if (has_overflown()) { 1662 // We cannot trust g1_is_alive and the contents of the heap if the marking stack 1663 // overflowed while processing references. Exit the VM. 1664 fatal("Overflow during reference processing, cannot continue. Please " 1665 "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and " 1666 "restart.", MarkStackSizeMax); 1667 return; 1668 } 1669 1670 assert(_global_mark_stack.is_empty(), "Marking should have completed"); 1671 1672 { 1673 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm); 1674 WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl); 1675 } 1676 1677 // Unload Klasses, Strings, Symbols, Code Cache, etc. 1678 if (ClassUnloadingWithConcurrentMark) { 1679 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); 1680 bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm, false /* Defer cleaning */); 1681 _g1h->complete_cleaning(&g1_is_alive, purged_classes); 1682 } else { 1683 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm); 1684 // No need to clean string table and symbol table as they are treated as strong roots when 1685 // class unloading is disabled. 1686 _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled()); 1687 } 1688 } 1689 1690 class G1PrecleanYieldClosure : public YieldClosure { 1691 G1ConcurrentMark* _cm; 1692 1693 public: 1694 G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { } 1695 1696 virtual bool should_return() { 1697 return _cm->has_aborted(); 1698 } 1699 1700 virtual bool should_return_fine_grain() { 1701 _cm->do_yield_check(); 1702 return _cm->has_aborted(); 1703 } 1704 }; 1705 1706 void G1ConcurrentMark::preclean() { 1707 assert(G1UseReferencePrecleaning, "Precleaning must be enabled."); 1708 1709 SuspendibleThreadSetJoiner joiner; 1710 1711 G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */); 1712 G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */); 1713 1714 set_concurrency_and_phase(1, true); 1715 1716 G1PrecleanYieldClosure yield_cl(this); 1717 1718 ReferenceProcessor* rp = _g1h->ref_processor_cm(); 1719 // Precleaning is single-threaded. Temporarily disable MT discovery. 1720 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); 1721 rp->preclean_discovered_references(rp->is_alive_non_header(), 1722 &keep_alive, 1723 &drain_mark_stack, 1724 &yield_cl, 1725 _gc_timer_cm); 1726 } 1727 1728 // When sampling object counts, we already swapped the mark bitmaps, so we need to use 1729 // the prev bitmap to determine liveness.
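// (Editorial note, no new code: the predicate below reports an object live
// iff
//
//   !is_in_g1_reserved(addr) || !is_obj_dead(obj)
//
// where is_obj_dead() consults the previous marking information. Because
// the bitmaps were already swapped, that is exactly the marking the
// just-completed cycle produced; an aborted cycle instead uses
// G1CMIsAliveClosure, see report_object_count() below.)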
1730 class G1ObjectCountIsAliveClosure: public BoolObjectClosure { 1731 G1CollectedHeap* _g1h; 1732 public: 1733 G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { } 1734 1735 bool do_object_b(oop obj) { 1736 HeapWord* addr = (HeapWord*)obj; 1737 return addr != NULL && 1738 (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj)); 1739 } 1740 }; 1741 1742 void G1ConcurrentMark::report_object_count(bool mark_completed) { 1743 // Depending on whether marking completed, liveness needs to be determined 1744 // using either the next or the prev bitmap. 1745 if (mark_completed) { 1746 G1ObjectCountIsAliveClosure is_alive(_g1h); 1747 _gc_tracer_cm->report_object_count_after_gc(&is_alive); 1748 } else { 1749 G1CMIsAliveClosure is_alive(_g1h); 1750 _gc_tracer_cm->report_object_count_after_gc(&is_alive); 1751 } 1752 } 1753 1754 1755 void G1ConcurrentMark::swap_mark_bitmaps() { 1756 G1CMBitMap* temp = _prev_mark_bitmap; 1757 _prev_mark_bitmap = _next_mark_bitmap; 1758 _next_mark_bitmap = temp; 1759 _g1h->collector_state()->set_clearing_next_bitmap(true); 1760 } 1761 1762 // Closure for marking entries in SATB buffers. 1763 class G1CMSATBBufferClosure : public SATBBufferClosure { 1764 private: 1765 G1CMTask* _task; 1766 G1CollectedHeap* _g1h; 1767 1768 // This is very similar to G1CMTask::deal_with_reference, but with 1769 // more relaxed requirements for the argument, so this must be more 1770 // circumspect about treating the argument as an object. 1771 void do_entry(void* entry) const { 1772 _task->increment_refs_reached(); 1773 oop const obj = static_cast<oop>(entry); 1774 _task->make_reference_grey(obj); 1775 } 1776 1777 public: 1778 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h) 1779 : _task(task), _g1h(g1h) { } 1780 1781 virtual void do_buffer(void** buffer, size_t size) { 1782 for (size_t i = 0; i < size; ++i) { 1783 do_entry(buffer[i]); 1784 } 1785 } 1786 }; 1787 1788 class G1RemarkThreadsClosure : public ThreadClosure { 1789 G1CMSATBBufferClosure _cm_satb_cl; 1790 G1CMOopClosure _cm_cl; 1791 MarkingCodeBlobClosure _code_cl; 1792 int _thread_parity; 1793 1794 public: 1795 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) : 1796 _cm_satb_cl(task, g1h), 1797 _cm_cl(g1h, task), 1798 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), 1799 _thread_parity(Threads::thread_claim_parity()) {} 1800 1801 void do_thread(Thread* thread) { 1802 if (thread->is_Java_thread()) { 1803 if (thread->claim_oops_do(true, _thread_parity)) { 1804 JavaThread* jt = (JavaThread*)thread; 1805 1806 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking; 1807 // however, oops reachable from nmethods have very complex lifecycles: 1808 // * Alive if on the stack of an executing method 1809 // * Weakly reachable otherwise 1810 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be 1811 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1812 jt->nmethods_do(&_code_cl); 1813 1814 G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl); 1815 } 1816 } else if (thread->is_VM_thread()) { 1817 if (thread->claim_oops_do(true, _thread_parity)) { 1818 G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl); 1819 } 1820 } 1821 } 1822 }; 1823 1824 class G1CMRemarkTask : public AbstractGangTask { 1825 G1ConcurrentMark* _cm; 1826 public: 1827 void work(uint worker_id) { 1828 G1CMTask* task = _cm->task(worker_id); 1829 task->record_start_time(); 1830 { 1831 ResourceMark rm; 1832 HandleMark hm; 1833 1834 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task); 1835 Threads::threads_do(&threads_f); 1836 } 1837 1838 do { 1839 task->do_marking_step(1000000000.0 /* something very large */, 1840 true /* do_termination */, 1841 false /* is_serial */); 1842 } while (task->has_aborted() && !_cm->has_overflown()); 1843 // If we overflow, then we do not want to restart. We instead 1844 // want to abort remark and do concurrent marking again. 1845 task->record_end_time(); 1846 } 1847 1848 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) : 1849 AbstractGangTask("Par Remark"), _cm(cm) { 1850 _cm->terminator()->reset_for_reuse(active_workers); 1851 } 1852 }; 1853 1854 void G1ConcurrentMark::finalize_marking() { 1855 ResourceMark rm; 1856 HandleMark hm; 1857 1858 _g1h->ensure_parsability(false); 1859 1860 // This is remark, so we'll use all active threads. 1861 uint active_workers = _g1h->workers()->active_workers(); 1862 set_concurrency_and_phase(active_workers, false /* concurrent */); 1863 // Leave _parallel_marking_threads at its 1864 // value originally calculated in the G1ConcurrentMark 1865 // constructor and pass the values of the active workers 1866 // through the gang in the task. 1867 1868 { 1869 StrongRootsScope srs(active_workers); 1870 1871 G1CMRemarkTask remarkTask(this, active_workers); 1872 // We will start all available threads, even if we decide that the 1873 // active_workers will be fewer. The extra ones will just bail out 1874 // immediately. 1875 _g1h->workers()->run_task(&remarkTask); 1876 } 1877 1878 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 1879 guarantee(has_overflown() || 1880 satb_mq_set.completed_buffers_num() == 0, 1881 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT, 1882 BOOL_TO_STR(has_overflown()), 1883 satb_mq_set.completed_buffers_num()); 1884 1885 print_stats(); 1886 } 1887 1888 void G1ConcurrentMark::flush_all_task_caches() { 1889 size_t hits = 0; 1890 size_t misses = 0; 1891 for (uint i = 0; i < _max_num_tasks; i++) { 1892 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache(); 1893 hits += stats.first; 1894 misses += stats.second; 1895 } 1896 size_t sum = hits + misses; 1897 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf", 1898 hits, misses, percent_of(hits, sum)); 1899 } 1900 1901 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) { 1902 _prev_mark_bitmap->clear_range(mr); 1903 } 1904 1905 HeapRegion* 1906 G1ConcurrentMark::claim_region(uint worker_id) { 1907 // "checkpoint" the finger 1908 HeapWord* finger = _finger; 1909 1910 while (finger < _heap.end()) { 1911 assert(_g1h->is_in_g1_reserved(finger), "invariant"); 1912 1913 HeapRegion* curr_region = _g1h->heap_region_containing(finger); 1914 // Make sure that the reads below do not float before loading curr_region.
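// (Editorial sketch of the claim protocol implemented by this loop; names
// are from this function, control flow is simplified:
//
//   HeapWord* finger = _finger;                 // snapshot global finger
//   HeapWord* end    = /* end of region containing finger */;
//   if (Atomic::cmpxchg(end, &_finger, finger) == finger) {
//     // CAS won: this task owns [finger, end) and scans it if non-empty.
//   } else {
//     finger = _finger;                         // CAS lost: re-read, retry
//   }
//
// At most one task can win the CAS for a given finger value, so each
// region is handed out at most once.)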
1915 OrderAccess::loadload(); 1916 // The heap_region_containing() call above may return NULL as we always scan and 1917 // claim regions until the end of the heap. In this case, just jump to the next region. 1918 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; 1919 1920 // Is the gap between reading the finger and doing the CAS too long? 1921 HeapWord* res = Atomic::cmpxchg(end, &_finger, finger); 1922 if (res == finger && curr_region != NULL) { 1923 // we succeeded 1924 HeapWord* bottom = curr_region->bottom(); 1925 HeapWord* limit = curr_region->next_top_at_mark_start(); 1926 1927 // Notice that _finger == end cannot be guaranteed here since 1928 // someone else might have moved the finger even further. 1929 assert(_finger >= end, "the finger should have moved forward"); 1930 1931 if (limit > bottom) { 1932 return curr_region; 1933 } else { 1934 assert(limit == bottom, 1935 "the region limit should be at bottom"); 1936 // we return NULL and the caller should try calling 1937 // claim_region() again. 1938 return NULL; 1939 } 1940 } else { 1941 assert(_finger > finger, "the finger should have moved forward"); 1942 // read it again 1943 finger = _finger; 1944 } 1945 } 1946 1947 return NULL; 1948 } 1949 1950 #ifndef PRODUCT 1951 class VerifyNoCSetOops { 1952 G1CollectedHeap* _g1h; 1953 const char* _phase; 1954 int _info; 1955 1956 public: 1957 VerifyNoCSetOops(const char* phase, int info = -1) : 1958 _g1h(G1CollectedHeap::heap()), 1959 _phase(phase), 1960 _info(info) 1961 { } 1962 1963 void operator()(G1TaskQueueEntry task_entry) const { 1964 if (task_entry.is_array_slice()) { 1965 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1966 return; 1967 } 1968 guarantee(oopDesc::is_oop(task_entry.obj()), 1969 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1970 p2i(task_entry.obj()), _phase, _info); 1971 guarantee(!_g1h->is_in_cset(task_entry.obj()), 1972 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1973 p2i(task_entry.obj()), _phase, _info); 1974 } 1975 }; 1976 1977 void G1ConcurrentMark::verify_no_cset_oops() { 1978 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1979 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { 1980 return; 1981 } 1982 1983 // Verify entries on the global mark stack 1984 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1985 1986 // Verify entries on the task queues 1987 for (uint i = 0; i < _max_num_tasks; ++i) { 1988 G1CMTaskQueue* queue = _task_queues->queue(i); 1989 queue->iterate(VerifyNoCSetOops("Queue", i)); 1990 } 1991 1992 // Verify the global finger 1993 HeapWord* global_finger = finger(); 1994 if (global_finger != NULL && global_finger < _heap.end()) { 1995 // Since we always iterate over all regions, we might get a NULL HeapRegion 1996 // here. 1997 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1998 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1999 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 2000 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 2001 } 2002 2003 // Verify the task fingers 2004 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 2005 for (uint i = 0; i < _num_concurrent_workers; ++i) { 2006 G1CMTask* task = _tasks[i]; 2007 HeapWord* task_finger = task->finger(); 2008 if (task_finger != NULL && task_finger < _heap.end()) { 2009 // See above note on the global finger verification.
2010 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 2011 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 2012 !task_hr->in_collection_set(), 2013 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 2014 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 2015 } 2016 } 2017 } 2018 #endif // PRODUCT 2019 2020 void G1ConcurrentMark::rebuild_rem_set_concurrently() { 2021 _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset); 2022 } 2023 2024 void G1ConcurrentMark::print_stats() { 2025 if (!log_is_enabled(Debug, gc, stats)) { 2026 return; 2027 } 2028 log_debug(gc, stats)("---------------------------------------------------------------------"); 2029 for (size_t i = 0; i < _num_active_tasks; ++i) { 2030 _tasks[i]->print_stats(); 2031 log_debug(gc, stats)("---------------------------------------------------------------------"); 2032 } 2033 } 2034 2035 void G1ConcurrentMark::concurrent_cycle_abort() { 2036 if (!cm_thread()->during_cycle() || _has_aborted) { 2037 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2038 return; 2039 } 2040 2041 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2042 // concurrent bitmap clearing. 2043 { 2044 GCTraceTime(Debug, gc) debug("Clear Next Bitmap"); 2045 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false); 2046 } 2047 // Note we cannot clear the previous marking bitmap here 2048 // since VerifyDuringGC verifies the objects marked during 2049 // a full GC against the previous bitmap. 2050 2051 // Empty mark stack 2052 reset_marking_for_restart(); 2053 for (uint i = 0; i < _max_num_tasks; ++i) { 2054 _tasks[i]->clear_region_fields(); 2055 } 2056 _first_overflow_barrier_sync.abort(); 2057 _second_overflow_barrier_sync.abort(); 2058 _has_aborted = true; 2059 2060 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2061 satb_mq_set.abandon_partial_marking(); 2062 // This can be called either during or outside marking; we'll read 2063 // the expected_active value from the SATB queue set. 2064 satb_mq_set.set_active_all_threads( 2065 false, /* new active value */ 2066 satb_mq_set.is_active() /* expected_active */); 2067 } 2068 2069 static void print_ms_time_info(const char* prefix, const char* name, 2070 NumberSeq& ns) { 2071 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2072 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2073 if (ns.num() > 0) { 2074 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", 2075 prefix, ns.sd(), ns.maximum()); 2076 } 2077 } 2078 2079 void G1ConcurrentMark::print_summary_info() { 2080 Log(gc, marking) log; 2081 if (!log.is_trace()) { 2082 return; 2083 } 2084 2085 log.trace(" Concurrent marking:"); 2086 print_ms_time_info(" ", "init marks", _init_times); 2087 print_ms_time_info(" ", "remarks", _remark_times); 2088 { 2089 print_ms_time_info(" ", "final marks", _remark_mark_times); 2090 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2091 2092 } 2093 print_ms_time_info(" ", "cleanups", _cleanup_times); 2094 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2095 _total_cleanup_time, (_cleanup_times.num() > 0 ?
_total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2096 log.trace(" Total stop_world time = %8.2f s.", 2097 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2098 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2099 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum()); 2100 } 2101 2102 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2103 _concurrent_workers->print_worker_threads_on(st); 2104 } 2105 2106 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2107 _concurrent_workers->threads_do(tc); 2108 } 2109 2110 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2111 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2112 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap)); 2113 _prev_mark_bitmap->print_on_error(st, " Prev Bits: "); 2114 _next_mark_bitmap->print_on_error(st, " Next Bits: "); 2115 } 2116 2117 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2118 ReferenceProcessor* result = g1h->ref_processor_cm(); 2119 assert(result != NULL, "CM reference processor should not be NULL"); 2120 return result; 2121 } 2122 2123 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2124 G1CMTask* task) 2125 : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)), 2126 _g1h(g1h), _task(task) 2127 { } 2128 2129 void G1CMTask::setup_for_region(HeapRegion* hr) { 2130 assert(hr != NULL, 2131 "claim_region() should have filtered out NULL regions"); 2132 _curr_region = hr; 2133 _finger = hr->bottom(); 2134 update_region_limit(); 2135 } 2136 2137 void G1CMTask::update_region_limit() { 2138 HeapRegion* hr = _curr_region; 2139 HeapWord* bottom = hr->bottom(); 2140 HeapWord* limit = hr->next_top_at_mark_start(); 2141 2142 if (limit == bottom) { 2143 // The region was collected underneath our feet. 2144 // We set the finger to bottom to ensure that the bitmap 2145 // iteration that will follow this will not do anything. 2146 // (this is not a condition that holds when we set the region up, 2147 // as the region is not supposed to be empty in the first place) 2148 _finger = bottom; 2149 } else if (limit >= _region_limit) { 2150 assert(limit >= _finger, "peace of mind"); 2151 } else { 2152 assert(limit < _region_limit, "only way to get here"); 2153 // This can happen under some pretty unusual circumstances. An 2154 // evacuation pause empties the region underneath our feet (NTAMS 2155 // at bottom). We then do some allocation in the region (NTAMS 2156 // stays at bottom), followed by the region being used as a GC 2157 // alloc region (NTAMS will move to top() and the objects 2158 // originally below it will be grayed). All objects now marked in 2159 // the region are explicitly grayed, if below the global finger, 2160 // and in fact we do not need to scan anything else. So, we simply 2161 // set _finger to be limit to ensure that the bitmap iteration 2162 // doesn't do anything. 2163 _finger = limit; 2164 } 2165 2166 _region_limit = limit; 2167 } 2168 2169 void G1CMTask::giveup_current_region() { 2170 assert(_curr_region != NULL, "invariant"); 2171 clear_region_fields(); 2172 } 2173 2174 void G1CMTask::clear_region_fields() { 2175 // Set the values of these three fields so they indicate that we're not 2176 // holding on to a region.
2177 _curr_region = NULL; 2178 _finger = NULL; 2179 _region_limit = NULL; 2180 } 2181 2182 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 2183 if (cm_oop_closure == NULL) { 2184 assert(_cm_oop_closure != NULL, "invariant"); 2185 } else { 2186 assert(_cm_oop_closure == NULL, "invariant"); 2187 } 2188 _cm_oop_closure = cm_oop_closure; 2189 } 2190 2191 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) { 2192 guarantee(next_mark_bitmap != NULL, "invariant"); 2193 _next_mark_bitmap = next_mark_bitmap; 2194 clear_region_fields(); 2195 2196 _calls = 0; 2197 _elapsed_time_ms = 0.0; 2198 _termination_time_ms = 0.0; 2199 _termination_start_time_ms = 0.0; 2200 2201 _mark_stats_cache.reset(); 2202 } 2203 2204 bool G1CMTask::should_exit_termination() { 2205 regular_clock_call(); 2206 // This is called when we are in the termination protocol. We should 2207 // quit if, for some reason, this task wants to abort or the global 2208 // stack is not empty (this means that we can get work from it). 2209 return !_cm->mark_stack_empty() || has_aborted(); 2210 } 2211 2212 void G1CMTask::reached_limit() { 2213 assert(_words_scanned >= _words_scanned_limit || 2214 _refs_reached >= _refs_reached_limit, 2215 "shouldn't have been called otherwise"); 2216 regular_clock_call(); 2217 } 2218 2219 void G1CMTask::regular_clock_call() { 2220 if (has_aborted()) { 2221 return; 2222 } 2223 2224 // First, we need to recalculate the words scanned and refs reached 2225 // limits for the next clock call. 2226 recalculate_limits(); 2227 2228 // During the regular clock call we do the following: 2229 2230 // (1) If an overflow has been flagged, then we abort. 2231 if (_cm->has_overflown()) { 2232 set_has_aborted(); 2233 return; 2234 } 2235 2236 // If we are not concurrent (i.e. we're doing remark) we don't need 2237 // to check anything else. The other steps are only needed during 2238 // the concurrent marking phase. 2239 if (!_cm->concurrent()) { 2240 return; 2241 } 2242 2243 // (2) If marking has been aborted for Full GC, then we also abort. 2244 if (_cm->has_aborted()) { 2245 set_has_aborted(); 2246 return; 2247 } 2248 2249 double curr_time_ms = os::elapsedVTime() * 1000.0; 2250 2251 // (3) We check whether we should yield. If we have to, then we abort. 2252 if (SuspendibleThreadSet::should_yield()) { 2253 // We should yield. To do this we abort the task. The caller is 2254 // responsible for yielding. 2255 set_has_aborted(); 2256 return; 2257 } 2258 2259 // (4) We check whether we've reached our time quota. If we have, 2260 // then we abort. 2261 double elapsed_time_ms = curr_time_ms - _start_time_ms; 2262 if (elapsed_time_ms > _time_target_ms) { 2263 set_has_aborted(); 2264 _has_timed_out = true; 2265 return; 2266 } 2267 2268 // (5) Finally, we check whether there are enough completed SATB 2269 // buffers available for processing. If there are, we abort.
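// (Aborting here does not lose the buffers: a fresh invocation of
// do_marking_step() calls drain_satb_buffers() near its start, so the
// queued-up buffers are picked up when the task restarts.)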
2270 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2271 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) { 2272 // We do need to process SATB buffers; we'll abort and restart 2273 // the marking task to do so. 2274 set_has_aborted(); 2275 return; 2276 } 2277 } 2278 2279 void G1CMTask::recalculate_limits() { 2280 _real_words_scanned_limit = _words_scanned + words_scanned_period; 2281 _words_scanned_limit = _real_words_scanned_limit; 2282 2283 _real_refs_reached_limit = _refs_reached + refs_reached_period; 2284 _refs_reached_limit = _real_refs_reached_limit; 2285 } 2286 2287 void G1CMTask::decrease_limits() { 2288 // This is called when we believe that we're going to do an infrequent 2289 // operation which will increase the per-byte scanned cost (i.e. move 2290 // entries to/from the global stack). It basically tries to decrease the 2291 // scanning limit so that the clock is called earlier. 2292 2293 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4; 2294 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4; 2295 } 2296 2297 void G1CMTask::move_entries_to_global_stack() { 2298 // Local array where we'll store the entries that will be popped 2299 // from the local queue. 2300 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2301 2302 size_t n = 0; 2303 G1TaskQueueEntry task_entry; 2304 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) { 2305 buffer[n] = task_entry; 2306 ++n; 2307 } 2308 if (n < G1CMMarkStack::EntriesPerChunk) { 2309 buffer[n] = G1TaskQueueEntry(); 2310 } 2311 2312 if (n > 0) { 2313 if (!_cm->mark_stack_push(buffer)) { 2314 set_has_aborted(); 2315 } 2316 } 2317 2318 // This operation was quite expensive, so decrease the limits. 2319 decrease_limits(); 2320 } 2321 2322 bool G1CMTask::get_entries_from_global_stack() { 2323 // Local array where we'll store the entries that will be popped 2324 // from the global stack. 2325 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk]; 2326 2327 if (!_cm->mark_stack_pop(buffer)) { 2328 return false; 2329 } 2330 2331 // We did actually pop at least one entry. 2332 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) { 2333 G1TaskQueueEntry task_entry = buffer[i]; 2334 if (task_entry.is_null()) { 2335 break; 2336 } 2337 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj())); 2338 bool success = _task_queue->push(task_entry); 2339 // We only call this when the local queue is empty or under a 2340 // given target limit. So, we do not expect this push to fail. 2341 assert(success, "invariant"); 2342 } 2343 2344 // This operation was quite expensive, so decrease the limits. 2345 decrease_limits(); 2346 return true; 2347 } 2348 2349 void G1CMTask::drain_local_queue(bool partially) { 2350 if (has_aborted()) { 2351 return; 2352 } 2353 2354 // Decide what the target size is, depending on whether we're going to 2355 // drain it partially (so that other tasks can steal if they run out 2356 // of things to do) or totally (at the very end).
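// (Illustrative numbers only, to make the computation below concrete: with
// a hypothetical max_elems() of 16384 and a GCDrainStackTargetSize of 64,
// the partial target is MIN2(16384 / 3, 64) = 64 entries, i.e. we keep
// roughly a buffer's worth of entries around for other tasks to steal.)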
2357 size_t target_size; 2358 if (partially) { 2359 target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize); 2360 } else { 2361 target_size = 0; 2362 } 2363 2364 if (_task_queue->size() > target_size) { 2365 G1TaskQueueEntry entry; 2366 bool ret = _task_queue->pop_local(entry); 2367 while (ret) { 2368 scan_task_entry(entry); 2369 if (_task_queue->size() <= target_size || has_aborted()) { 2370 ret = false; 2371 } else { 2372 ret = _task_queue->pop_local(entry); 2373 } 2374 } 2375 } 2376 } 2377 2378 void G1CMTask::drain_global_stack(bool partially) { 2379 if (has_aborted()) { 2380 return; 2381 } 2382 2383 // We have a policy to drain the local queue before we attempt to 2384 // drain the global stack. 2385 assert(partially || _task_queue->size() == 0, "invariant"); 2386 2387 // Decide what the target size is, depending on whether we're going to 2388 // drain it partially (so that other tasks can steal if they run out 2389 // of things to do) or totally (at the very end). 2390 // Notice that when draining the global mark stack partially, due to the raciness 2391 // of the mark stack size update we might in fact drop below the target. But, 2392 // this is not a problem. 2393 // In case of total draining, we simply process until the global mark stack is 2394 // totally empty, disregarding the size counter. 2395 if (partially) { 2396 size_t const target_size = _cm->partial_mark_stack_size_target(); 2397 while (!has_aborted() && _cm->mark_stack_size() > target_size) { 2398 if (get_entries_from_global_stack()) { 2399 drain_local_queue(partially); 2400 } 2401 } 2402 } else { 2403 while (!has_aborted() && get_entries_from_global_stack()) { 2404 drain_local_queue(partially); 2405 } 2406 } 2407 } 2408 2409 // SATB Queue has several assumptions on whether to call the par or 2410 // non-par versions of the methods. This is why some of the code is 2411 // replicated. We should really get rid of the single-threaded version 2412 // of the code to simplify things. 2413 void G1CMTask::drain_satb_buffers() { 2414 if (has_aborted()) { 2415 return; 2416 } 2417 2418 // We set this so that the regular clock knows that we're in the 2419 // middle of draining buffers and doesn't set the abort flag when it 2420 // notices that SATB buffers are available for draining. It'd be 2421 // very counterproductive if it did that. :-) 2422 _draining_satb_buffers = true; 2423 2424 G1CMSATBBufferClosure satb_cl(this, _g1h); 2425 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); 2426 2427 // This keeps claiming and applying the closure to completed buffers 2428 // until we run out of buffers or we need to abort.
while (!has_aborted() && 2430 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 2431 regular_clock_call(); 2432 } 2433 2434 _draining_satb_buffers = false; 2435 2436 assert(has_aborted() || 2437 _cm->concurrent() || 2438 satb_mq_set.completed_buffers_num() == 0, "invariant"); 2439 2440 // Again, this was a potentially expensive operation; decrease the 2441 // limits to get the regular clock call early. 2442 decrease_limits(); 2443 } 2444 2445 void G1CMTask::clear_mark_stats_cache(uint region_idx) { 2446 _mark_stats_cache.reset(region_idx); 2447 } 2448 2449 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() { 2450 return _mark_stats_cache.evict_all(); 2451 } 2452 2453 void G1CMTask::print_stats() { 2454 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls); 2455 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 2456 _elapsed_time_ms, _termination_time_ms); 2457 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms, max = %1.2lfms, total = %1.2lfms", 2458 _step_times_ms.num(), 2459 _step_times_ms.avg(), 2460 _step_times_ms.sd(), 2461 _step_times_ms.maximum(), 2462 _step_times_ms.sum()); 2463 size_t const hits = _mark_stats_cache.hits(); 2464 size_t const misses = _mark_stats_cache.misses(); 2465 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f", 2466 hits, misses, percent_of(hits, hits + misses)); 2467 } 2468 2469 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) { 2470 return _task_queues->steal(worker_id, hash_seed, task_entry); 2471 } 2472 2473 /***************************************************************************** 2474 2475 The do_marking_step(time_target_ms, ...) method is the building 2476 block of the parallel marking framework. It can be called in parallel 2477 with other invocations of do_marking_step() on different tasks 2478 (but only one per task, obviously) and concurrently with the 2479 mutator threads, or during remark, hence it eliminates the need 2480 for two versions of the code. When called during remark, it will 2481 pick up from where the task left off during the concurrent marking 2482 phase. Interestingly, tasks are also claimable during evacuation 2483 pauses, since do_marking_step() ensures that it aborts before 2484 it needs to yield. 2485 2486 The data structures that it uses to do marking work are the 2487 following: 2488 2489 (1) Marking Bitmap. If there are gray objects that appear only 2490 on the bitmap (this happens either when dealing with an overflow 2491 or when the initial marking phase has simply marked the roots 2492 and didn't push them on the stack), then tasks claim heap 2493 regions whose bitmap they then scan to find gray objects. A 2494 global finger indicates where the end of the last claimed region 2495 is. A local finger indicates how far into the region a task has 2496 scanned. The two fingers are used to determine how to gray an 2497 object (i.e. whether simply marking it is OK, as it will be 2498 visited by a task in the future, or whether it also needs to be 2499 pushed on a stack). 2500 2501 (2) Local Queue. The task's local queue, which the task can access 2502 reasonably efficiently. Other tasks can steal from 2503 it when they run out of work. Throughout the marking phase, a 2504 task attempts to keep its local queue short but not totally 2505 empty, so that entries are available for stealing by other 2506 tasks.
Only when there is no more work will a task totally 2507 drain its local queue. 2508 2509 (3) Global Mark Stack. This handles local queue overflow. During 2510 marking only sets of entries are moved between it and the local 2511 queues, as access to it requires a mutex and more fine-grained 2512 interaction with it, which might cause contention. If it 2513 overflows, then the marking phase should restart and iterate 2514 over the bitmap to identify gray objects. Throughout the marking 2515 phase, tasks attempt to keep the global mark stack at a small 2516 length but not totally empty, so that entries are available for 2517 popping by other tasks. Only when there is no more work will 2518 tasks totally drain the global mark stack. 2519 2520 (4) SATB Buffer Queue. This is where completed SATB buffers are 2521 made available. Buffers are regularly removed from this queue 2522 and scanned for roots, so that the queue doesn't get too 2523 long. During remark, all completed buffers are processed, as 2524 well as the filled-in parts of any uncompleted buffers. 2525 2526 The do_marking_step() method tries to abort when the time target 2527 has been reached. There are a few other cases when the 2528 do_marking_step() method also aborts: 2529 2530 (1) When the marking phase has been aborted (after a Full GC). 2531 2532 (2) When a global overflow (on the global stack) has been 2533 triggered. Before the task aborts, it will actually sync up with 2534 the other tasks to ensure that all the marking data structures 2535 (local queues, stacks, fingers etc.) are re-initialized so that 2536 when do_marking_step() completes, the marking phase can 2537 immediately restart. 2538 2539 (3) When enough completed SATB buffers are available. The 2540 do_marking_step() method only tries to drain SATB buffers right 2541 at the beginning. So, if enough buffers are available, the 2542 marking step aborts and the SATB buffers are processed at 2543 the beginning of the next invocation. 2544 2545 (4) To yield. When we have to yield, we abort and yield 2546 right at the end of do_marking_step(). This saves us from a lot 2547 of hassle as, by yielding, we might allow a Full GC. If this 2548 happens, then objects will be compacted underneath our feet, the 2549 heap might shrink, etc. We save checking for this by just 2550 aborting and doing the yield right at the end. 2551 2552 From the above it follows that the do_marking_step() method should 2553 be called in a loop (or, otherwise, regularly) until it completes. 2554 2555 If a marking step completes without its has_aborted() flag being 2556 true, it means it has completed the current marking phase (and 2557 also all other marking tasks have done so and have all synced up). 2558 2559 A method called regular_clock_call() is invoked "regularly" (in 2560 sub-ms intervals) throughout marking. It is this clock method that 2561 checks all the abort conditions which were mentioned above and 2562 decides when the task should abort. A work-based scheme is used to 2563 trigger this clock method: when the number of object words the 2564 marking phase has scanned or the number of references the marking 2565 phase has visited reach a given limit. Additional invocations of 2566 the clock method have been planted in a few other strategic places 2567 too. The initial reason for the clock method was to avoid calling 2568 os::elapsedVTime() too regularly, as it is quite expensive.
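  In sketch form (illustrative only; the real increments are spread
  across the scanning code), the work-based trigger amounts to:

    _words_scanned += obj->size();          // while scanning objects
    _refs_reached  += 1;                    // while visiting references
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached  >= _refs_reached_limit) {
      reached_limit();                      // calls regular_clock_call()
    }

  See reached_limit() and recalculate_limits() above.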
So, once it was in 2569 place, it was natural to piggy-back all the other conditions on it 2570 too and not constantly check them throughout the code. 2571 2572 If do_termination is true, then do_marking_step will enter its 2573 termination protocol. 2574 2575 The value of is_serial must be true when do_marking_step is being 2576 called serially (i.e. by the VMThread) and do_marking_step should 2577 skip any synchronization in the termination and overflow code. 2578 Examples include the serial remark code and the serial reference 2579 processing closures. 2580 2581 The value of is_serial must be false when do_marking_step is 2582 being called by any of the worker threads in a work gang. 2583 Examples include the concurrent marking code (CMMarkingTask), 2584 the MT remark code, and the MT reference processing closures. 2585 2586 *****************************************************************************/ 2587 2588 void G1CMTask::do_marking_step(double time_target_ms, 2589 bool do_termination, 2590 bool is_serial) { 2591 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2592 2593 _start_time_ms = os::elapsedVTime() * 1000.0; 2594 2595 // If do_stealing is true then do_marking_step will attempt to 2596 // steal work from the other G1CMTasks. It only makes sense to 2597 // enable stealing when the termination protocol is enabled 2598 // and do_marking_step() is not being called serially. 2599 bool do_stealing = do_termination && !is_serial; 2600 2601 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 2602 _time_target_ms = time_target_ms - diff_prediction_ms; 2603 2604 // set up the variables that are used in the work-based scheme to 2605 // call the regular clock method 2606 _words_scanned = 0; 2607 _refs_reached = 0; 2608 recalculate_limits(); 2609 2610 // clear all flags 2611 clear_has_aborted(); 2612 _has_timed_out = false; 2613 _draining_satb_buffers = false; 2614 2615 ++_calls; 2616 2617 // Set up the bitmap and oop closures. Anything that uses them is 2618 // eventually called from this method, so it is OK to allocate these 2619 // locally, on the stack. 2620 G1CMBitMapClosure bitmap_closure(this, _cm); 2621 G1CMOopClosure cm_oop_closure(_g1h, this); 2622 set_cm_oop_closure(&cm_oop_closure); 2623 2624 if (_cm->has_overflown()) { 2625 // This can happen if the mark stack overflows during a GC pause 2626 // and this task, after a yield point, restarts. We have to abort 2627 // as we need to get into the overflow protocol which happens 2628 // right at the end of this method. 2629 set_has_aborted(); 2630 } 2631 2632 // First drain any available SATB buffers. After this, we will not 2633 // look at SATB buffers before the next invocation of this method. 2634 // If enough completed SATB buffers are queued up, the regular clock 2635 // will abort this task so that it restarts. 2636 drain_satb_buffers(); 2637 // ...then partially drain the local queue and the global stack 2638 drain_local_queue(true); 2639 drain_global_stack(true); 2640 2641 do { 2642 if (!has_aborted() && _curr_region != NULL) { 2643 // This means that we're already holding on to a region. 2644 assert(_finger != NULL, "if region is not NULL, then the finger " 2645 "should not be NULL either"); 2646 2647 // We might have restarted this task after an evacuation pause 2648 // which might have evacuated the region we're holding on to 2649 // underneath our feet.
Let's read its limit again to make sure 2650 // that we do not iterate over a region of the heap that 2651 // contains garbage (update_region_limit() will also move 2652 // _finger to the start of the region if it is found empty). 2653 update_region_limit(); 2654 // We will start from _finger not from the start of the region, 2655 // as we might be restarting this task after aborting half-way 2656 // through scanning this region. In this case, _finger points to 2657 // the address where we last found a marked object. If this is a 2658 // fresh region, _finger points to start(). 2659 MemRegion mr = MemRegion(_finger, _region_limit); 2660 2661 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2662 "humongous regions should go around loop once only"); 2663 2664 // Some special cases: 2665 // If the memory region is empty, we can just give up the region. 2666 // If the current region is humongous, then we only need to check 2667 // the bitmap for the bit associated with the start of the object, 2668 // scan the object if it's live, and give up the region. 2669 // Otherwise, let's iterate over the bitmap of the part of the region 2670 // that is left. 2671 // If the iteration is successful, give up the region. 2672 if (mr.is_empty()) { 2673 giveup_current_region(); 2674 regular_clock_call(); 2675 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2676 if (_next_mark_bitmap->is_marked(mr.start())) { 2677 // The object is marked - apply the closure 2678 bitmap_closure.do_addr(mr.start()); 2679 } 2680 // Even if this task aborted while scanning the humongous object 2681 // we can (and should) give up the current region. 2682 giveup_current_region(); 2683 regular_clock_call(); 2684 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) { 2685 giveup_current_region(); 2686 regular_clock_call(); 2687 } else { 2688 assert(has_aborted(), "currently the only way to do so"); 2689 // The only way to abort the bitmap iteration is to return 2690 // false from the do_addr() method. However, inside the 2691 // do_addr() method we move the _finger to point to the 2692 // object currently being looked at. So, if we bail out, we 2693 // have definitely set _finger to something non-null. 2694 assert(_finger != NULL, "invariant"); 2695 2696 // Region iteration was actually aborted. So now _finger 2697 // points to the address of the object we last scanned. If we 2698 // leave it there, when we restart this task, we will rescan 2699 // the object. It is easy to avoid this. We move the finger by 2700 // enough to point to the next possible object header. 2701 assert(_finger < _region_limit, "invariant"); 2702 HeapWord* const new_finger = _finger + ((oop)_finger)->size(); 2703 // Check if bitmap iteration was aborted while scanning the last object 2704 if (new_finger >= _region_limit) { 2705 giveup_current_region(); 2706 } else { 2707 move_finger_to(new_finger); 2708 } 2709 } 2710 } 2711 // At this point we have either completed iterating over the 2712 // region we were holding on to, or we have aborted. 2713 2714 // We then partially drain the local queue and the global stack. 2715 // (Do we really need this?) 2716 drain_local_queue(true); 2717 drain_global_stack(true); 2718 2719 // Read the note on the claim_region() method on why it might 2720 // return NULL with potentially more regions available for 2721 // claiming and why we have to check out_of_regions() to determine 2722 // whether we're done or not.
2723 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 2724 // We are going to try to claim a new region. We should have 2725 // given up on the previous one. 2726 // Separated the asserts so that we know which one fires. 2727 assert(_curr_region == NULL, "invariant"); 2728 assert(_finger == NULL, "invariant"); 2729 assert(_region_limit == NULL, "invariant"); 2730 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 2731 if (claimed_region != NULL) { 2732 // Yes, we managed to claim one 2733 setup_for_region(claimed_region); 2734 assert(_curr_region == claimed_region, "invariant"); 2735 } 2736 // It is important to call the regular clock here. It might take 2737 // a while to claim a region if, for example, we hit a large 2738 // block of empty regions. So we need to call the regular clock 2739 // method once round the loop to make sure it's called 2740 // frequently enough. 2741 regular_clock_call(); 2742 } 2743 2744 if (!has_aborted() && _curr_region == NULL) { 2745 assert(_cm->out_of_regions(), 2746 "at this point we should be out of regions"); 2747 } 2748 } while ( _curr_region != NULL && !has_aborted()); 2749 2750 if (!has_aborted()) { 2751 // We cannot check whether the global stack is empty, since other 2752 // tasks might be pushing objects to it concurrently. 2753 assert(_cm->out_of_regions(), 2754 "at this point we should be out of regions"); 2755 // Try to reduce the number of available SATB buffers so that 2756 // remark has less work to do. 2757 drain_satb_buffers(); 2758 } 2759 2760 // Since we've done everything else, we can now totally drain the 2761 // local queue and global stack. 2762 drain_local_queue(false); 2763 drain_global_stack(false); 2764 2765 // Attempt at work stealing from other tasks' queues. 2766 if (do_stealing && !has_aborted()) { 2767 // We have not aborted. This means that we have finished all that 2768 // we could. Let's try to do some stealing... 2769 2770 // We cannot check whether the global stack is empty, since other 2771 // tasks might be pushing objects to it concurrently. 2772 assert(_cm->out_of_regions() && _task_queue->size() == 0, 2773 "only way to reach here"); 2774 while (!has_aborted()) { 2775 G1TaskQueueEntry entry; 2776 if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) { 2777 scan_task_entry(entry); 2778 2779 // And since we're towards the end, let's totally drain the 2780 // local queue and global stack. 2781 drain_local_queue(false); 2782 drain_global_stack(false); 2783 } else { 2784 break; 2785 } 2786 } 2787 } 2788 2789 // We still haven't aborted. Now, let's try to get into the 2790 // termination protocol. 2791 if (do_termination && !has_aborted()) { 2792 // We cannot check whether the global stack is empty, since other 2793 // tasks might be concurrently pushing objects on it. 2794 // Separated the asserts so that we know which one fires. 2795 assert(_cm->out_of_regions(), "only way to reach here"); 2796 assert(_task_queue->size() == 0, "only way to reach here"); 2797 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 2798 2799 // The G1CMTask class also extends the TerminatorTerminator class, 2800 // hence its should_exit_termination() method will also decide 2801 // whether to exit the termination protocol or not.
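// (offer_termination() only returns true once every task has offered
// termination; it returns false early when should_exit_termination()
// above reports that the global stack has entries again or that this
// task was asked to abort, in which case we take the set_has_aborted()
// branch below.)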
bool finished = (is_serial || 2803 _cm->terminator()->offer_termination(this)); 2804 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 2805 _termination_time_ms += 2806 termination_end_time_ms - _termination_start_time_ms; 2807 2808 if (finished) { 2809 // We're all done. 2810 2811 // We can now guarantee that the global stack is empty, since 2812 // all other tasks have finished. We separated the guarantees so 2813 // that, if a condition is false, we can immediately find out 2814 // which one. 2815 guarantee(_cm->out_of_regions(), "only way to reach here"); 2816 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 2817 guarantee(_task_queue->size() == 0, "only way to reach here"); 2818 guarantee(!_cm->has_overflown(), "only way to reach here"); 2819 } else { 2820 // Apparently there's more work to do. Let's abort this task. Its caller 2821 // will restart it and we can hopefully find more things to do. 2822 set_has_aborted(); 2823 } 2824 } 2825 2826 // Mainly for debugging purposes to make sure that a pointer to the 2827 // closure which was stack-allocated in this frame doesn't 2828 // escape it by accident. 2829 set_cm_oop_closure(NULL); 2830 double end_time_ms = os::elapsedVTime() * 1000.0; 2831 double elapsed_time_ms = end_time_ms - _start_time_ms; 2832 // Update the step history. 2833 _step_times_ms.add(elapsed_time_ms); 2834 2835 if (has_aborted()) { 2836 // The task was aborted for some reason. 2837 if (_has_timed_out) { 2838 double diff_ms = elapsed_time_ms - _time_target_ms; 2839 // Keep statistics of how well we did with respect to hitting 2840 // our target only if we actually timed out (if we aborted for 2841 // other reasons, then the results might get skewed). 2842 _marking_step_diffs_ms.add(diff_ms); 2843 } 2844 2845 if (_cm->has_overflown()) { 2846 // This is the interesting one. We aborted because a global 2847 // overflow was raised. This means we have to restart the 2848 // marking phase and start iterating over regions. However, in 2849 // order to do this we have to make sure that all tasks stop 2850 // what they are doing and re-initialize in a safe manner. We 2851 // will achieve this with the use of two barrier sync points. 2852 2853 if (!is_serial) { 2854 // We only need to enter the sync barrier if being called 2855 // from a parallel context. 2856 _cm->enter_first_sync_barrier(_worker_id); 2857 2858 // When we exit this sync barrier we know that all tasks have 2859 // stopped doing marking work. So, it's now safe to 2860 // re-initialize our data structures. 2861 } 2862 2863 clear_region_fields(); 2864 flush_mark_stats_cache(); 2865 2866 if (!is_serial) { 2867 // If we're executing the concurrent phase of marking, reset the marking 2868 // state; otherwise the marking state is reset after reference processing, 2869 // during the remark pause. 2870 // If we reset here as a result of an overflow during the remark, we will 2871 // see assertion failures from any subsequent set_concurrency_and_phase() 2872 // calls. 2873 if (_cm->concurrent() && _worker_id == 0) { 2874 // Worker 0 is responsible for clearing the global data structures because 2875 // of an overflow. During STW we should not clear the overflow flag (in 2876 // G1ConcurrentMark::reset_marking_for_restart()) since we rely on it being true when we exit 2877 // this method to abort the pause and restart concurrent marking. 2878 _cm->reset_marking_for_restart(); 2879 2880 log_info(gc, marking)("Concurrent Mark reset for overflow"); 2881 } 2882 2883 // ...and enter the second barrier.
2884 _cm->enter_second_sync_barrier(_worker_id); 2885 } 2886 // At this point, if we're during the concurrent phase of 2887 // marking, everything has been re-initialized and we're 2888 // ready to restart. 2889 } 2890 } 2891 } 2892 2893 G1CMTask::G1CMTask(uint worker_id, 2894 G1ConcurrentMark* cm, 2895 G1CMTaskQueue* task_queue, 2896 G1RegionMarkStats* mark_stats, 2897 uint max_regions) : 2898 _objArray_processor(this), 2899 _worker_id(worker_id), 2900 _g1h(G1CollectedHeap::heap()), 2901 _cm(cm), 2902 _next_mark_bitmap(NULL), 2903 _task_queue(task_queue), 2904 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize), 2905 _calls(0), 2906 _time_target_ms(0.0), 2907 _start_time_ms(0.0), 2908 _cm_oop_closure(NULL), 2909 _curr_region(NULL), 2910 _finger(NULL), 2911 _region_limit(NULL), 2912 _words_scanned(0), 2913 _words_scanned_limit(0), 2914 _real_words_scanned_limit(0), 2915 _refs_reached(0), 2916 _refs_reached_limit(0), 2917 _real_refs_reached_limit(0), 2918 _hash_seed(17), 2919 _has_aborted(false), 2920 _has_timed_out(false), 2921 _draining_satb_buffers(false), 2922 _step_times_ms(), 2923 _elapsed_time_ms(0.0), 2924 _termination_time_ms(0.0), 2925 _termination_start_time_ms(0.0), 2926 _marking_step_diffs_ms() 2927 { 2928 guarantee(task_queue != NULL, "invariant"); 2929 2930 _marking_step_diffs_ms.add(0.5); 2931 } 2932 2933 // These are formatting macros that are used below to ensure 2934 // consistent formatting. The *_H_* versions are used to format the 2935 // header for a particular value and they should be kept consistent 2936 // with the corresponding macro. Also note that most of the macros add 2937 // the necessary white space (as a prefix) which makes them a bit 2938 // easier to compose. 2939 2940 // All the output lines are prefixed with this string to be able to 2941 // identify them easily in a large log file. 2942 #define G1PPRL_LINE_PREFIX "###" 2943 2944 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2945 #ifdef _LP64 2946 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2947 #else // _LP64 2948 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2949 #endif // _LP64 2950 2951 // For per-region info 2952 #define G1PPRL_TYPE_FORMAT " %-4s" 2953 #define G1PPRL_TYPE_H_FORMAT " %4s" 2954 #define G1PPRL_STATE_FORMAT " %-5s" 2955 #define G1PPRL_STATE_H_FORMAT " %5s" 2956 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2957 #define G1PPRL_BYTE_H_FORMAT " %9s" 2958 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2959 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2960 2961 // For summary info 2962 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2963 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2964 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2965 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2966 2967 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2968 _total_used_bytes(0), _total_capacity_bytes(0), 2969 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2970 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2971 { 2972 if (!log_is_enabled(Trace, gc, liveness)) { 2973 return; 2974 } 2975 2976 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2977 MemRegion g1_reserved = g1h->g1_reserved(); 2978 double now = os::elapsedTime(); 2979 2980 // Print the header of the output. 
2981 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2982 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2983 G1PPRL_SUM_ADDR_FORMAT("reserved") 2984 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2985 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2986 HeapRegion::GrainBytes); 2987 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2988 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2989 G1PPRL_TYPE_H_FORMAT 2990 G1PPRL_ADDR_BASE_H_FORMAT 2991 G1PPRL_BYTE_H_FORMAT 2992 G1PPRL_BYTE_H_FORMAT 2993 G1PPRL_BYTE_H_FORMAT 2994 G1PPRL_DOUBLE_H_FORMAT 2995 G1PPRL_BYTE_H_FORMAT 2996 G1PPRL_STATE_H_FORMAT 2997 G1PPRL_BYTE_H_FORMAT, 2998 "type", "address-range", 2999 "used", "prev-live", "next-live", "gc-eff", 3000 "remset", "state", "code-roots"); 3001 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3002 G1PPRL_TYPE_H_FORMAT 3003 G1PPRL_ADDR_BASE_H_FORMAT 3004 G1PPRL_BYTE_H_FORMAT 3005 G1PPRL_BYTE_H_FORMAT 3006 G1PPRL_BYTE_H_FORMAT 3007 G1PPRL_DOUBLE_H_FORMAT 3008 G1PPRL_BYTE_H_FORMAT 3009 G1PPRL_STATE_H_FORMAT 3010 G1PPRL_BYTE_H_FORMAT, 3011 "", "", 3012 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 3013 "(bytes)", "", "(bytes)"); 3014 } 3015 3016 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { 3017 if (!log_is_enabled(Trace, gc, liveness)) { 3018 return false; 3019 } 3020 3021 const char* type = r->get_type_str(); 3022 HeapWord* bottom = r->bottom(); 3023 HeapWord* end = r->end(); 3024 size_t capacity_bytes = r->capacity(); 3025 size_t used_bytes = r->used(); 3026 size_t prev_live_bytes = r->live_bytes(); 3027 size_t next_live_bytes = r->next_live_bytes(); 3028 double gc_eff = r->gc_efficiency(); 3029 size_t remset_bytes = r->rem_set()->mem_size(); 3030 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 3031 const char* remset_type = r->rem_set()->get_short_state_str(); 3032 3033 _total_used_bytes += used_bytes; 3034 _total_capacity_bytes += capacity_bytes; 3035 _total_prev_live_bytes += prev_live_bytes; 3036 _total_next_live_bytes += next_live_bytes; 3037 _total_remset_bytes += remset_bytes; 3038 _total_strong_code_roots_bytes += strong_code_roots_bytes; 3039 3040 // Print a line for this particular region. 3041 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3042 G1PPRL_TYPE_FORMAT 3043 G1PPRL_ADDR_BASE_FORMAT 3044 G1PPRL_BYTE_FORMAT 3045 G1PPRL_BYTE_FORMAT 3046 G1PPRL_BYTE_FORMAT 3047 G1PPRL_DOUBLE_FORMAT 3048 G1PPRL_BYTE_FORMAT 3049 G1PPRL_STATE_FORMAT 3050 G1PPRL_BYTE_FORMAT, 3051 type, p2i(bottom), p2i(end), 3052 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 3053 remset_bytes, remset_type, strong_code_roots_bytes); 3054 3055 return false; 3056 } 3057 3058 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 3059 if (!log_is_enabled(Trace, gc, liveness)) { 3060 return; 3061 } 3062 3063 // add static memory usages to remembered set sizes 3064 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 3065 // Print the footer of the output. 
3066 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 3067 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 3068 " SUMMARY" 3069 G1PPRL_SUM_MB_FORMAT("capacity") 3070 G1PPRL_SUM_MB_PERC_FORMAT("used") 3071 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 3072 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 3073 G1PPRL_SUM_MB_FORMAT("remset") 3074 G1PPRL_SUM_MB_FORMAT("code-roots"), 3075 bytes_to_mb(_total_capacity_bytes), 3076 bytes_to_mb(_total_used_bytes), 3077 percent_of(_total_used_bytes, _total_capacity_bytes), 3078 bytes_to_mb(_total_prev_live_bytes), 3079 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 3080 bytes_to_mb(_total_next_live_bytes), 3081 percent_of(_total_next_live_bytes, _total_capacity_bytes), 3082 bytes_to_mb(_total_remset_bytes), 3083 bytes_to_mb(_total_strong_code_roots_bytes)); 3084 }
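// Editorial note: the per-region liveness table printed by
// G1PrintRegionLivenessInfoClosure above is emitted at trace level for the
// (gc, liveness) tags, i.e. it can be enabled on the command line with
//
//   java -Xlog:gc+liveness=trace ...
//
// Every line is prefixed with G1PPRL_LINE_PREFIX ("###") so the table is
// easy to grep out of a larger log.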