/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}
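// Illustrative arithmetic for the conversions above (assuming 8-byte
// G1TaskQueueEntry values, a 4 KB vm_allocation_granularity() and, for the
// sake of the example, an 8 KB TaskQueueEntryChunk; the real values are
// platform dependent): capacity_alignment() == lcm(4096, 8192) / 8 == 1024
// entries, so a requested capacity of 16384 entries becomes
// align_up(16384, 1024) / 1024 == 16 chunks.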
void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}
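// Sketch of the chunked push/pop protocol implemented below: a push first
// tries to recycle a chunk from _free_list, then falls back to carving a
// fresh chunk out of _base by bumping _hwm; the caller's EntriesPerChunk
// entries are copied in and the chunk is prepended to _chunk_list. A pop
// reverses this, copying the entries out and returning the chunk to
// _free_list. par_push_chunk() returning false is the overflow signal that
// eventually raises has_overflown().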
bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
  _root_regions(MemRegion::create_array(max_regions, mtGC)),
  _max_regions(max_regions),
  _num_root_regions(0),
  _claimed_root_regions(0),
  _scan_in_progress(false),
  _should_abort(false) { }

G1CMRootMemRegions::~G1CMRootMemRegions() {
  MemRegion::destroy_array(_root_regions, _max_regions);
}

void G1CMRootMemRegions::reset() {
  _num_root_regions = 0;
}

void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
  assert_at_safepoint();
  size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
  assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
  assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less than or equal to "
         "end (" PTR_FORMAT ")", p2i(start), p2i(end));
  _root_regions[idx].set_start(start);
  _root_regions[idx].set_end(end);
}

void G1CMRootMemRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  _scan_in_progress = _num_root_regions > 0;

  _claimed_root_regions = 0;
  _should_abort = false;
}
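// claim_next() below hands out regions by atomically bumping
// _claimed_root_regions; the early comparison against _num_root_regions is
// only a fast path, the re-check of the fetched index is what makes the
// claim race free. A NULL result means either that every region has been
// claimed or that _should_abort has been set.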
const MemRegion* G1CMRootMemRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  if (_claimed_root_regions >= _num_root_regions) {
    return NULL;
  }

  size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u);
  if (claimed_index < _num_root_regions) {
    return &_root_regions[claimed_index];
  }
  return NULL;
}

uint G1CMRootMemRegions::num_root_regions() const {
  return (uint)_num_root_regions;
}

void G1CMRootMemRegions::notify_scan_done() {
  MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootMemRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootMemRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  if (!_should_abort) {
    assert(_claimed_root_regions >= num_root_regions(),
           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
           _claimed_root_regions, num_root_regions());
  }

  notify_scan_done();
}

bool G1CMRootMemRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      ml.wait();
    }
  }
  return true;
}
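// Typical lifecycle of this set, pieced together from the methods above:
// regions are add()ed during the concurrent start pause, prepare_for_scan()
// arms the set, concurrent workers then claim_next() until NULL and finish
// with scan_finished() (or cancel_scan() when aborting). Code that must not
// race with an in-progress scan can block in wait_until_scan_finished().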
G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(_g1h->max_regions()),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator((int) _max_num_tasks, _task_queues),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
}

void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use different numbers of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got humongous continues region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}
void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator.reset_for_reuse(active_tasks);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}
class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};
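// Rough sizing intuition for clear_bitmap() below (assuming a 64-bit VM where
// one mark bit covers one heap word, i.e. heap_map_factor() == 64): a 16 GB
// heap has 256 MB of bitmap, which at the one-MB chunk_size() above yields
// 256 work units, so at most 256 workers could usefully participate; the
// actual count is further capped by the gang's active_workers().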
void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_concurrent_start() {
  assert_at_safepoint_on_vm_thread();

  // Reset marking state.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);

  _root_regions.reset();
}

void G1ConcurrentMark::post_concurrent_start() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // concurrent start pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that a Full GC or an evacuation pause could occur while
 * it is suspended. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */
void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial*/);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
      AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};
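// Note on the marking loop above: each worker runs do_marking_step() in
// time-bounded slices of G1ConcMarkStepDurationMillis (by default on the
// order of 10 ms) and calls do_yield_check() between slices, so a pending
// safepoint only ever has to wait for the tail of the current slice rather
// than for all of marking.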
uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads || !FLAG_IS_DEFAULT(ConcGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
                                                1, /* Minimum workers */
                                                _num_concurrent_workers,
                                                Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_workers() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
#ifdef ASSERT
  HeapWord* last = region->last();
  HeapRegion* hr = _g1h->heap_region_containing(last);
  assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
         "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
  assert(hr->next_top_at_mark_start() == region->start(),
         "MemRegion start should be equal to nTAMS");
#endif

  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = region->start();
  const HeapWord* end = region->end();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootMemRegions* root_regions = _cm->root_regions();
    const MemRegion* region = root_regions->claim_next();
    while (region != NULL) {
      _cm->scan_root_region(region, worker_id);
      region = root_regions->claim_next();
    }
  }
};
void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}
class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }

    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous starts
      // region. We need to distribute this value across the entire set of regions a
      // humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which roughly one thread should be spawned for this work.
  static const uint RegionsPerThread = 384;
};

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};
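// Worked example of the worker sizing in remark() below: the expression
// (num_regions + RegionsPerThread - 1) / RegionsPerThread is a ceiling
// division, so e.g. a heap of 2048 regions with RegionsPerThread == 384 asks
// for ceil(2048 / 384) == 6 workers, which is then capped by the work gang's
// active_workers().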
void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  G1Policy* policy = _g1h->policy();
  policy->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    _g1h->resize_heap_if_necessary();

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  policy->record_concurrent_mark_remark_end();
}

class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;

  public:
    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                 FreeRegionList* local_cleanup_list) :
      _g1h(g1h),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");
    }
  }
};
void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}

void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* policy = _g1h->policy();
  policy->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    policy->record_concurrent_mark_cleanup_end();
  }
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.
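//
// Batching note for the closure below: every deal_with_reference() call that
// adds marking work decrements _ref_counter; once G1RefProcDrainInterval
// references have been pushed, the closure pauses and drains via
// do_marking_step(), keeping the local queues (and therefore the global
// stack) from growing without bound during reference processing.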
class G1CMKeepAliveAndDrainClosure : public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  uint              _ref_counter_limit;
  uint              _ref_counter;
  bool              _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
    _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (_cm->has_overflown()) {
      return;
    }
    if (!_task->deal_with_reference(p)) {
      // We did not add anything to the mark bitmap (or mark stack), so there is
      // no point trying to drain it.
      return;
    }
    _ref_counter--;

    if (_ref_counter == 0) {
      // We have dealt with _ref_counter_limit references, pushing them
      // and objects reachable from them on to the local stack (and
      // possibly the global stack). Call G1CMTask::do_marking_step() to
      // process these entries.
      //
      // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
      // there's nothing more to do (i.e. we're done with the entries that
      // were pushed as a result of the G1CMTask::deal_with_reference() calls
      // above) or we overflow.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.
      do {
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
        _task->do_marking_step(mark_step_duration_ms,
                               false      /* do_termination */,
                               _is_serial);
      } while (_task->has_aborted() && !_cm->has_overflown());
      _ref_counter = _ref_counter_limit;
    }
  }
};
// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure : public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  bool              _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;
  WorkGang*         _workers;
  uint              _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  virtual void execute(ProcessTask& task, uint ergo_workers);
};

class G1CMRefProcTaskProxy : public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask&      _proc_task;
  G1CollectedHeap*  _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
  assert(_workers->active_workers() >= ergo_workers,
         "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
         ergo_workers, _workers->active_workers());

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(ergo_workers);
  _workers->run_task(&proc_task_proxy, ergo_workers);
}
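// Shape of weak_refs_work() below, in order: process the discovered
// references (serially or through the executor above, depending on
// processing_is_mt()), run WeakProcessor over the remaining weak oop
// storages, and finally do class unloading or string deduplication cleaning.
// An overflow of the global mark stack during reference processing is fatal
// because the is-alive closure can no longer be trusted afterwards.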
void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
  ResourceMark rm;
  HandleMark hm;

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(_g1h);

  // Inner scope to exclude the cleaning of the string table
  // from the displayed time.
  {
    GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);

    ReferenceProcessor* rp = _g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy
    rp->setup_policy(clear_all_soft_refs);
    assert(_global_mark_stack.is_empty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (e.g.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
    active_workers = clamp(active_workers, 1u, _max_num_tasks);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
                                              _g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());

    // Process the weak references.
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&g1_is_alive,
                                          &g1_keep_alive,
                                          &g1_drain_mark_stack,
                                          executor,
                                          &pt);
    _gc_tracer_cm->report_gc_reference_stats(stats);
    pt.print_all_references();

    // The do_oop work routines of the keep_alive and drain_marking_stack
    // oop closures will set the has_overflown flag if we overflow the
    // global marking stack.
1585
1586 assert(has_overflown() || _global_mark_stack.is_empty(),
1587 "Mark stack should be empty (unless it has overflown)");
1588
1589 assert(rp->num_queues() == active_workers, "why not");
1590
1591 rp->verify_no_references_recorded();
1592 assert(!rp->discovery_enabled(), "Post condition");
1593 }
1594
1595 if (has_overflown()) {
1596 // We cannot trust g1_is_alive and the contents of the heap if the marking stack
1597 // overflowed while processing references. Exit the VM.
1598 fatal("Overflow during reference processing, cannot continue. Please "
1599 "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1600 "restart.", MarkStackSizeMax);
1601 return;
1602 }
1603
1604 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1605
1606 {
1607 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1608 WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1609 }
1610
1611 // Unload Klasses, String, Code Cache, etc.
1612 if (ClassUnloadingWithConcurrentMark) {
1613 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1614 bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
1615 _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1616 } else if (StringDedup::is_enabled()) {
1617 GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm);
1618 _g1h->string_dedup_cleaning(&g1_is_alive, NULL);
1619 }
1620 }
1621
1622 class G1PrecleanYieldClosure : public YieldClosure {
1623 G1ConcurrentMark* _cm;
1624
1625 public:
1626 G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1627
1628 virtual bool should_return() {
1629 return _cm->has_aborted();
1630 }
1631
1632 virtual bool should_return_fine_grain() {
1633 _cm->do_yield_check();
1634 return _cm->has_aborted();
1635 }
1636 };
1637
1638 void G1ConcurrentMark::preclean() {
1639 assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1640
1641 SuspendibleThreadSetJoiner joiner;
1642
1643 G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1644 G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1645
1646 set_concurrency_and_phase(1, true);
1647
1648 G1PrecleanYieldClosure yield_cl(this);
1649
1650 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1651 // Precleaning is single threaded. Temporarily disable MT discovery.
1652 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1653 rp->preclean_discovered_references(rp->is_alive_non_header(),
1654 &keep_alive,
1655 &drain_mark_stack,
1656 &yield_cl,
1657 _gc_timer_cm);
1658 }
1659
1660 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1661 // the prev bitmap to determine liveness.
1662 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1663 G1CollectedHeap* _g1h;
1664 public:
1665 G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1666
1667 bool do_object_b(oop obj) {
1668 return obj != NULL &&
1669 (!_g1h->is_in_g1_reserved(obj) || !_g1h->is_obj_dead(obj));
1670 }
1671 };
1672
1673 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1674 // Depending on whether marking has completed, liveness needs to be
1675 // determined using either the next or the prev bitmap.
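// For illustration (hypothetical cases, assuming the usual TAMS-based
// liveness rules behind is_obj_dead()): with G1ObjectCountIsAliveClosure above,
//   obj == NULL                          -> not alive (filtered out)
//   obj outside the G1 reserved heap     -> alive
//   obj in the heap, !is_obj_dead(obj)   -> alive (marked on the prev bitmap
//                                           or allocated since marking started)
//   obj in the heap, is_obj_dead(obj)    -> dead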
1676 if (mark_completed) {
1677 G1ObjectCountIsAliveClosure is_alive(_g1h);
1678 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1679 } else {
1680 G1CMIsAliveClosure is_alive(_g1h);
1681 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1682 }
1683 }
1684
1685
1686 void G1ConcurrentMark::swap_mark_bitmaps() {
1687 G1CMBitMap* temp = _prev_mark_bitmap;
1688 _prev_mark_bitmap = _next_mark_bitmap;
1689 _next_mark_bitmap = temp;
1690 _g1h->collector_state()->set_clearing_next_bitmap(true);
1691 }
1692
1693 // Closure for marking entries in SATB buffers.
1694 class G1CMSATBBufferClosure : public SATBBufferClosure {
1695 private:
1696 G1CMTask* _task;
1697 G1CollectedHeap* _g1h;
1698
1699 // This is very similar to G1CMTask::deal_with_reference, but with
1700 // more relaxed requirements for the argument, so this must be more
1701 // circumspect about treating the argument as an object.
1702 void do_entry(void* entry) const {
1703 _task->increment_refs_reached();
1704 oop const obj = static_cast<oop>(entry);
1705 _task->make_reference_grey(obj);
1706 }
1707
1708 public:
1709 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1710 : _task(task), _g1h(g1h) { }
1711
1712 virtual void do_buffer(void** buffer, size_t size) {
1713 for (size_t i = 0; i < size; ++i) {
1714 do_entry(buffer[i]);
1715 }
1716 }
1717 };
1718
1719 class G1RemarkThreadsClosure : public ThreadClosure {
1720 G1CMSATBBufferClosure _cm_satb_cl;
1721 G1CMOopClosure _cm_cl;
1722 MarkingCodeBlobClosure _code_cl;
1723 uintx _claim_token;
1724
1725 public:
1726 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1727 _cm_satb_cl(task, g1h),
1728 _cm_cl(g1h, task),
1729 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1730 _claim_token(Threads::thread_claim_token()) {}
1731
1732 void do_thread(Thread* thread) {
1733 if (thread->claim_threads_do(true, _claim_token)) {
1734 SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
1735 queue.apply_closure_and_empty(&_cm_satb_cl);
1736 if (thread->is_Java_thread()) {
1737 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1738 // however, oops reachable from nmethods have very complex lifecycles:
1739 // * Alive if on the stack of an executing method
1740 // * Weakly reachable otherwise
1741 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1742 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1743 JavaThread* jt = (JavaThread*)thread;
1744 jt->nmethods_do(&_code_cl);
1745 }
1746 }
1747 }
1748 };
1749
1750 class G1CMRemarkTask : public AbstractGangTask {
1751 G1ConcurrentMark* _cm;
1752 public:
1753 void work(uint worker_id) {
1754 G1CMTask* task = _cm->task(worker_id);
1755 task->record_start_time();
1756 {
1757 ResourceMark rm;
1758 HandleMark hm;
1759
1760 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1761 Threads::threads_do(&threads_f);
1762 }
1763
1764 do {
1765 task->do_marking_step(1000000000.0 /* something very large */,
1766 true /* do_termination */,
1767 false /* is_serial */);
1768 } while (task->has_aborted() && !_cm->has_overflown());
1769 // If we overflow, then we do not want to restart. We instead
1770 // want to abort remark and do concurrent marking again.
1771 task->record_end_time();
1772 }
1773
1774 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1775 AbstractGangTask("Par Remark"), _cm(cm) {
1776 _cm->terminator()->reset_for_reuse(active_workers);
1777 }
1778 };
1779
1780 void G1ConcurrentMark::finalize_marking() {
1781 ResourceMark rm;
1782 HandleMark hm;
1783
1784 _g1h->ensure_parsability(false);
1785
1786 // This is remark, so we'll use up all active threads.
1787 uint active_workers = _g1h->workers()->active_workers();
1788 set_concurrency_and_phase(active_workers, false /* concurrent */);
1789 // Leave _parallel_marking_threads at its
1790 // value originally calculated in the G1ConcurrentMark
1791 // constructor and pass values of the active workers
1792 // through the gang in the task.
1793
1794 {
1795 StrongRootsScope srs(active_workers);
1796
1797 G1CMRemarkTask remarkTask(this, active_workers);
1798 // We will start all available threads, even if we decide that the
1799 // active_workers will be fewer. The extra ones will just bail out
1800 // immediately.
1801 _g1h->workers()->run_task(&remarkTask);
1802 }
1803
1804 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1805 guarantee(has_overflown() ||
1806 satb_mq_set.completed_buffers_num() == 0,
1807 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1808 BOOL_TO_STR(has_overflown()),
1809 satb_mq_set.completed_buffers_num());
1810
1811 print_stats();
1812 }
1813
1814 void G1ConcurrentMark::flush_all_task_caches() {
1815 size_t hits = 0;
1816 size_t misses = 0;
1817 for (uint i = 0; i < _max_num_tasks; i++) {
1818 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1819 hits += stats.first;
1820 misses += stats.second;
1821 }
1822 size_t sum = hits + misses;
1823 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1824 hits, misses, percent_of(hits, sum));
1825 }
1826
1827 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1828 _prev_mark_bitmap->clear_range(mr);
1829 }
1830
1831 HeapRegion*
1832 G1ConcurrentMark::claim_region(uint worker_id) {
1833 // "checkpoint" the finger
1834 HeapWord* finger = _finger;
1835
1836 while (finger < _heap.end()) {
1837 assert(_g1h->is_in_g1_reserved(finger), "invariant");
1838
1839 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1840 // Make sure that the reads below do not float before loading curr_region.
1841 OrderAccess::loadload();
1842 // The above heap_region_containing may return NULL as we always scan and
1843 // claim regions until the end of the heap. In this case, just jump to the next region.
1844 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1845
1846 // Is the gap between reading the finger and doing the CAS too long?
1847 HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
1848 if (res == finger && curr_region != NULL) {
1849 // we succeeded
1850 HeapWord* bottom = curr_region->bottom();
1851 HeapWord* limit = curr_region->next_top_at_mark_start();
1852
1853 // Notice that _finger == end cannot be guaranteed here since
1854 // someone else might have moved the finger even further.
1855 assert(_finger >= end, "the finger should have moved forward");
1856
1857 if (limit > bottom) {
1858 return curr_region;
1859 } else {
1860 assert(limit == bottom,
1861 "the region limit should be at bottom");
1862 // We return NULL and the caller should try calling
1863 // claim_region() again.
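// Illustrative caller pattern (a simplified sketch; the real retry loop
// lives in do_marking_step()):
//
//   HeapRegion* r;
//   do {
//     r = _cm->claim_region(worker_id);
//   } while (r == NULL && !_cm->out_of_regions());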
1864 return NULL; 1865 } 1866 } else { 1867 assert(_finger > finger, "the finger should have moved forward"); 1868 // read it again 1869 finger = _finger; 1870 } 1871 } 1872 1873 return NULL; 1874 } 1875 1876 #ifndef PRODUCT 1877 class VerifyNoCSetOops { 1878 G1CollectedHeap* _g1h; 1879 const char* _phase; 1880 int _info; 1881 1882 public: 1883 VerifyNoCSetOops(const char* phase, int info = -1) : 1884 _g1h(G1CollectedHeap::heap()), 1885 _phase(phase), 1886 _info(info) 1887 { } 1888 1889 void operator()(G1TaskQueueEntry task_entry) const { 1890 if (task_entry.is_array_slice()) { 1891 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1892 return; 1893 } 1894 guarantee(oopDesc::is_oop(task_entry.obj()), 1895 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1896 p2i(task_entry.obj()), _phase, _info); 1897 HeapRegion* r = _g1h->heap_region_containing(task_entry.obj()); 1898 guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()), 1899 "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set", 1900 p2i(task_entry.obj()), _phase, _info, r->hrm_index()); 1901 } 1902 }; 1903 1904 void G1ConcurrentMark::verify_no_collection_set_oops() { 1905 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1906 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { 1907 return; 1908 } 1909 1910 // Verify entries on the global mark stack 1911 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1912 1913 // Verify entries on the task queues 1914 for (uint i = 0; i < _max_num_tasks; ++i) { 1915 G1CMTaskQueue* queue = _task_queues->queue(i); 1916 queue->iterate(VerifyNoCSetOops("Queue", i)); 1917 } 1918 1919 // Verify the global finger 1920 HeapWord* global_finger = finger(); 1921 if (global_finger != NULL && global_finger < _heap.end()) { 1922 // Since we always iterate over all regions, we might get a NULL HeapRegion 1923 // here. 1924 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1925 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1926 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1927 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1928 } 1929 1930 // Verify the task fingers 1931 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 1932 for (uint i = 0; i < _num_concurrent_workers; ++i) { 1933 G1CMTask* task = _tasks[i]; 1934 HeapWord* task_finger = task->finger(); 1935 if (task_finger != NULL && task_finger < _heap.end()) { 1936 // See above note on the global finger verification. 
1937 HeapRegion* r = _g1h->heap_region_containing(task_finger);
1938 guarantee(r == NULL || task_finger == r->bottom() ||
1939 !r->in_collection_set() || !r->has_index_in_opt_cset(),
1940 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1941 p2i(task_finger), HR_FORMAT_PARAMS(r));
1942 }
1943 }
1944 }
1945 #endif // PRODUCT
1946
1947 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
1948 _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
1949 }
1950
1951 void G1ConcurrentMark::print_stats() {
1952 if (!log_is_enabled(Debug, gc, stats)) {
1953 return;
1954 }
1955 log_debug(gc, stats)("---------------------------------------------------------------------");
1956 for (size_t i = 0; i < _num_active_tasks; ++i) {
1957 _tasks[i]->print_stats();
1958 log_debug(gc, stats)("---------------------------------------------------------------------");
1959 }
1960 }
1961
1962 void G1ConcurrentMark::concurrent_cycle_abort() {
1963 if (!cm_thread()->during_cycle() || _has_aborted) {
1964 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
1965 return;
1966 }
1967
1968 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
1969 // concurrent bitmap clearing.
1970 {
1971 GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
1972 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
1973 }
1974 // Note we cannot clear the previous marking bitmap here
1975 // since VerifyDuringGC verifies the objects marked during
1976 // a full GC against the previous bitmap.
1977
1978 // Empty mark stack
1979 reset_marking_for_restart();
1980 for (uint i = 0; i < _max_num_tasks; ++i) {
1981 _tasks[i]->clear_region_fields();
1982 }
1983 _first_overflow_barrier_sync.abort();
1984 _second_overflow_barrier_sync.abort();
1985 _has_aborted = true;
1986
1987 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1988 satb_mq_set.abandon_partial_marking();
1989 // This can be called either during or outside marking; we'll read
1990 // the expected_active value from the SATB queue set.
1991 satb_mq_set.set_active_all_threads(
1992 false, /* new active value */
1993 satb_mq_set.is_active() /* expected_active */);
1994 }
1995
1996 static void print_ms_time_info(const char* prefix, const char* name,
1997 NumberSeq& ns) {
1998 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
1999 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2000 if (ns.num() > 0) {
2001 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
2002 prefix, ns.sd(), ns.maximum());
2003 }
2004 }
2005
2006 void G1ConcurrentMark::print_summary_info() {
2007 Log(gc, marking) log;
2008 if (!log.is_trace()) {
2009 return;
2010 }
2011
2012 log.trace(" Concurrent marking:");
2013 print_ms_time_info(" ", "init marks", _init_times);
2014 print_ms_time_info(" ", "remarks", _remark_times);
2015 {
2016 print_ms_time_info(" ", "final marks", _remark_mark_times);
2017 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
2018
2019 }
2020 print_ms_time_info(" ", "cleanups", _cleanup_times);
2021 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2022 _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2023 log.trace(" Total stop_world time = %8.2f s.",
2024 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2025 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).",
2026 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2027 }
2028
2029 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2030 _concurrent_workers->threads_do(tc);
2031 }
2032
2033 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2034 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2035 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2036 _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2037 _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2038 }
2039
2040 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2041 ReferenceProcessor* result = g1h->ref_processor_cm();
2042 assert(result != NULL, "CM reference processor should not be NULL");
2043 return result;
2044 }
2045
2046 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2047 G1CMTask* task)
2048 : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2049 _g1h(g1h), _task(task)
2050 { }
2051
2052 void G1CMTask::setup_for_region(HeapRegion* hr) {
2053 assert(hr != NULL,
2054 "claim_region() should have filtered out NULL regions");
2055 _curr_region = hr;
2056 _finger = hr->bottom();
2057 update_region_limit();
2058 }
2059
2060 void G1CMTask::update_region_limit() {
2061 HeapRegion* hr = _curr_region;
2062 HeapWord* bottom = hr->bottom();
2063 HeapWord* limit = hr->next_top_at_mark_start();
2064
2065 if (limit == bottom) {
2066 // The region was collected underneath our feet.
2067 // We set the finger to bottom to ensure that the bitmap
2068 // iteration that will follow this will not do anything.
2069 // (this is not a condition that holds when we set the region up,
2070 // as the region is not supposed to be empty in the first place)
2071 _finger = bottom;
2072 } else if (limit >= _region_limit) {
2073 assert(limit >= _finger, "peace of mind");
2074 } else {
2075 assert(limit < _region_limit, "only way to get here");
2076 // This can happen under some pretty unusual circumstances. An
2077 // evacuation pause empties the region underneath our feet (NTAMS
2078 // at bottom). We then do some allocation in the region (NTAMS
2079 // stays at bottom), followed by the region being used as a GC
2080 // alloc region (NTAMS will move to top() and the objects
2081 // originally below it will be grayed). All objects now marked in
2082 // the region are explicitly grayed, if below the global finger,
2083 // and in fact we do not need to scan anything else. So, we simply
2084 // set _finger to be limit to ensure that the bitmap iteration
2085 // doesn't do anything.
2086 _finger = limit;
2087 }
2088
2089 _region_limit = limit;
2090 }
2091
2092 void G1CMTask::giveup_current_region() {
2093 assert(_curr_region != NULL, "invariant");
2094 clear_region_fields();
2095 }
2096
2097 void G1CMTask::clear_region_fields() {
2098 // Values for these three fields that indicate that we're not
2099 // holding on to a region.
2100 _curr_region = NULL;
2101 _finger = NULL;
2102 _region_limit = NULL;
2103 }
2104
2105 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2106 if (cm_oop_closure == NULL) {
2107 assert(_cm_oop_closure != NULL, "invariant");
2108 } else {
2109 assert(_cm_oop_closure == NULL, "invariant");
2110 }
2111 _cm_oop_closure = cm_oop_closure;
2112 }
2113
2114 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2115 guarantee(next_mark_bitmap != NULL, "invariant");
2116 _next_mark_bitmap = next_mark_bitmap;
2117 clear_region_fields();
2118
2119 _calls = 0;
2120 _elapsed_time_ms = 0.0;
2121 _termination_time_ms = 0.0;
2122 _termination_start_time_ms = 0.0;
2123
2124 _mark_stats_cache.reset();
2125 }
2126
2127 bool G1CMTask::should_exit_termination() {
2128 if (!regular_clock_call()) {
2129 return true;
2130 }
2131
2132 // This is called when we are in the termination protocol. We should
2133 // quit if, for some reason, this task wants to abort or the global
2134 // stack is not empty (this means that we can get work from it).
2135 return !_cm->mark_stack_empty() || has_aborted();
2136 }
2137
2138 void G1CMTask::reached_limit() {
2139 assert(_words_scanned >= _words_scanned_limit ||
2140 _refs_reached >= _refs_reached_limit,
2141 "shouldn't have been called otherwise");
2142 abort_marking_if_regular_check_fail();
2143 }
2144
2145 bool G1CMTask::regular_clock_call() {
2146 if (has_aborted()) {
2147 return false;
2148 }
2149
2150 // First, we need to recalculate the words scanned and refs reached
2151 // limits for the next clock call.
2152 recalculate_limits();
2153
2154 // During the regular clock call we do the following:
2155
2156 // (1) If an overflow has been flagged, then we abort.
2157 if (_cm->has_overflown()) {
2158 return false;
2159 }
2160
2161 // If we are not concurrent (i.e. we're doing remark) we don't need
2162 // to check anything else. The other steps are only needed during
2163 // the concurrent marking phase.
2164 if (!_cm->concurrent()) {
2165 return true;
2166 }
2167
2168 // (2) If marking has been aborted for Full GC, then we also abort.
2169 if (_cm->has_aborted()) {
2170 return false;
2171 }
2172
2173 double curr_time_ms = os::elapsedVTime() * 1000.0;
2174
2175 // (3) We check whether we should yield. If we have to, then we abort.
2176 if (SuspendibleThreadSet::should_yield()) {
2177 // We should yield. To do this we abort the task. The caller is
2178 // responsible for yielding.
2179 return false;
2180 }
2181
2182 // (4) We check whether we've reached our time quota. If we have,
2183 // then we abort.
2184 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2185 if (elapsed_time_ms > _time_target_ms) {
2186 _has_timed_out = true;
2187 return false;
2188 }
2189
2190 // (5) Finally, we check whether there are enough completed SATB
2191 // buffers available for processing. If there are, we abort.
2192 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2193 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2194 // We do need to process SATB buffers, so we'll abort and restart
2195 // the marking task to do so.
2196 return false;
2197 }
2198 return true;
2199 }
2200
2201 void G1CMTask::recalculate_limits() {
2202 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2203 _words_scanned_limit = _real_words_scanned_limit;
2204
2205 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2206 _refs_reached_limit = _real_refs_reached_limit;
2207 }
2208
2209 void G1CMTask::decrease_limits() {
2210 // This is called when we believe that we're going to do an infrequent
2211 // operation which will increase the per-byte scanned cost (i.e. move
2212 // entries to/from the global stack). It basically tries to decrease the
2213 // scanning limit so that the clock is called earlier.
2214
2215 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2216 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2217 }
2218
2219 void G1CMTask::move_entries_to_global_stack() {
2220 // Local array where we'll store the entries that will be popped
2221 // from the local queue.
2222 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2223
2224 size_t n = 0;
2225 G1TaskQueueEntry task_entry;
2226 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2227 buffer[n] = task_entry;
2228 ++n;
2229 }
2230 if (n < G1CMMarkStack::EntriesPerChunk) {
2231 buffer[n] = G1TaskQueueEntry();
2232 }
2233
2234 if (n > 0) {
2235 if (!_cm->mark_stack_push(buffer)) {
2236 set_has_aborted();
2237 }
2238 }
2239
2240 // This operation was quite expensive, so decrease the limits.
2241 decrease_limits();
2242 }
2243
2244 bool G1CMTask::get_entries_from_global_stack() {
2245 // Local array where we'll store the entries that will be popped
2246 // from the global stack.
2247 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2248
2249 if (!_cm->mark_stack_pop(buffer)) {
2250 return false;
2251 }
2252
2253 // We did actually pop at least one entry.
2254 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2255 G1TaskQueueEntry task_entry = buffer[i];
2256 if (task_entry.is_null()) {
2257 break;
2258 }
2259 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2260 bool success = _task_queue->push(task_entry);
2261 // We only call this when the local queue is empty or under a
2262 // given target limit. So, we do not expect this push to fail.
2263 assert(success, "invariant");
2264 }
2265
2266 // This operation was quite expensive, so decrease the limits.
2267 decrease_limits();
2268 return true;
2269 }
2270
2271 void G1CMTask::drain_local_queue(bool partially) {
2272 if (has_aborted()) {
2273 return;
2274 }
2275
2276 // Decide what the target size is, depending on whether we're going to
2277 // drain it partially (so that other tasks can steal if they run out
2278 // of things to do) or totally (at the very end).
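// For illustration (hypothetical numbers): with max_elems() == 16384 and
// GCDrainStackTargetSize == 64, a partial drain scans entries until at most
// MIN2(16384 / 3, 64) == 64 remain, leaving work available for stealing;
// a total drain uses a target of 0.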
2279 size_t target_size;
2280 if (partially) {
2281 target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2282 } else {
2283 target_size = 0;
2284 }
2285
2286 if (_task_queue->size() > target_size) {
2287 G1TaskQueueEntry entry;
2288 bool ret = _task_queue->pop_local(entry);
2289 while (ret) {
2290 scan_task_entry(entry);
2291 if (_task_queue->size() <= target_size || has_aborted()) {
2292 ret = false;
2293 } else {
2294 ret = _task_queue->pop_local(entry);
2295 }
2296 }
2297 }
2298 }
2299
2300 void G1CMTask::drain_global_stack(bool partially) {
2301 if (has_aborted()) {
2302 return;
2303 }
2304
2305 // We have a policy to drain the local queue before we attempt to
2306 // drain the global stack.
2307 assert(partially || _task_queue->size() == 0, "invariant");
2308
2309 // Decide what the target size is, depending on whether we're going to
2310 // drain it partially (so that other tasks can steal if they run out
2311 // of things to do) or totally (at the very end).
2312 // Notice that when draining the global mark stack partially, due to the raciness
2313 // of the mark stack size update we might in fact drop below the target. But,
2314 // this is not a problem.
2315 // In case of total draining, we simply process until the global mark stack is
2316 // totally empty, disregarding the size counter.
2317 if (partially) {
2318 size_t const target_size = _cm->partial_mark_stack_size_target();
2319 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2320 if (get_entries_from_global_stack()) {
2321 drain_local_queue(partially);
2322 }
2323 }
2324 } else {
2325 while (!has_aborted() && get_entries_from_global_stack()) {
2326 drain_local_queue(partially);
2327 }
2328 }
2329 }
2330
2331 // SATB Queue has several assumptions on whether to call the par or
2332 // non-par versions of the methods. This is why some of the code is
2333 // replicated. We should really get rid of the single-threaded version
2334 // of the code to simplify things.
2335 void G1CMTask::drain_satb_buffers() {
2336 if (has_aborted()) {
2337 return;
2338 }
2339
2340 // We set this so that the regular clock knows that we're in the
2341 // middle of draining buffers and doesn't set the abort flag when it
2342 // notices that SATB buffers are available for draining. It'd be
2343 // very counterproductive if it did that. :-)
2344 _draining_satb_buffers = true;
2345
2346 G1CMSATBBufferClosure satb_cl(this, _g1h);
2347 SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2348
2349 // This keeps claiming and applying the closure to completed buffers
2350 // until we run out of buffers or we need to abort.
2351 while (!has_aborted() &&
2352 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2353 abort_marking_if_regular_check_fail();
2354 }
2355
2356 // Can't assert qset is empty here, even if not aborted. If concurrent,
2357 // some other thread might be adding to the queue. If not concurrent,
2358 // some other thread might have won the race for the last buffer, but
2359 // has not yet decremented the count.
2360
2361 _draining_satb_buffers = false;
2362
2363 // Again, this was a potentially expensive operation, so decrease the
2364 // limits to get the regular clock called early.
2365 decrease_limits();
2366 }
2367
2368 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2369 _mark_stats_cache.reset(region_idx);
2370 }
2371
2372 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2373 return _mark_stats_cache.evict_all();
2374 }
2375
2376 void G1CMTask::print_stats() {
2377 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2378 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2379 _elapsed_time_ms, _termination_time_ms);
2380 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2381 _step_times_ms.num(),
2382 _step_times_ms.avg(),
2383 _step_times_ms.sd(),
2384 _step_times_ms.maximum(),
2385 _step_times_ms.sum());
2386 size_t const hits = _mark_stats_cache.hits();
2387 size_t const misses = _mark_stats_cache.misses();
2388 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2389 hits, misses, percent_of(hits, hits + misses));
2390 }
2391
2392 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2393 return _task_queues->steal(worker_id, task_entry);
2394 }
2395
2396 /*****************************************************************************
2397
2398 The do_marking_step(time_target_ms, ...) method is the building
2399 block of the parallel marking framework. It can be called in parallel
2400 with other invocations of do_marking_step() on different tasks
2401 (but only one per task, obviously) and concurrently with the
2402 mutator threads, or during remark; hence it eliminates the need
2403 for two versions of the code. When called during remark, it will
2404 pick up from where the task left off during the concurrent marking
2405 phase. Interestingly, tasks are also claimable during evacuation
2406 pauses, since do_marking_step() ensures that it aborts before
2407 it needs to yield.
2408
2409 The data structures that it uses to do marking work are the
2410 following:
2411
2412 (1) Marking Bitmap. If there are gray objects that appear only
2413 on the bitmap (this happens either when dealing with an overflow
2414 or when the concurrent start pause has simply marked the roots
2415 and didn't push them on the stack), then tasks claim heap
2416 regions whose bitmap they then scan to find gray objects. A
2417 global finger indicates where the end of the last claimed region
2418 is. A local finger indicates how far into the region a task has
2419 scanned. The two fingers are used to determine how to gray an
2420 object (i.e. whether simply marking it is OK, as it will be
2421 visited by a task in the future, or whether it needs to be also
2422 pushed on a stack).
2423
2424 (2) Local Queue. The local queue of the task which is accessed
2425 reasonably efficiently by the task. Other tasks can steal from
2426 it when they run out of work. Throughout the marking phase, a
2427 task attempts to keep its local queue short but not totally
2428 empty, so that entries are available for stealing by other
2429 tasks. Only when there is no more work will a task totally
2430 drain its local queue.
2431
2432 (3) Global Mark Stack. This handles local queue overflow. During
2433 marking only sets of entries are moved between it and the local
2434 queues, as access to it requires a mutex and more fine-grained
2435 interaction with it, which might cause contention. If it
2436 overflows, then the marking phase should restart and iterate
2437 over the bitmap to identify gray objects. Throughout the marking
2438 phase, tasks attempt to keep the global mark stack at a small
2439 length but not totally empty, so that entries are available for
2440 popping by other tasks. Only when there is no more work will tasks
2441 totally drain the global mark stack.
2442
2443 (4) SATB Buffer Queue. This is where completed SATB buffers are
2444 made available. Buffers are regularly removed from this queue
2445 and scanned for roots, so that the queue doesn't get too
2446 long. During remark, all completed buffers are processed, as
2447 well as the filled-in parts of any uncompleted buffers.
2448
2449 The do_marking_step() method tries to abort when the time target
2450 has been reached. There are a few other cases when the
2451 do_marking_step() method also aborts:
2452
2453 (1) When the marking phase has been aborted (after a Full GC).
2454
2455 (2) When a global overflow (on the global stack) has been
2456 triggered. Before the task aborts, it will actually sync up with
2457 the other tasks to ensure that all the marking data structures
2458 (local queues, stacks, fingers etc.) are re-initialized so that
2459 when do_marking_step() completes, the marking phase can
2460 immediately restart.
2461
2462 (3) When enough completed SATB buffers are available. The
2463 do_marking_step() method only tries to drain SATB buffers right
2464 at the beginning. So, if enough buffers are available, the
2465 marking step aborts and the SATB buffers are processed at
2466 the beginning of the next invocation.
2467
2468 (4) To yield. When we have to yield then we abort and yield
2469 right at the end of do_marking_step(). This saves us from a lot
2470 of hassle as, by yielding, we might allow a Full GC. If this
2471 happens then objects will be compacted underneath our feet, the
2472 heap might shrink, etc. We save checking for this by just
2473 aborting and doing the yield right at the end.
2474
2475 From the above it follows that the do_marking_step() method should
2476 be called in a loop (or, otherwise, regularly) until it completes.
2477
2478 If a marking step completes without its has_aborted() flag being
2479 true, it means it has completed the current marking phase (and
2480 also all other marking tasks have done so and have all synced up).
2481
2482 A method called regular_clock_call() is invoked "regularly" (in
2483 sub-ms intervals) throughout marking. It is this clock method that
2484 checks all the abort conditions which were mentioned above and
2485 decides when the task should abort. A work-based scheme is used to
2486 trigger this clock method: when the number of object words the
2487 marking phase has scanned or the number of references the marking
2488 phase has visited reaches a given limit. Additional invocations of
2489 the clock method have been planted in a few other strategic places
2490 too. The initial reason for the clock method was to avoid calling
2491 vtime too regularly, as it is quite expensive. So, once it was in
2492 place, it was natural to piggy-back all the other conditions on it
2493 too and not constantly check them throughout the code.
2494
2495 If do_termination is true then do_marking_step will enter its
2496 termination protocol.
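 For illustration, the canonical calling pattern is a retry loop of the
 following shape (a simplified sketch; see G1CMRemarkTask::work() above
 for a real caller):

   do {
     task->do_marking_step(target_ms,
                           true /* do_termination */,
                           is_serial);
   } while (task->has_aborted() && !cm->has_overflown());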
2497 2498 The value of is_serial must be true when do_marking_step is being 2499 called serially (i.e. by the VMThread) and do_marking_step should 2500 skip any synchronization in the termination and overflow code. 2501 Examples include the serial remark code and the serial reference 2502 processing closures. 2503 2504 The value of is_serial must be false when do_marking_step is 2505 being called by any of the worker threads in a work gang. 2506 Examples include the concurrent marking code (CMMarkingTask), 2507 the MT remark code, and the MT reference processing closures. 2508 2509 *****************************************************************************/ 2510 2511 void G1CMTask::do_marking_step(double time_target_ms, 2512 bool do_termination, 2513 bool is_serial) { 2514 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2515 2516 _start_time_ms = os::elapsedVTime() * 1000.0; 2517 2518 // If do_stealing is true then do_marking_step will attempt to 2519 // steal work from the other G1CMTasks. It only makes sense to 2520 // enable stealing when the termination protocol is enabled 2521 // and do_marking_step() is not being called serially. 2522 bool do_stealing = do_termination && !is_serial; 2523 2524 G1Predictions const& predictor = _g1h->policy()->predictor(); 2525 double diff_prediction_ms = predictor.predict_zero_bounded(&_marking_step_diff_ms); 2526 _time_target_ms = time_target_ms - diff_prediction_ms; 2527 2528 // set up the variables that are used in the work-based scheme to 2529 // call the regular clock method 2530 _words_scanned = 0; 2531 _refs_reached = 0; 2532 recalculate_limits(); 2533 2534 // clear all flags 2535 clear_has_aborted(); 2536 _has_timed_out = false; 2537 _draining_satb_buffers = false; 2538 2539 ++_calls; 2540 2541 // Set up the bitmap and oop closures. Anything that uses them is 2542 // eventually called from this method, so it is OK to allocate these 2543 // statically. 2544 G1CMBitMapClosure bitmap_closure(this, _cm); 2545 G1CMOopClosure cm_oop_closure(_g1h, this); 2546 set_cm_oop_closure(&cm_oop_closure); 2547 2548 if (_cm->has_overflown()) { 2549 // This can happen if the mark stack overflows during a GC pause 2550 // and this task, after a yield point, restarts. We have to abort 2551 // as we need to get into the overflow protocol which happens 2552 // right at the end of this task. 2553 set_has_aborted(); 2554 } 2555 2556 // First drain any available SATB buffers. After this, we will not 2557 // look at SATB buffers before the next invocation of this method. 2558 // If enough completed SATB buffers are queued up, the regular clock 2559 // will abort this task so that it restarts. 2560 drain_satb_buffers(); 2561 // ...then partially drain the local queue and the global stack 2562 drain_local_queue(true); 2563 drain_global_stack(true); 2564 2565 do { 2566 if (!has_aborted() && _curr_region != NULL) { 2567 // This means that we're already holding on to a region. 2568 assert(_finger != NULL, "if region is not NULL, then the finger " 2569 "should not be NULL either"); 2570 2571 // We might have restarted this task after an evacuation pause 2572 // which might have evacuated the region we're holding on to 2573 // underneath our feet. Let's read its limit again to make sure 2574 // that we do not iterate over a region of the heap that 2575 // contains garbage (update_region_limit() will also move 2576 // _finger to the start of the region if it is found empty). 
2577 update_region_limit(); 2578 // We will start from _finger not from the start of the region, 2579 // as we might be restarting this task after aborting half-way 2580 // through scanning this region. In this case, _finger points to 2581 // the address where we last found a marked object. If this is a 2582 // fresh region, _finger points to start(). 2583 MemRegion mr = MemRegion(_finger, _region_limit); 2584 2585 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2586 "humongous regions should go around loop once only"); 2587 2588 // Some special cases: 2589 // If the memory region is empty, we can just give up the region. 2590 // If the current region is humongous then we only need to check 2591 // the bitmap for the bit associated with the start of the object, 2592 // scan the object if it's live, and give up the region. 2593 // Otherwise, let's iterate over the bitmap of the part of the region 2594 // that is left. 2595 // If the iteration is successful, give up the region. 2596 if (mr.is_empty()) { 2597 giveup_current_region(); 2598 abort_marking_if_regular_check_fail(); 2599 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2600 if (_next_mark_bitmap->is_marked(mr.start())) { 2601 // The object is marked - apply the closure 2602 bitmap_closure.do_addr(mr.start()); 2603 } 2604 // Even if this task aborted while scanning the humongous object 2605 // we can (and should) give up the current region. 2606 giveup_current_region(); 2607 abort_marking_if_regular_check_fail(); 2608 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) { 2609 giveup_current_region(); 2610 abort_marking_if_regular_check_fail(); 2611 } else { 2612 assert(has_aborted(), "currently the only way to do so"); 2613 // The only way to abort the bitmap iteration is to return 2614 // false from the do_bit() method. However, inside the 2615 // do_bit() method we move the _finger to point to the 2616 // object currently being looked at. So, if we bail out, we 2617 // have definitely set _finger to something non-null. 2618 assert(_finger != NULL, "invariant"); 2619 2620 // Region iteration was actually aborted. So now _finger 2621 // points to the address of the object we last scanned. If we 2622 // leave it there, when we restart this task, we will rescan 2623 // the object. It is easy to avoid this. We move the finger by 2624 // enough to point to the next possible object header. 2625 assert(_finger < _region_limit, "invariant"); 2626 HeapWord* const new_finger = _finger + ((oop)_finger)->size(); 2627 // Check if bitmap iteration was aborted while scanning the last object 2628 if (new_finger >= _region_limit) { 2629 giveup_current_region(); 2630 } else { 2631 move_finger_to(new_finger); 2632 } 2633 } 2634 } 2635 // At this point we have either completed iterating over the 2636 // region we were holding on to, or we have aborted. 2637 2638 // We then partially drain the local queue and the global stack. 2639 // (Do we really need this?) 2640 drain_local_queue(true); 2641 drain_global_stack(true); 2642 2643 // Read the note on the claim_region() method on why it might 2644 // return NULL with potentially more regions available for 2645 // claiming and why we have to check out_of_regions() to determine 2646 // whether we're done or not. 2647 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) { 2648 // We are going to try to claim a new region. We should have 2649 // given up on the previous one. 
2650 // Separated the asserts so that we know which one fires.
2651 assert(_curr_region == NULL, "invariant");
2652 assert(_finger == NULL, "invariant");
2653 assert(_region_limit == NULL, "invariant");
2654 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2655 if (claimed_region != NULL) {
2656 // Yes, we managed to claim one
2657 setup_for_region(claimed_region);
2658 assert(_curr_region == claimed_region, "invariant");
2659 }
2660 // It is important to call the regular clock here. It might take
2661 // a while to claim a region if, for example, we hit a large
2662 // block of empty regions. So we need to call the regular clock
2663 // method once round the loop to make sure it's called
2664 // frequently enough.
2665 abort_marking_if_regular_check_fail();
2666 }
2667
2668 if (!has_aborted() && _curr_region == NULL) {
2669 assert(_cm->out_of_regions(),
2670 "at this point we should be out of regions");
2671 }
2672 } while (_curr_region != NULL && !has_aborted());
2673
2674 if (!has_aborted()) {
2675 // We cannot check whether the global stack is empty, since other
2676 // tasks might be pushing objects to it concurrently.
2677 assert(_cm->out_of_regions(),
2678 "at this point we should be out of regions");
2679 // Try to reduce the number of available SATB buffers so that
2680 // remark has less work to do.
2681 drain_satb_buffers();
2682 }
2683
2684 // Since we've done everything else, we can now totally drain the
2685 // local queue and global stack.
2686 drain_local_queue(false);
2687 drain_global_stack(false);
2688
2689 // Attempt at work stealing from other tasks' queues.
2690 if (do_stealing && !has_aborted()) {
2691 // We have not aborted. This means that we have finished all that
2692 // we could. Let's try to do some stealing...
2693
2694 // We cannot check whether the global stack is empty, since other
2695 // tasks might be pushing objects to it concurrently.
2696 assert(_cm->out_of_regions() && _task_queue->size() == 0,
2697 "only way to reach here");
2698 while (!has_aborted()) {
2699 G1TaskQueueEntry entry;
2700 if (_cm->try_stealing(_worker_id, entry)) {
2701 scan_task_entry(entry);
2702
2703 // And since we're towards the end, let's totally drain the
2704 // local queue and global stack.
2705 drain_local_queue(false);
2706 drain_global_stack(false);
2707 } else {
2708 break;
2709 }
2710 }
2711 }
2712
2713 // We still haven't aborted. Now, let's try to get into the
2714 // termination protocol.
2715 if (do_termination && !has_aborted()) {
2716 // We cannot check whether the global stack is empty, since other
2717 // tasks might be concurrently pushing objects on it.
2718 // Separated the asserts so that we know which one fires.
2719 assert(_cm->out_of_regions(), "only way to reach here");
2720 assert(_task_queue->size() == 0, "only way to reach here");
2721 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2722
2723 // The G1CMTask class also extends the TerminatorTerminator class,
2724 // hence its should_exit_termination() method will also decide
2725 // whether to exit the termination protocol or not.
2726 bool finished = (is_serial ||
2727 _cm->terminator()->offer_termination(this));
2728 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2729 _termination_time_ms +=
2730 termination_end_time_ms - _termination_start_time_ms;
2731
2732 if (finished) {
2733 // We're all done.
2734
2735 // We can now guarantee that the global stack is empty, since
2736 // all other tasks have finished. We separated the guarantees so
2737 // that, if a condition is false, we can immediately find out
2738 // which one.
2739 guarantee(_cm->out_of_regions(), "only way to reach here");
2740 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2741 guarantee(_task_queue->size() == 0, "only way to reach here");
2742 guarantee(!_cm->has_overflown(), "only way to reach here");
2743 guarantee(!has_aborted(), "should never happen if termination has completed");
2744 } else {
2745 // Apparently there's more work to do. Let's abort this task. The
2746 // caller will restart it and we can hopefully find more things to do.
2747 set_has_aborted();
2748 }
2749 }
2750
2751 // Mainly for debugging purposes to make sure that a pointer to the
2752 // closure which was statically allocated in this frame doesn't
2753 // escape it by accident.
2754 set_cm_oop_closure(NULL);
2755 double end_time_ms = os::elapsedVTime() * 1000.0;
2756 double elapsed_time_ms = end_time_ms - _start_time_ms;
2757 // Update the step history.
2758 _step_times_ms.add(elapsed_time_ms);
2759
2760 if (has_aborted()) {
2761 // The task was aborted for some reason.
2762 if (_has_timed_out) {
2763 double diff_ms = elapsed_time_ms - _time_target_ms;
2764 // Keep statistics of how well we did with respect to hitting
2765 // our target only if we actually timed out (if we aborted for
2766 // other reasons, then the results might get skewed).
2767 _marking_step_diff_ms.add(diff_ms);
2768 }
2769
2770 if (_cm->has_overflown()) {
2771 // This is the interesting one. We aborted because a global
2772 // overflow was raised. This means we have to restart the
2773 // marking phase and start iterating over regions. However, in
2774 // order to do this we have to make sure that all tasks stop
2775 // what they are doing and re-initialize in a safe manner. We
2776 // will achieve this with the use of two barrier sync points.
2777
2778 if (!is_serial) {
2779 // We only need to enter the sync barrier if being called
2780 // from a parallel context
2781 _cm->enter_first_sync_barrier(_worker_id);
2782
2783 // When we exit this sync barrier we know that all tasks have
2784 // stopped doing marking work. So, it's now safe to
2785 // re-initialize our data structures.
2786 }
2787
2788 clear_region_fields();
2789 flush_mark_stats_cache();
2790
2791 if (!is_serial) {
2792 // If we're executing the concurrent phase of marking, reset the marking
2793 // state; otherwise the marking state is reset after reference processing,
2794 // during the remark pause.
2795 // If we reset here as a result of an overflow during the remark we will
2796 // see assertion failures from any subsequent set_concurrency_and_phase()
2797 // calls.
2798 if (_cm->concurrent() && _worker_id == 0) {
2799 // Worker 0 is responsible for clearing the global data structures because
2800 // of an overflow. During STW we should not clear the overflow flag (in
2801 // G1ConcurrentMark::reset_marking_for_restart()) since we rely on it being true when we exit
2802 // this method to abort the pause and restart concurrent marking.
2803 _cm->reset_marking_for_restart();
2804
2805 log_info(gc, marking)("Concurrent Mark reset for overflow");
2806 }
2807
2808 // ...and enter the second barrier.
2809 _cm->enter_second_sync_barrier(_worker_id);
2810 }
2811 // At this point, if we're in the concurrent phase of
2812 // marking, everything has been re-initialized and we're
2813 // ready to restart.
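// Illustrative summary of the overflow protocol above (a simplified
// sketch, not additional logic):
//
//   enter_first_sync_barrier(id);    // all tasks have stopped marking
//   clear_region_fields();           // reset per-task state
//   /* worker 0 resets global state if we are in the concurrent phase */
//   enter_second_sync_barrier(id);   // all tasks restart together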
2814 } 2815 } 2816 } 2817 2818 G1CMTask::G1CMTask(uint worker_id, 2819 G1ConcurrentMark* cm, 2820 G1CMTaskQueue* task_queue, 2821 G1RegionMarkStats* mark_stats, 2822 uint max_regions) : 2823 _objArray_processor(this), 2824 _worker_id(worker_id), 2825 _g1h(G1CollectedHeap::heap()), 2826 _cm(cm), 2827 _next_mark_bitmap(NULL), 2828 _task_queue(task_queue), 2829 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize), 2830 _calls(0), 2831 _time_target_ms(0.0), 2832 _start_time_ms(0.0), 2833 _cm_oop_closure(NULL), 2834 _curr_region(NULL), 2835 _finger(NULL), 2836 _region_limit(NULL), 2837 _words_scanned(0), 2838 _words_scanned_limit(0), 2839 _real_words_scanned_limit(0), 2840 _refs_reached(0), 2841 _refs_reached_limit(0), 2842 _real_refs_reached_limit(0), 2843 _has_aborted(false), 2844 _has_timed_out(false), 2845 _draining_satb_buffers(false), 2846 _step_times_ms(), 2847 _elapsed_time_ms(0.0), 2848 _termination_time_ms(0.0), 2849 _termination_start_time_ms(0.0), 2850 _marking_step_diff_ms() 2851 { 2852 guarantee(task_queue != NULL, "invariant"); 2853 2854 _marking_step_diff_ms.add(0.5); 2855 } 2856 2857 // These are formatting macros that are used below to ensure 2858 // consistent formatting. The *_H_* versions are used to format the 2859 // header for a particular value and they should be kept consistent 2860 // with the corresponding macro. Also note that most of the macros add 2861 // the necessary white space (as a prefix) which makes them a bit 2862 // easier to compose. 2863 2864 // All the output lines are prefixed with this string to be able to 2865 // identify them easily in a large log file. 2866 #define G1PPRL_LINE_PREFIX "###" 2867 2868 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2869 #ifdef _LP64 2870 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2871 #else // _LP64 2872 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2873 #endif // _LP64 2874 2875 // For per-region info 2876 #define G1PPRL_TYPE_FORMAT " %-4s" 2877 #define G1PPRL_TYPE_H_FORMAT " %4s" 2878 #define G1PPRL_STATE_FORMAT " %-5s" 2879 #define G1PPRL_STATE_H_FORMAT " %5s" 2880 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2881 #define G1PPRL_BYTE_H_FORMAT " %9s" 2882 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2883 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2884 2885 // For summary info 2886 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2887 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2888 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2889 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2890 2891 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2892 _total_used_bytes(0), _total_capacity_bytes(0), 2893 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2894 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2895 { 2896 if (!log_is_enabled(Trace, gc, liveness)) { 2897 return; 2898 } 2899 2900 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2901 MemRegion g1_reserved = g1h->g1_reserved(); 2902 double now = os::elapsedTime(); 2903 2904 // Print the header of the output. 
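// For illustration: each log line below is a single format string composed
// from the macros above, e.g. G1PPRL_LINE_PREFIX G1PPRL_TYPE_H_FORMAT
// G1PPRL_ADDR_BASE_H_FORMAT concatenates (on LP64) to "###" " %4s" " %37s".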
2905 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2906 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2907 G1PPRL_SUM_ADDR_FORMAT("reserved") 2908 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2909 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2910 HeapRegion::GrainBytes); 2911 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2912 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2913 G1PPRL_TYPE_H_FORMAT 2914 G1PPRL_ADDR_BASE_H_FORMAT 2915 G1PPRL_BYTE_H_FORMAT 2916 G1PPRL_BYTE_H_FORMAT 2917 G1PPRL_BYTE_H_FORMAT 2918 G1PPRL_DOUBLE_H_FORMAT 2919 G1PPRL_BYTE_H_FORMAT 2920 G1PPRL_STATE_H_FORMAT 2921 G1PPRL_BYTE_H_FORMAT, 2922 "type", "address-range", 2923 "used", "prev-live", "next-live", "gc-eff", 2924 "remset", "state", "code-roots"); 2925 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2926 G1PPRL_TYPE_H_FORMAT 2927 G1PPRL_ADDR_BASE_H_FORMAT 2928 G1PPRL_BYTE_H_FORMAT 2929 G1PPRL_BYTE_H_FORMAT 2930 G1PPRL_BYTE_H_FORMAT 2931 G1PPRL_DOUBLE_H_FORMAT 2932 G1PPRL_BYTE_H_FORMAT 2933 G1PPRL_STATE_H_FORMAT 2934 G1PPRL_BYTE_H_FORMAT, 2935 "", "", 2936 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 2937 "(bytes)", "", "(bytes)"); 2938 } 2939 2940 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { 2941 if (!log_is_enabled(Trace, gc, liveness)) { 2942 return false; 2943 } 2944 2945 const char* type = r->get_type_str(); 2946 HeapWord* bottom = r->bottom(); 2947 HeapWord* end = r->end(); 2948 size_t capacity_bytes = r->capacity(); 2949 size_t used_bytes = r->used(); 2950 size_t prev_live_bytes = r->live_bytes(); 2951 size_t next_live_bytes = r->next_live_bytes(); 2952 double gc_eff = r->gc_efficiency(); 2953 size_t remset_bytes = r->rem_set()->mem_size(); 2954 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 2955 const char* remset_type = r->rem_set()->get_short_state_str(); 2956 2957 _total_used_bytes += used_bytes; 2958 _total_capacity_bytes += capacity_bytes; 2959 _total_prev_live_bytes += prev_live_bytes; 2960 _total_next_live_bytes += next_live_bytes; 2961 _total_remset_bytes += remset_bytes; 2962 _total_strong_code_roots_bytes += strong_code_roots_bytes; 2963 2964 // Print a line for this particular region. 2965 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2966 G1PPRL_TYPE_FORMAT 2967 G1PPRL_ADDR_BASE_FORMAT 2968 G1PPRL_BYTE_FORMAT 2969 G1PPRL_BYTE_FORMAT 2970 G1PPRL_BYTE_FORMAT 2971 G1PPRL_DOUBLE_FORMAT 2972 G1PPRL_BYTE_FORMAT 2973 G1PPRL_STATE_FORMAT 2974 G1PPRL_BYTE_FORMAT, 2975 type, p2i(bottom), p2i(end), 2976 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 2977 remset_bytes, remset_type, strong_code_roots_bytes); 2978 2979 return false; 2980 } 2981 2982 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 2983 if (!log_is_enabled(Trace, gc, liveness)) { 2984 return; 2985 } 2986 2987 // add static memory usages to remembered set sizes 2988 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 2989 // Print the footer of the output. 
2990 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2991 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2992 " SUMMARY" 2993 G1PPRL_SUM_MB_FORMAT("capacity") 2994 G1PPRL_SUM_MB_PERC_FORMAT("used") 2995 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 2996 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 2997 G1PPRL_SUM_MB_FORMAT("remset") 2998 G1PPRL_SUM_MB_FORMAT("code-roots"), 2999 bytes_to_mb(_total_capacity_bytes), 3000 bytes_to_mb(_total_used_bytes), 3001 percent_of(_total_used_bytes, _total_capacity_bytes), 3002 bytes_to_mb(_total_prev_live_bytes), 3003 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 3004 bytes_to_mb(_total_next_live_bytes), 3005 percent_of(_total_next_live_bytes, _total_capacity_bytes), 3006 bytes_to_mb(_total_remset_bytes), 3007 bytes_to_mb(_total_strong_code_roots_bytes)); 3008 }