/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

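// [Illustrative note, not part of the original sources] Worked example of the
// sizing math above, under two loudly assumed numbers: a 4096-byte
// os::vm_allocation_granularity() and 8-byte G1TaskQueueEntry entries. If
// sizeof(TaskQueueEntryChunk) were, say, 8192 bytes (1024 entry slots):
//
//   capacity_alignment()         = lcm(4096, 8192) / 8 = 1024 entries
//   TaskEntryChunkSizeInVoidStar = 8192 / 8            = 1024
//   initialize(16384, 131072)   -> initial 16 chunks, maximum 128 chunks
//
// i.e. the flag values are entry counts, while _chunk_capacity and
// _max_chunk_capacity count whole chunks of the mmap'ed backing array.
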
G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

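// [Illustrative sketch, not part of the build] Callers always move entries in
// whole chunks of EntriesPerChunk through a caller-owned staging buffer,
// along these lines (the buffer name is hypothetical):
//
//   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
//
//   // Publish: fill buffer with EntriesPerChunk entries, then
//   if (!mark_stack->par_push_chunk(buffer)) {
//     ... // free list and backing memory exhausted: treat as overflow
//   }
//
//   // Consume: retrieve a full chunk, if one is available.
//   if (mark_stack->par_pop_chunk(buffer)) {
//     ... // process the EntriesPerChunk entries now in buffer
//   }
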
G1CMRootRegions::G1CMRootRegions() :
  _survivors(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

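// [Illustrative note, not part of the original sources] claim_next() hands out
// each survivor region exactly once without locking: every claimer atomically
// bumps _claimed_survivor_index and keeps the pre-increment value as its
// claim. For example, with 3 survivor regions and two workers racing:
//
//   worker A: Atomic::add(1, &_claimed_survivor_index) -> 1, claims index 0
//   worker B: Atomic::add(...)                         -> 2, claims index 1
//   worker A: Atomic::add(...)                         -> 3, claims index 2
//   worker B: Atomic::add(...)                         -> 4, index 3 >= length,
//             gets NULL and leaves its claim loop.
//
// This is also why scan_finished() can assert that the claimed index is at
// least the number of survivors once all workers are done.
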
// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

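// [Illustrative note, not part of the original sources] Sample values of the
// scaling above, i.e. roughly a quarter of the STW workers with a floor of
// one:
//
//   num_gc_workers:  1  2  4  8  16  32
//   result:          1  1  1  2   4   8
//
// This is the default used for ConcGCThreads when the flag is not set on the
// command line (see the constructor below).
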
G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}

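// [Illustrative note, not part of the original sources] Shape of the
// MarkStackSize ergonomics above, for an assumed _max_concurrent_workers of 2:
//
//   mark_stack_size = MIN2(MarkStackSizeMax,
//                          MAX2(MarkStackSize, 2 * TASKQUEUE_SIZE))
//
// i.e. the default is raised, if needed, so the global mark stack can absorb
// at least one full local task queue per concurrent worker, and is clamped to
// MarkStackSizeMax; all three quantities are entry counts, not bytes.
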
void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use different numbers of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got humongous continues region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void maybe_clear_bitmap_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  maybe_clear_bitmap_if_set(_prev_mark_bitmap, r->bottom());

  G1CollectorState* collector_state = _g1h->collector_state();
  if (collector_state->mark_or_rebuild_in_progress() ||
      collector_state->clearing_next_bitmap()) {
    maybe_clear_bitmap_if_set(_next_mark_bitmap, r->bottom());
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool do_heap_region(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
  CheckBitmapClearHRClosure cl(_next_mark_bitmap);
  _g1h->heap_region_iterate(&cl);
  return cl.is_complete();
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

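// [Illustrative note, not part of the original sources] Worked example for the
// work-unit math in clear_bitmap() above, under assumed numbers: 1 MB regions,
// 2048 committed regions, and a bitmap covering 64 heap bytes per bitmap byte
// (i.e. heap_map_factor() == 64 with the default 8-byte object alignment):
//
//   num_bytes_to_clear = (1M * 2048) / 64     = 32M
//   num_chunks         = align_up(32M, 1M)/1M = 32
//   num_workers        = MIN2(32, active_workers)
//
// Each worker then claims regions and clears their bitmap coverage in
// chunk_size() pieces, yielding for safepoints between pieces when may_yield
// is true.
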
void G1ConcurrentMark::pre_initial_mark() {
  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);
}

void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

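// [Illustrative note, not part of the original sources] The SATB queue
// activation above is one half of a paired protocol over the marking cycle:
//
//   post_initial_mark():   set_active_all_threads(true,  false /* expected */);
//   remark(), when done:   set_active_all_threads(false, true  /* expected */);
//
// The expected_active argument lets the queue set verify that no thread
// slipped through with the wrong activation state between the two pauses.
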
/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that it gets suspended for a Full GC or an evacuation
 * pause while waiting. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial*/);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                      1, /* Minimum workers */
                                                      _num_concurrent_workers,
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_workers() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}

class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

  uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.

  void update_remset_before_rebuild(HeapRegion* hr) {
    G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();

    size_t live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
    bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
    if (selected_for_rebuild) {
      _num_regions_selected_for_rebuild++;
    }
    _cm->update_top_at_rebuild_start(hr);
  }

public:
  G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm) :
    _g1h(g1h), _cm(cm), _num_regions_selected_for_rebuild(0) { }

  virtual bool do_heap_region(HeapRegion* r) {
    update_remset_before_rebuild(r);
    return false;
  }

  uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
};

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) trace("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    {
      GCTraceTime(Debug, gc, phases) trace("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
      G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
      _g1h->heap_region_iterate(&cl);
      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.num_selected_for_rebuild());
    }

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed.
    reset_at_marking_complete();
  } else {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) trace("Report Object Count", _gc_timer_cm);
    report_object_count();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

class G1CleanupTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1CleanupRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;
    HRRSCleanupTask* _hrrs_cleanup_task;

  public:
    G1CleanupRegionsClosure(G1CollectedHeap* g1,
                            FreeRegionList* local_cleanup_list,
                            HRRSCleanupTask* hrrs_cleanup_task) :
      _g1h(g1),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0),
      _hrrs_cleanup_task(hrrs_cleanup_task) { }

    size_t freed_bytes() { return _freed_bytes; }
    uint old_regions_removed() const { return _old_regions_removed; }
    uint humongous_regions_removed() const { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion* hr) {
      hr->note_end_of_marking();

      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      } else {
        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1CleanupTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {

    HeapRegionRemSet::reset_for_cleanup_tasks();
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1CleanupRegionsClosure cl(_g1h,
                               &local_cleanup_list,
                               &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1CleanupTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) trace("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitmap as "prev".
  swap_mark_bitmaps();
  {
    GCTraceTime(Debug, gc, phases) trace("Reclaim Empty Regions", _gc_timer_cm);
    reclaim_empty_regions();
  }

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    GCTraceTime(Debug, gc, phases) trace("Purge Metaspace", _gc_timer_cm);
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) trace("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
  }
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking.

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// state. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure : public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  int _ref_counter_limit;
  int _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      _task->deal_with_reference(p);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure : public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking.

class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy : public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy : public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
  ResourceMark rm;
  HandleMark hm;

  // Is alive closure.
  G1CMIsAliveClosure g1_is_alive(_g1h);

  // Inner scope to exclude the cleaning of the string and symbol
  // tables from the displayed time.
  {
    GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);

    ReferenceProcessor* rp = _g1h->ref_processor_cm();

    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.

    // Set the soft reference policy.
    rp->setup_policy(clear_all_soft_refs);
    assert(_global_mark_stack.is_empty(), "mark stack should be empty");

    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (e.g.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
    //
    // The gang tasks involved in parallel reference processing create
    // their own instances of these closures, which do their own
    // synchronization among themselves.
    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);

    // We need at least one active thread. If reference processing
    // is not multi-threaded we use the current (VMThread) thread,
    // otherwise we use the work gang from the G1CollectedHeap and
    // we utilize all the worker threads we can.
    bool processing_is_mt = rp->processing_is_mt();
    uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
    active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);

    // Parallel processing task executor.
    G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
                                              _g1h->workers(), active_workers);
    AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);

    // Set the concurrency level. The phase was already set prior to
    // executing the remark task.
    set_concurrency(active_workers);

    // Set the degree of MT processing here. If the discovery was done MT,
    // the number of threads involved during discovery could differ from
    // the number of active workers. This is OK as long as the discovered
    // Reference lists are balanced (see balance_all_queues() and balance_queues()).
    rp->set_active_mt_degree(active_workers);

    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());

    // Process the weak references.
    const ReferenceProcessorStats& stats =
      rp->process_discovered_references(&g1_is_alive,
                                        &g1_keep_alive,
                                        &g1_drain_mark_stack,
                                        executor,
                                        &pt);
    _gc_tracer_cm->report_gc_reference_stats(stats);
    pt.print_all_references();

1586 1587 assert(has_overflown() || _global_mark_stack.is_empty(), 1588 "Mark stack should be empty (unless it has overflown)"); 1589 1590 assert(rp->num_q() == active_workers, "why not"); 1591 1592 rp->enqueue_discovered_references(executor, &pt); 1593 1594 rp->verify_no_references_recorded(); 1595 1596 pt.print_enqueue_phase(); 1597 1598 assert(!rp->discovery_enabled(), "Post condition"); 1599 } 1600 1601 assert(has_overflown() || _global_mark_stack.is_empty(), 1602 "Mark stack should be empty (unless it has overflown)"); 1603 1604 { 1605 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm); 1606 WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl); 1607 } 1608 1609 if (has_overflown()) { 1610 // We can not trust g1_is_alive if the marking stack overflowed 1611 return; 1612 } 1613 1614 assert(_global_mark_stack.is_empty(), "Marking should have completed"); 1615 1616 // Unload Klasses, String, Symbols, Code Cache, etc. 1617 if (ClassUnloadingWithConcurrentMark) { 1618 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); 1619 bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */); 1620 _g1h->complete_cleaning(&g1_is_alive, purged_classes); 1621 } else { 1622 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm); 1623 // No need to clean string table and symbol table as they are treated as strong roots when 1624 // class unloading is disabled. 1625 _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled()); 1626 } 1627 } 1628 1629 void G1ConcurrentMark::report_object_count() { 1630 G1CMIsAliveClosure is_alive(_g1h); 1631 _gc_tracer_cm->report_object_count_after_gc(&is_alive); 1632 } 1633 1634 void G1ConcurrentMark::swap_mark_bitmaps() { 1635 G1CMBitMap* temp = _prev_mark_bitmap; 1636 _prev_mark_bitmap = _next_mark_bitmap; 1637 _next_mark_bitmap = temp; 1638 _g1h->collector_state()->set_clearing_next_bitmap(true); 1639 } 1640 1641 // Closure for marking entries in SATB buffers. 1642 class G1CMSATBBufferClosure : public SATBBufferClosure { 1643 private: 1644 G1CMTask* _task; 1645 G1CollectedHeap* _g1h; 1646 1647 // This is very similar to G1CMTask::deal_with_reference, but with 1648 // more relaxed requirements for the argument, so this must be more 1649 // circumspect about treating the argument as an object. 
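// Entries arrive here as raw void* values taken straight out of SATB
// buffers; by the time a buffer is handed to this closure its entries are
// expected to have passed the SATB filtering (non-NULL, in the heap, below
// TAMS), so each one is simply counted and greyed without re-deriving it
// from a containing field.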
1650 void do_entry(void* entry) const {
1651 _task->increment_refs_reached();
1652 oop const obj = static_cast<oop>(entry);
1653 _task->make_reference_grey(obj);
1654 }
1655
1656 public:
1657 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1658 : _task(task), _g1h(g1h) { }
1659
1660 virtual void do_buffer(void** buffer, size_t size) {
1661 for (size_t i = 0; i < size; ++i) {
1662 do_entry(buffer[i]);
1663 }
1664 }
1665 };
1666
1667 class G1RemarkThreadsClosure : public ThreadClosure {
1668 G1CMSATBBufferClosure _cm_satb_cl;
1669 G1CMOopClosure _cm_cl;
1670 MarkingCodeBlobClosure _code_cl;
1671 int _thread_parity;
1672
1673 public:
1674 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1675 _cm_satb_cl(task, g1h),
1676 _cm_cl(g1h, task),
1677 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1678 _thread_parity(Threads::thread_claim_parity()) {}
1679
1680 void do_thread(Thread* thread) {
1681 if (thread->is_Java_thread()) {
1682 if (thread->claim_oops_do(true, _thread_parity)) {
1683 JavaThread* jt = (JavaThread*)thread;
1684
1685 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1686 // however, oops reachable from nmethods have very complex lifecycles:
1687 // * Alive if on the stack of an executing method
1688 // * Weakly reachable otherwise
1689 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1690 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1691 jt->nmethods_do(&_code_cl);
1692
1693 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1694 }
1695 } else if (thread->is_VM_thread()) {
1696 if (thread->claim_oops_do(true, _thread_parity)) {
1697 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1698 }
1699 }
1700 }
1701 };
1702
1703 class G1CMRemarkTask : public AbstractGangTask {
1704 G1ConcurrentMark* _cm;
1705 public:
1706 void work(uint worker_id) {
1707 G1CMTask* task = _cm->task(worker_id);
1708 task->record_start_time();
1709 {
1710 ResourceMark rm;
1711 HandleMark hm;
1712
1713 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1714 Threads::threads_do(&threads_f);
1715 }
1716
1717 do {
1718 task->do_marking_step(1000000000.0 /* something very large */,
1719 true /* do_termination */,
1720 false /* is_serial */);
1721 } while (task->has_aborted() && !_cm->has_overflown());
1722 // If we overflow, then we do not want to restart. We instead
1723 // want to abort remark and do concurrent marking again.
1724 task->record_end_time();
1725 }
1726
1727 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1728 AbstractGangTask("Par Remark"), _cm(cm) {
1729 _cm->terminator()->reset_for_reuse(active_workers);
1730 }
1731 };
1732
1733 void G1ConcurrentMark::finalize_marking() {
1734 ResourceMark rm;
1735 HandleMark hm;
1736
1737 _g1h->ensure_parsability(false);
1738
1739 // this is remark, so we'll use up all active threads
1740 uint active_workers = _g1h->workers()->active_workers();
1741 set_concurrency_and_phase(active_workers, false /* concurrent */);
1742 // Leave _parallel_marking_threads at its
1743 // value originally calculated in the G1ConcurrentMark
1744 // constructor and pass values of the active workers
1745 // through the gang in the task.
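// Remark in outline: each gang worker first flushes thread-local marking
// state (remaining SATB buffers plus nmethod roots, via the
// G1RemarkThreadsClosure above) and then loops on do_marking_step() with
// an effectively unlimited time budget until marking converges, bailing
// out only on a global mark stack overflow.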
1746
1747 {
1748 StrongRootsScope srs(active_workers);
1749
1750 G1CMRemarkTask remarkTask(this, active_workers);
1751 // We will start all available threads, even if we decide that the
1752 // active_workers will be fewer. The extra ones will just bail out
1753 // immediately.
1754 _g1h->workers()->run_task(&remarkTask);
1755 }
1756
1757 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1758 guarantee(has_overflown() ||
1759 satb_mq_set.completed_buffers_num() == 0,
1760 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1761 BOOL_TO_STR(has_overflown()),
1762 satb_mq_set.completed_buffers_num());
1763
1764 print_stats();
1765 }
1766
1767 void G1ConcurrentMark::flush_all_task_caches() {
1768 size_t hits = 0;
1769 size_t misses = 0;
1770 for (uint i = 0; i < _max_num_tasks; i++) {
1771 Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1772 hits += stats.first;
1773 misses += stats.second;
1774 }
1775 size_t sum = hits + misses;
1776 log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1777 hits, misses, percent_of(hits, sum));
1778 }
1779
1780 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1781 _prev_mark_bitmap->clear_range(mr);
1782 }
1783
1784 HeapRegion*
1785 G1ConcurrentMark::claim_region(uint worker_id) {
1786 // "checkpoint" the finger
1787 HeapWord* finger = _finger;
1788
1789 while (finger < _heap.end()) {
1790 assert(_g1h->is_in_g1_reserved(finger), "invariant");
1791
1792 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1793 // Make sure that the reads below do not float before loading curr_region.
1794 OrderAccess::loadload();
1795 // Above, heap_region_containing may return NULL as we always scan and claim
1796 // until the end of the heap. In this case, just jump to the next region.
1797 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1798
1799 // Is the gap between reading the finger and doing the CAS too long?
1800 HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1801 if (res == finger && curr_region != NULL) {
1802 // we succeeded
1803 HeapWord* bottom = curr_region->bottom();
1804 HeapWord* limit = curr_region->next_top_at_mark_start();
1805
1806 // notice that _finger == end cannot be guaranteed here since
1807 // someone else might have moved the finger even further
1808 assert(_finger >= end, "the finger should have moved forward");
1809
1810 if (limit > bottom) {
1811 return curr_region;
1812 } else {
1813 assert(limit == bottom,
1814 "the region limit should be at bottom");
1815 // we return NULL and the caller should try calling
1816 // claim_region() again.
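// An illustrative interleaving of the claim protocol (worker ids
// hypothetical): two workers read the same finger value F for a region
// ending at E. Worker 0's CAS of _finger from F to E succeeds and it owns
// the region; worker 1's CAS fails (res != finger), so it re-reads _finger
// below and retries from the next region. Empty regions, as in this
// branch, are skipped by returning NULL and letting the caller retry.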
1817 return NULL; 1818 } 1819 } else { 1820 assert(_finger > finger, "the finger should have moved forward"); 1821 // read it again 1822 finger = _finger; 1823 } 1824 } 1825 1826 return NULL; 1827 } 1828 1829 #ifndef PRODUCT 1830 class VerifyNoCSetOops { 1831 G1CollectedHeap* _g1h; 1832 const char* _phase; 1833 int _info; 1834 1835 public: 1836 VerifyNoCSetOops(const char* phase, int info = -1) : 1837 _g1h(G1CollectedHeap::heap()), 1838 _phase(phase), 1839 _info(info) 1840 { } 1841 1842 void operator()(G1TaskQueueEntry task_entry) const { 1843 if (task_entry.is_array_slice()) { 1844 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1845 return; 1846 } 1847 guarantee(oopDesc::is_oop(task_entry.obj()), 1848 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1849 p2i(task_entry.obj()), _phase, _info); 1850 guarantee(!_g1h->is_in_cset(task_entry.obj()), 1851 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1852 p2i(task_entry.obj()), _phase, _info); 1853 } 1854 }; 1855 1856 void G1ConcurrentMark::verify_no_cset_oops() { 1857 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1858 if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) { 1859 return; 1860 } 1861 1862 // Verify entries on the global mark stack 1863 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1864 1865 // Verify entries on the task queues 1866 for (uint i = 0; i < _max_num_tasks; ++i) { 1867 G1CMTaskQueue* queue = _task_queues->queue(i); 1868 queue->iterate(VerifyNoCSetOops("Queue", i)); 1869 } 1870 1871 // Verify the global finger 1872 HeapWord* global_finger = finger(); 1873 if (global_finger != NULL && global_finger < _heap.end()) { 1874 // Since we always iterate over all regions, we might get a NULL HeapRegion 1875 // here. 1876 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1877 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1878 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1879 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1880 } 1881 1882 // Verify the task fingers 1883 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 1884 for (uint i = 0; i < _num_concurrent_workers; ++i) { 1885 G1CMTask* task = _tasks[i]; 1886 HeapWord* task_finger = task->finger(); 1887 if (task_finger != NULL && task_finger < _heap.end()) { 1888 // See above note on the global finger verification. 1889 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 1890 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 1891 !task_hr->in_collection_set(), 1892 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1893 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 1894 } 1895 } 1896 } 1897 #endif // PRODUCT 1898 1899 void G1ConcurrentMark::rebuild_rem_set_concurrently() { 1900 _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset); 1901 } 1902 1903 void G1ConcurrentMark::print_stats() { 1904 if (!log_is_enabled(Debug, gc, stats)) { 1905 return; 1906 } 1907 log_debug(gc, stats)("---------------------------------------------------------------------"); 1908 for (size_t i = 0; i < _num_active_tasks; ++i) { 1909 _tasks[i]->print_stats(); 1910 log_debug(gc, stats)("---------------------------------------------------------------------"); 1911 } 1912 } 1913 1914 void G1ConcurrentMark::concurrent_cycle_abort() { 1915 if (!cm_thread()->during_cycle() || _has_aborted) { 1916 // We haven't started a concurrent cycle or we have already aborted it. 
No need to do anything.
1917 return;
1918 }
1919
1920 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
1921 // concurrent bitmap clearing.
1922 {
1923 GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
1924 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
1925 }
1926 // Note we cannot clear the previous marking bitmap here
1927 // since VerifyDuringGC verifies the objects marked during
1928 // a full GC against the previous bitmap.
1929
1930 // Empty mark stack
1931 reset_marking_for_restart();
1932 for (uint i = 0; i < _max_num_tasks; ++i) {
1933 _tasks[i]->clear_region_fields();
1934 }
1935 _first_overflow_barrier_sync.abort();
1936 _second_overflow_barrier_sync.abort();
1937 _has_aborted = true;
1938
1939 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1940 satb_mq_set.abandon_partial_marking();
1941 // This can be called either during or outside marking; we'll read
1942 // the expected_active value from the SATB queue set.
1943 satb_mq_set.set_active_all_threads(
1944 false, /* new active value */
1945 satb_mq_set.is_active() /* expected_active */);
1946 }
1947
1948 static void print_ms_time_info(const char* prefix, const char* name,
1949 NumberSeq& ns) {
1950 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
1951 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
1952 if (ns.num() > 0) {
1953 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
1954 prefix, ns.sd(), ns.maximum());
1955 }
1956 }
1957
1958 void G1ConcurrentMark::print_summary_info() {
1959 Log(gc, marking) log;
1960 if (!log.is_trace()) {
1961 return;
1962 }
1963
1964 log.trace(" Concurrent marking:");
1965 print_ms_time_info(" ", "init marks", _init_times);
1966 print_ms_time_info(" ", "remarks", _remark_times);
1967 {
1968 print_ms_time_info(" ", "final marks", _remark_mark_times);
1969 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
1970
1971 }
1972 print_ms_time_info(" ", "cleanups", _cleanup_times);
1973 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).",
1974 _total_cleanup_time, (_cleanup_times.num() > 0 ?
_total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 1975 log.trace(" Total stop_world time = %8.2f s.", 1976 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 1977 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 1978 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum()); 1979 } 1980 1981 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 1982 _concurrent_workers->print_worker_threads_on(st); 1983 } 1984 1985 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 1986 _concurrent_workers->threads_do(tc); 1987 } 1988 1989 void G1ConcurrentMark::print_on_error(outputStream* st) const { 1990 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 1991 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap)); 1992 _prev_mark_bitmap->print_on_error(st, " Prev Bits: "); 1993 _next_mark_bitmap->print_on_error(st, " Next Bits: "); 1994 } 1995 1996 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 1997 ReferenceProcessor* result = g1h->ref_processor_cm(); 1998 assert(result != NULL, "CM reference processor should not be NULL"); 1999 return result; 2000 } 2001 2002 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2003 G1CMTask* task) 2004 : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)), 2005 _g1h(g1h), _task(task) 2006 { } 2007 2008 void G1CMTask::setup_for_region(HeapRegion* hr) { 2009 assert(hr != NULL, 2010 "claim_region() should have filtered out NULL regions"); 2011 _curr_region = hr; 2012 _finger = hr->bottom(); 2013 update_region_limit(); 2014 } 2015 2016 void G1CMTask::update_region_limit() { 2017 HeapRegion* hr = _curr_region; 2018 HeapWord* bottom = hr->bottom(); 2019 HeapWord* limit = hr->next_top_at_mark_start(); 2020 2021 if (limit == bottom) { 2022 // The region was collected underneath our feet. 2023 // We set the finger to bottom to ensure that the bitmap 2024 // iteration that will follow this will not do anything. 2025 // (this is not a condition that holds when we set the region up, 2026 // as the region is not supposed to be empty in the first place) 2027 _finger = bottom; 2028 } else if (limit >= _region_limit) { 2029 assert(limit >= _finger, "peace of mind"); 2030 } else { 2031 assert(limit < _region_limit, "only way to get here"); 2032 // This can happen under some pretty unusual circumstances. An 2033 // evacuation pause empties the region underneath our feet (NTAMS 2034 // at bottom). We then do some allocation in the region (NTAMS 2035 // stays at bottom), followed by the region being used as a GC 2036 // alloc region (NTAMS will move to top() and the objects 2037 // originally below it will be grayed). All objects now marked in 2038 // the region are explicitly grayed, if below the global finger, 2039 // and we do not need in fact to scan anything else. So, we simply 2040 // set _finger to be limit to ensure that the bitmap iteration 2041 // doesn't do anything. 2042 _finger = limit; 2043 } 2044 2045 _region_limit = limit; 2046 } 2047 2048 void G1CMTask::giveup_current_region() { 2049 assert(_curr_region != NULL, "invariant"); 2050 clear_region_fields(); 2051 } 2052 2053 void G1CMTask::clear_region_fields() { 2054 // Values for these three fields that indicate that we're not 2055 // holding on to a region. 
2056 _curr_region = NULL;
2057 _finger = NULL;
2058 _region_limit = NULL;
2059 }
2060
2061 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2062 if (cm_oop_closure == NULL) {
2063 assert(_cm_oop_closure != NULL, "invariant");
2064 } else {
2065 assert(_cm_oop_closure == NULL, "invariant");
2066 }
2067 _cm_oop_closure = cm_oop_closure;
2068 }
2069
2070 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2071 guarantee(next_mark_bitmap != NULL, "invariant");
2072 _next_mark_bitmap = next_mark_bitmap;
2073 clear_region_fields();
2074
2075 _calls = 0;
2076 _elapsed_time_ms = 0.0;
2077 _termination_time_ms = 0.0;
2078 _termination_start_time_ms = 0.0;
2079
2080 _mark_stats_cache.reset();
2081 }
2082
2083 bool G1CMTask::should_exit_termination() {
2084 regular_clock_call();
2085 // This is called when we are in the termination protocol. We should
2086 // quit if, for some reason, this task wants to abort or the global
2087 // stack is not empty (this means that we can get work from it).
2088 return !_cm->mark_stack_empty() || has_aborted();
2089 }
2090
2091 void G1CMTask::reached_limit() {
2092 assert(_words_scanned >= _words_scanned_limit ||
2093 _refs_reached >= _refs_reached_limit,
2094 "shouldn't have been called otherwise");
2095 regular_clock_call();
2096 }
2097
2098 void G1CMTask::regular_clock_call() {
2099 if (has_aborted()) {
2100 return;
2101 }
2102
2103 // First, we need to recalculate the words scanned and refs reached
2104 // limits for the next clock call.
2105 recalculate_limits();
2106
2107 // During the regular clock call we do the following
2108
2109 // (1) If an overflow has been flagged, then we abort.
2110 if (_cm->has_overflown()) {
2111 set_has_aborted();
2112 return;
2113 }
2114
2115 // If we are not concurrent (i.e. we're doing remark) we don't need
2116 // to check anything else. The other steps are only needed during
2117 // the concurrent marking phase.
2118 if (!_cm->concurrent()) {
2119 return;
2120 }
2121
2122 // (2) If marking has been aborted for Full GC, then we also abort.
2123 if (_cm->has_aborted()) {
2124 set_has_aborted();
2125 return;
2126 }
2127
2128 double curr_time_ms = os::elapsedVTime() * 1000.0;
2129
2130 // (3) We check whether we should yield. If we have to, then we abort.
2131 if (SuspendibleThreadSet::should_yield()) {
2132 // We should yield. To do this we abort the task. The caller is
2133 // responsible for yielding.
2134 set_has_aborted();
2135 return;
2136 }
2137
2138 // (4) We check whether we've reached our time quota. If we have,
2139 // then we abort.
2140 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2141 if (elapsed_time_ms > _time_target_ms) {
2142 set_has_aborted();
2143 _has_timed_out = true;
2144 return;
2145 }
2146
2147 // (5) Finally, we check whether there are enough completed SATB
2148 // buffers available for processing. If there are, we abort.
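// The check below is cheap: process_completed_buffers() just reads a flag
// that the SATB queue set raises once the number of completed buffers
// crosses its processing threshold, so tasks only abort to drain SATB
// buffers when enough of them have actually piled up.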
2149 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2150 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2151 // we do need to process SATB buffers; we'll abort and restart
2152 // the marking task to do so
2153 set_has_aborted();
2154 return;
2155 }
2156 }
2157
2158 void G1CMTask::recalculate_limits() {
2159 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2160 _words_scanned_limit = _real_words_scanned_limit;
2161
2162 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2163 _refs_reached_limit = _real_refs_reached_limit;
2164 }
2165
2166 void G1CMTask::decrease_limits() {
2167 // This is called when we believe that we're going to do an infrequent
2168 // operation which will increase the per-byte scanned cost (i.e. move
2169 // entries to/from the global stack). It basically tries to decrease the
2170 // scanning limit so that the clock is called earlier.
2171
2172 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2173 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2174 }
2175
2176 void G1CMTask::move_entries_to_global_stack() {
2177 // Local array where we'll store the entries that will be popped
2178 // from the local queue.
2179 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2180
2181 size_t n = 0;
2182 G1TaskQueueEntry task_entry;
2183 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2184 buffer[n] = task_entry;
2185 ++n;
2186 }
2187 if (n < G1CMMarkStack::EntriesPerChunk) {
2188 buffer[n] = G1TaskQueueEntry();
2189 }
2190
2191 if (n > 0) {
2192 if (!_cm->mark_stack_push(buffer)) {
2193 set_has_aborted();
2194 }
2195 }
2196
2197 // This operation was quite expensive, so decrease the limits.
2198 decrease_limits();
2199 }
2200
2201 bool G1CMTask::get_entries_from_global_stack() {
2202 // Local array where we'll store the entries that will be popped
2203 // from the global stack.
2204 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2205
2206 if (!_cm->mark_stack_pop(buffer)) {
2207 return false;
2208 }
2209
2210 // We did actually pop at least one entry.
2211 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2212 G1TaskQueueEntry task_entry = buffer[i];
2213 if (task_entry.is_null()) {
2214 break;
2215 }
2216 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2217 bool success = _task_queue->push(task_entry);
2218 // We only call this when the local queue is empty or under a
2219 // given target limit. So, we do not expect this push to fail.
2220 assert(success, "invariant");
2221 }
2222
2223 // This operation was quite expensive, so decrease the limits
2224 decrease_limits();
2225 return true;
2226 }
2227
2228 void G1CMTask::drain_local_queue(bool partially) {
2229 if (has_aborted()) {
2230 return;
2231 }
2232
2233 // Decide what the target size is, depending on whether we're going to
2234 // drain it partially (so that other tasks can steal if they run out
2235 // of things to do) or totally (at the very end).
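// For a sense of scale (numbers illustrative; they depend on
// TASKQUEUE_SIZE and flag settings): with GCDrainStackTargetSize at its
// default of 64 and a task queue holding tens of thousands of entries,
// the partial target below works out to MIN2(max_elems/3, 64) = 64, i.e.
// a partial drain leaves only a small stealable remainder in the queue.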
2236 size_t target_size;
2237 if (partially) {
2238 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2239 } else {
2240 target_size = 0;
2241 }
2242
2243 if (_task_queue->size() > target_size) {
2244 G1TaskQueueEntry entry;
2245 bool ret = _task_queue->pop_local(entry);
2246 while (ret) {
2247 scan_task_entry(entry);
2248 if (_task_queue->size() <= target_size || has_aborted()) {
2249 ret = false;
2250 } else {
2251 ret = _task_queue->pop_local(entry);
2252 }
2253 }
2254 }
2255 }
2256
2257 void G1CMTask::drain_global_stack(bool partially) {
2258 if (has_aborted()) {
2259 return;
2260 }
2261
2262 // We have a policy to drain the local queue before we attempt to
2263 // drain the global stack.
2264 assert(partially || _task_queue->size() == 0, "invariant");
2265
2266 // Decide what the target size is, depending on whether we're going to
2267 // drain it partially (so that other tasks can steal if they run out
2268 // of things to do) or totally (at the very end).
2269 // Notice that when draining the global mark stack partially, due to the raciness
2270 // of the mark stack size update we might in fact drop below the target. But,
2271 // this is not a problem.
2272 // In case of total draining, we simply process until the global mark stack is
2273 // totally empty, disregarding the size counter.
2274 if (partially) {
2275 size_t const target_size = _cm->partial_mark_stack_size_target();
2276 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2277 if (get_entries_from_global_stack()) {
2278 drain_local_queue(partially);
2279 }
2280 }
2281 } else {
2282 while (!has_aborted() && get_entries_from_global_stack()) {
2283 drain_local_queue(partially);
2284 }
2285 }
2286 }
2287
2288 // SATB Queue has several assumptions on whether to call the par or
2289 // non-par versions of the methods. This is why some of the code is
2290 // replicated. We should really get rid of the single-threaded version
2291 // of the code to simplify things.
2292 void G1CMTask::drain_satb_buffers() {
2293 if (has_aborted()) {
2294 return;
2295 }
2296
2297 // We set this so that the regular clock knows that we're in the
2298 // middle of draining buffers and doesn't set the abort flag when it
2299 // notices that SATB buffers are available for draining. It'd be
2300 // very counterproductive if it did that. :-)
2301 _draining_satb_buffers = true;
2302
2303 G1CMSATBBufferClosure satb_cl(this, _g1h);
2304 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2305
2306 // This keeps claiming and applying the closure to completed buffers
2307 // until we run out of buffers or we need to abort.
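// apply_closure_to_completed_buffer() returns true as long as it managed
// to claim and process a completed buffer, so the loop below terminates
// either when the queue set runs dry or when the regular_clock_call()
// issued after each buffer sets the abort flag.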
2308 while (!has_aborted() && 2309 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 2310 regular_clock_call(); 2311 } 2312 2313 _draining_satb_buffers = false; 2314 2315 assert(has_aborted() || 2316 _cm->concurrent() || 2317 satb_mq_set.completed_buffers_num() == 0, "invariant"); 2318 2319 // again, this was a potentially expensive operation, decrease the 2320 // limits to get the regular clock call early 2321 decrease_limits(); 2322 } 2323 2324 void G1CMTask::clear_mark_stats_cache(uint region_idx) { 2325 _mark_stats_cache.reset(region_idx); 2326 } 2327 2328 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() { 2329 return _mark_stats_cache.evict_all(); 2330 } 2331 2332 void G1CMTask::print_stats() { 2333 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls); 2334 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 2335 _elapsed_time_ms, _termination_time_ms); 2336 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms", 2337 _step_times_ms.num(), 2338 _step_times_ms.avg(), 2339 _step_times_ms.sd(), 2340 _step_times_ms.maximum(), 2341 _step_times_ms.sum()); 2342 size_t const hits = _mark_stats_cache.hits(); 2343 size_t const misses = _mark_stats_cache.misses(); 2344 log_debug(gc, stats)(" Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f", 2345 hits, misses, percent_of(hits, hits + misses)); 2346 } 2347 2348 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) { 2349 return _task_queues->steal(worker_id, hash_seed, task_entry); 2350 } 2351 2352 /***************************************************************************** 2353 2354 The do_marking_step(time_target_ms, ...) method is the building 2355 block of the parallel marking framework. It can be called in parallel 2356 with other invocations of do_marking_step() on different tasks 2357 (but only one per task, obviously) and concurrently with the 2358 mutator threads, or during remark, hence it eliminates the need 2359 for two versions of the code. When called during remark, it will 2360 pick up from where the task left off during the concurrent marking 2361 phase. Interestingly, tasks are also claimable during evacuation 2362 pauses too, since do_marking_step() ensures that it aborts before 2363 it needs to yield. 2364 2365 The data structures that it uses to do marking work are the 2366 following: 2367 2368 (1) Marking Bitmap. If there are gray objects that appear only 2369 on the bitmap (this happens either when dealing with an overflow 2370 or when the initial marking phase has simply marked the roots 2371 and didn't push them on the stack), then tasks claim heap 2372 regions whose bitmap they then scan to find gray objects. A 2373 global finger indicates where the end of the last claimed region 2374 is. A local finger indicates how far into the region a task has 2375 scanned. The two fingers are used to determine how to gray an 2376 object (i.e. whether simply marking it is OK, as it will be 2377 visited by a task in the future, or whether it needs to be also 2378 pushed on a stack). 2379 2380 (2) Local Queue. The local queue of the task which is accessed 2381 reasonably efficiently by the task. Other tasks can steal from 2382 it when they run out of work. Throughout the marking phase, a 2383 task attempts to keep its local queue short but not totally 2384 empty, so that entries are available for stealing by other 2385 tasks. 
Only when there is no more work will a task totally
2386 drain its local queue.
2387
2388 (3) Global Mark Stack. This handles local queue overflow. During
2389 marking only sets of entries are moved between it and the local
2390 queues, as access to it requires a mutex and more fine-grain
2391 interaction with it which might cause contention. If it
2392 overflows, then the marking phase should restart and iterate
2393 over the bitmap to identify gray objects. Throughout the marking
2394 phase, tasks attempt to keep the global mark stack at a small
2395 length but not totally empty, so that entries are available for
2396 popping by other tasks. Only when there is no more work will tasks
2397 totally drain the global mark stack.
2398
2399 (4) SATB Buffer Queue. This is where completed SATB buffers are
2400 made available. Buffers are regularly removed from this queue
2401 and scanned for roots, so that the queue doesn't get too
2402 long. During remark, all completed buffers are processed, as
2403 well as the filled-in parts of any uncompleted buffers.
2404
2405 The do_marking_step() method tries to abort when the time target
2406 has been reached. There are a few other cases when the
2407 do_marking_step() method also aborts:
2408
2409 (1) When the marking phase has been aborted (after a Full GC).
2410
2411 (2) When a global overflow (on the global stack) has been
2412 triggered. Before the task aborts, it will actually sync up with
2413 the other tasks to ensure that all the marking data structures
2414 (local queues, stacks, fingers etc.) are re-initialized so that
2415 when do_marking_step() completes, the marking phase can
2416 immediately restart.
2417
2418 (3) When enough completed SATB buffers are available. The
2419 do_marking_step() method only tries to drain SATB buffers right
2420 at the beginning. So, if enough buffers are available, the
2421 marking step aborts and the SATB buffers are processed at
2422 the beginning of the next invocation.
2423
2424 (4) To yield. When we have to yield, we abort and yield
2425 right at the end of do_marking_step(). This saves us from a lot
2426 of hassle as, by yielding, we might allow a Full GC. If this
2427 happens then objects will be compacted underneath our feet, the
2428 heap might shrink, etc. We save checking for this by just
2429 aborting and doing the yield right at the end.
2430
2431 From the above it follows that the do_marking_step() method should
2432 be called in a loop (or, otherwise, regularly) until it completes.
2433
2434 If a marking step completes without its has_aborted() flag being
2435 true, it means it has completed the current marking phase (and
2436 also all other marking tasks have done so and have all synced up).
2437
2438 A method called regular_clock_call() is invoked "regularly" (in
2439 sub-ms intervals) throughout marking. It is this clock method that
2440 checks all the abort conditions which were mentioned above and
2441 decides when the task should abort. A work-based scheme is used to
2442 trigger this clock method: when the number of object words the
2443 marking phase has scanned or the number of references the marking
2444 phase has visited reaches a given limit. Additional invocations of
2445 the clock method have been planted in a few other strategic places
2446 too. The initial reason for the clock method was to avoid calling
2447 vtime too regularly, as it is quite expensive.
So, once it was in 2448 place, it was natural to piggy-back all the other conditions on it 2449 too and not constantly check them throughout the code. 2450 2451 If do_termination is true then do_marking_step will enter its 2452 termination protocol. 2453 2454 The value of is_serial must be true when do_marking_step is being 2455 called serially (i.e. by the VMThread) and do_marking_step should 2456 skip any synchronization in the termination and overflow code. 2457 Examples include the serial remark code and the serial reference 2458 processing closures. 2459 2460 The value of is_serial must be false when do_marking_step is 2461 being called by any of the worker threads in a work gang. 2462 Examples include the concurrent marking code (CMMarkingTask), 2463 the MT remark code, and the MT reference processing closures. 2464 2465 *****************************************************************************/ 2466 2467 void G1CMTask::do_marking_step(double time_target_ms, 2468 bool do_termination, 2469 bool is_serial) { 2470 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2471 2472 _start_time_ms = os::elapsedVTime() * 1000.0; 2473 2474 // If do_stealing is true then do_marking_step will attempt to 2475 // steal work from the other G1CMTasks. It only makes sense to 2476 // enable stealing when the termination protocol is enabled 2477 // and do_marking_step() is not being called serially. 2478 bool do_stealing = do_termination && !is_serial; 2479 2480 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 2481 _time_target_ms = time_target_ms - diff_prediction_ms; 2482 2483 // set up the variables that are used in the work-based scheme to 2484 // call the regular clock method 2485 _words_scanned = 0; 2486 _refs_reached = 0; 2487 recalculate_limits(); 2488 2489 // clear all flags 2490 clear_has_aborted(); 2491 _has_timed_out = false; 2492 _draining_satb_buffers = false; 2493 2494 ++_calls; 2495 2496 // Set up the bitmap and oop closures. Anything that uses them is 2497 // eventually called from this method, so it is OK to allocate these 2498 // statically. 2499 G1CMBitMapClosure bitmap_closure(this, _cm); 2500 G1CMOopClosure cm_oop_closure(_g1h, this); 2501 set_cm_oop_closure(&cm_oop_closure); 2502 2503 if (_cm->has_overflown()) { 2504 // This can happen if the mark stack overflows during a GC pause 2505 // and this task, after a yield point, restarts. We have to abort 2506 // as we need to get into the overflow protocol which happens 2507 // right at the end of this task. 2508 set_has_aborted(); 2509 } 2510 2511 // First drain any available SATB buffers. After this, we will not 2512 // look at SATB buffers before the next invocation of this method. 2513 // If enough completed SATB buffers are queued up, the regular clock 2514 // will abort this task so that it restarts. 2515 drain_satb_buffers(); 2516 // ...then partially drain the local queue and the global stack 2517 drain_local_queue(true); 2518 drain_global_stack(true); 2519 2520 do { 2521 if (!has_aborted() && _curr_region != NULL) { 2522 // This means that we're already holding on to a region. 2523 assert(_finger != NULL, "if region is not NULL, then the finger " 2524 "should not be NULL either"); 2525 2526 // We might have restarted this task after an evacuation pause 2527 // which might have evacuated the region we're holding on to 2528 // underneath our feet. 
Let's read its limit again to make sure 2529 // that we do not iterate over a region of the heap that 2530 // contains garbage (update_region_limit() will also move 2531 // _finger to the start of the region if it is found empty). 2532 update_region_limit(); 2533 // We will start from _finger not from the start of the region, 2534 // as we might be restarting this task after aborting half-way 2535 // through scanning this region. In this case, _finger points to 2536 // the address where we last found a marked object. If this is a 2537 // fresh region, _finger points to start(). 2538 MemRegion mr = MemRegion(_finger, _region_limit); 2539 2540 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(), 2541 "humongous regions should go around loop once only"); 2542 2543 // Some special cases: 2544 // If the memory region is empty, we can just give up the region. 2545 // If the current region is humongous then we only need to check 2546 // the bitmap for the bit associated with the start of the object, 2547 // scan the object if it's live, and give up the region. 2548 // Otherwise, let's iterate over the bitmap of the part of the region 2549 // that is left. 2550 // If the iteration is successful, give up the region. 2551 if (mr.is_empty()) { 2552 giveup_current_region(); 2553 regular_clock_call(); 2554 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) { 2555 if (_next_mark_bitmap->is_marked(mr.start())) { 2556 // The object is marked - apply the closure 2557 bitmap_closure.do_addr(mr.start()); 2558 } 2559 // Even if this task aborted while scanning the humongous object 2560 // we can (and should) give up the current region. 2561 giveup_current_region(); 2562 regular_clock_call(); 2563 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) { 2564 giveup_current_region(); 2565 regular_clock_call(); 2566 } else { 2567 assert(has_aborted(), "currently the only way to do so"); 2568 // The only way to abort the bitmap iteration is to return 2569 // false from the do_bit() method. However, inside the 2570 // do_bit() method we move the _finger to point to the 2571 // object currently being looked at. So, if we bail out, we 2572 // have definitely set _finger to something non-null. 2573 assert(_finger != NULL, "invariant"); 2574 2575 // Region iteration was actually aborted. So now _finger 2576 // points to the address of the object we last scanned. If we 2577 // leave it there, when we restart this task, we will rescan 2578 // the object. It is easy to avoid this. We move the finger by 2579 // enough to point to the next possible object header. 2580 assert(_finger < _region_limit, "invariant"); 2581 HeapWord* const new_finger = _finger + ((oop)_finger)->size(); 2582 // Check if bitmap iteration was aborted while scanning the last object 2583 if (new_finger >= _region_limit) { 2584 giveup_current_region(); 2585 } else { 2586 move_finger_to(new_finger); 2587 } 2588 } 2589 } 2590 // At this point we have either completed iterating over the 2591 // region we were holding on to, or we have aborted. 2592 2593 // We then partially drain the local queue and the global stack. 2594 // (Do we really need this?) 2595 drain_local_queue(true); 2596 drain_global_stack(true); 2597 2598 // Read the note on the claim_region() method on why it might 2599 // return NULL with potentially more regions available for 2600 // claiming and why we have to check out_of_regions() to determine 2601 // whether we're done or not. 
2602 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2603 // We are going to try to claim a new region. We should have
2604 // given up on the previous one.
2605 // Separated the asserts so that we know which one fires.
2606 assert(_curr_region == NULL, "invariant");
2607 assert(_finger == NULL, "invariant");
2608 assert(_region_limit == NULL, "invariant");
2609 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2610 if (claimed_region != NULL) {
2611 // Yes, we managed to claim one
2612 setup_for_region(claimed_region);
2613 assert(_curr_region == claimed_region, "invariant");
2614 }
2615 // It is important to call the regular clock here. It might take
2616 // a while to claim a region if, for example, we hit a large
2617 // block of empty regions. So we need to call the regular clock
2618 // method once round the loop to make sure it's called
2619 // frequently enough.
2620 regular_clock_call();
2621 }
2622
2623 if (!has_aborted() && _curr_region == NULL) {
2624 assert(_cm->out_of_regions(),
2625 "at this point we should be out of regions");
2626 }
2627 } while (_curr_region != NULL && !has_aborted());
2628
2629 if (!has_aborted()) {
2630 // We cannot check whether the global stack is empty, since other
2631 // tasks might be pushing objects to it concurrently.
2632 assert(_cm->out_of_regions(),
2633 "at this point we should be out of regions");
2634 // Try to reduce the number of available SATB buffers so that
2635 // remark has less work to do.
2636 drain_satb_buffers();
2637 }
2638
2639 // Since we've done everything else, we can now totally drain the
2640 // local queue and global stack.
2641 drain_local_queue(false);
2642 drain_global_stack(false);
2643
2644 // Attempt at work stealing from other tasks' queues.
2645 if (do_stealing && !has_aborted()) {
2646 // We have not aborted. This means that we have finished all that
2647 // we could. Let's try to do some stealing...
2648
2649 // We cannot check whether the global stack is empty, since other
2650 // tasks might be pushing objects to it concurrently.
2651 assert(_cm->out_of_regions() && _task_queue->size() == 0,
2652 "only way to reach here");
2653 while (!has_aborted()) {
2654 G1TaskQueueEntry entry;
2655 if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) {
2656 scan_task_entry(entry);
2657
2658 // And since we're towards the end, let's totally drain the
2659 // local queue and global stack.
2660 drain_local_queue(false);
2661 drain_global_stack(false);
2662 } else {
2663 break;
2664 }
2665 }
2666 }
2667
2668 // We still haven't aborted. Now, let's try to get into the
2669 // termination protocol.
2670 if (do_termination && !has_aborted()) {
2671 // We cannot check whether the global stack is empty, since other
2672 // tasks might be concurrently pushing objects on it.
2673 // Separated the asserts so that we know which one fires.
2674 assert(_cm->out_of_regions(), "only way to reach here");
2675 assert(_task_queue->size() == 0, "only way to reach here");
2676 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2677
2678 // The G1CMTask class also extends the TerminatorTerminator class,
2679 // hence its should_exit_termination() method will also decide
2680 // whether to exit the termination protocol or not.
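// offer_termination() below blocks until either every active task has
// offered to terminate (global convergence) or should_exit_termination()
// reports new global-stack work or an abort, in which case the task drops
// out of the protocol and do_marking_step() aborts further down. Serial
// callers skip the protocol entirely.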
2681 bool finished = (is_serial ||
2682 _cm->terminator()->offer_termination(this));
2683 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2684 _termination_time_ms +=
2685 termination_end_time_ms - _termination_start_time_ms;
2686
2687 if (finished) {
2688 // We're all done.
2689
2690 // We can now guarantee that the global stack is empty, since
2691 // all other tasks have finished. We separated the guarantees so
2692 // that, if a condition is false, we can immediately find out
2693 // which one.
2694 guarantee(_cm->out_of_regions(), "only way to reach here");
2695 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2696 guarantee(_task_queue->size() == 0, "only way to reach here");
2697 guarantee(!_cm->has_overflown(), "only way to reach here");
2698 } else {
2699 // Apparently there's more work to do. Let's abort this task. The
2700 // caller will restart it and we can hopefully find more things to do.
2701 set_has_aborted();
2702 }
2703 }
2704
2705 // Mainly for debugging purposes to make sure that a pointer to the
2706 // closure which was statically allocated in this frame doesn't
2707 // escape it by accident.
2708 set_cm_oop_closure(NULL);
2709 double end_time_ms = os::elapsedVTime() * 1000.0;
2710 double elapsed_time_ms = end_time_ms - _start_time_ms;
2711 // Update the step history.
2712 _step_times_ms.add(elapsed_time_ms);
2713
2714 if (has_aborted()) {
2715 // The task was aborted for some reason.
2716 if (_has_timed_out) {
2717 double diff_ms = elapsed_time_ms - _time_target_ms;
2718 // Keep statistics of how well we did with respect to hitting
2719 // our target only if we actually timed out (if we aborted for
2720 // other reasons, then the results might get skewed).
2721 _marking_step_diffs_ms.add(diff_ms);
2722 }
2723
2724 if (_cm->has_overflown()) {
2725 // This is the interesting one. We aborted because a global
2726 // overflow was raised. This means we have to restart the
2727 // marking phase and start iterating over regions. However, in
2728 // order to do this we have to make sure that all tasks stop
2729 // what they are doing and re-initialize in a safe manner. We
2730 // will achieve this with the use of two barrier sync points.
2731
2732 if (!is_serial) {
2733 // We only need to enter the sync barrier if being called
2734 // from a parallel context
2735 _cm->enter_first_sync_barrier(_worker_id);
2736
2737 // When we exit this sync barrier we know that all tasks have
2738 // stopped doing marking work. So, it's now safe to
2739 // re-initialize our data structures.
2740 }
2741
2742 clear_region_fields();
2743 flush_mark_stats_cache();
2744
2745 if (!is_serial) {
2746 // If we're executing the concurrent phase of marking, reset the marking
2747 // state; otherwise the marking state is reset after reference processing,
2748 // during the remark pause.
2749 // If we reset here as a result of an overflow during the remark we will
2750 // see assertion failures from any subsequent set_concurrency_and_phase()
2751 // calls.
2752 if (_cm->concurrent() && _worker_id == 0) {
2753 // Worker 0 is responsible for clearing the global data structures because
2754 // of an overflow. During STW we should not clear the overflow flag (in
2755 // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit
2756 // this method to abort the pause and restart concurrent marking.
2757 _cm->reset_marking_for_restart();
2758
2759 log_info(gc, marking)("Concurrent Mark reset for overflow");
2760 }
2761
2762 // ...and enter the second barrier.
2763 _cm->enter_second_sync_barrier(_worker_id); 2764 } 2765 // At this point, if we're during the concurrent phase of 2766 // marking, everything has been re-initialized and we're 2767 // ready to restart. 2768 } 2769 } 2770 } 2771 2772 G1CMTask::G1CMTask(uint worker_id, 2773 G1ConcurrentMark* cm, 2774 G1CMTaskQueue* task_queue, 2775 G1RegionMarkStats* mark_stats, 2776 uint max_regions) : 2777 _objArray_processor(this), 2778 _worker_id(worker_id), 2779 _g1h(G1CollectedHeap::heap()), 2780 _cm(cm), 2781 _next_mark_bitmap(NULL), 2782 _task_queue(task_queue), 2783 _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize), 2784 _calls(0), 2785 _time_target_ms(0.0), 2786 _start_time_ms(0.0), 2787 _cm_oop_closure(NULL), 2788 _curr_region(NULL), 2789 _finger(NULL), 2790 _region_limit(NULL), 2791 _words_scanned(0), 2792 _words_scanned_limit(0), 2793 _real_words_scanned_limit(0), 2794 _refs_reached(0), 2795 _refs_reached_limit(0), 2796 _real_refs_reached_limit(0), 2797 _hash_seed(17), 2798 _has_aborted(false), 2799 _has_timed_out(false), 2800 _draining_satb_buffers(false), 2801 _step_times_ms(), 2802 _elapsed_time_ms(0.0), 2803 _termination_time_ms(0.0), 2804 _termination_start_time_ms(0.0), 2805 _marking_step_diffs_ms() 2806 { 2807 guarantee(task_queue != NULL, "invariant"); 2808 2809 _marking_step_diffs_ms.add(0.5); 2810 } 2811 2812 // These are formatting macros that are used below to ensure 2813 // consistent formatting. The *_H_* versions are used to format the 2814 // header for a particular value and they should be kept consistent 2815 // with the corresponding macro. Also note that most of the macros add 2816 // the necessary white space (as a prefix) which makes them a bit 2817 // easier to compose. 2818 2819 // All the output lines are prefixed with this string to be able to 2820 // identify them easily in a large log file. 2821 #define G1PPRL_LINE_PREFIX "###" 2822 2823 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2824 #ifdef _LP64 2825 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2826 #else // _LP64 2827 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2828 #endif // _LP64 2829 2830 // For per-region info 2831 #define G1PPRL_TYPE_FORMAT " %-4s" 2832 #define G1PPRL_TYPE_H_FORMAT " %4s" 2833 #define G1PPRL_STATE_FORMAT " %-5s" 2834 #define G1PPRL_STATE_H_FORMAT " %5s" 2835 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2836 #define G1PPRL_BYTE_H_FORMAT " %9s" 2837 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2838 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2839 2840 // For summary info 2841 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2842 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2843 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2844 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2845 2846 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2847 _total_used_bytes(0), _total_capacity_bytes(0), 2848 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2849 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2850 { 2851 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2852 MemRegion g1_reserved = g1h->g1_reserved(); 2853 double now = os::elapsedTime(); 2854 2855 // Print the header of the output. 
2856 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2857 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2858 G1PPRL_SUM_ADDR_FORMAT("reserved") 2859 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2860 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2861 HeapRegion::GrainBytes); 2862 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2863 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2864 G1PPRL_TYPE_H_FORMAT 2865 G1PPRL_ADDR_BASE_H_FORMAT 2866 G1PPRL_BYTE_H_FORMAT 2867 G1PPRL_BYTE_H_FORMAT 2868 G1PPRL_BYTE_H_FORMAT 2869 G1PPRL_DOUBLE_H_FORMAT 2870 G1PPRL_BYTE_H_FORMAT 2871 G1PPRL_STATE_H_FORMAT 2872 G1PPRL_BYTE_H_FORMAT, 2873 "type", "address-range", 2874 "used", "prev-live", "next-live", "gc-eff", 2875 "remset", "state", "code-roots"); 2876 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2877 G1PPRL_TYPE_H_FORMAT 2878 G1PPRL_ADDR_BASE_H_FORMAT 2879 G1PPRL_BYTE_H_FORMAT 2880 G1PPRL_BYTE_H_FORMAT 2881 G1PPRL_BYTE_H_FORMAT 2882 G1PPRL_DOUBLE_H_FORMAT 2883 G1PPRL_BYTE_H_FORMAT 2884 G1PPRL_STATE_H_FORMAT 2885 G1PPRL_BYTE_H_FORMAT, 2886 "", "", 2887 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 2888 "(bytes)", "", "(bytes)"); 2889 } 2890 2891 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { 2892 const char* type = r->get_type_str(); 2893 HeapWord* bottom = r->bottom(); 2894 HeapWord* end = r->end(); 2895 size_t capacity_bytes = r->capacity(); 2896 size_t used_bytes = r->used(); 2897 size_t prev_live_bytes = r->live_bytes(); 2898 size_t next_live_bytes = r->next_live_bytes(); 2899 double gc_eff = r->gc_efficiency(); 2900 size_t remset_bytes = r->rem_set()->mem_size(); 2901 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 2902 const char* remset_type = r->rem_set()->get_short_state_str(); 2903 2904 _total_used_bytes += used_bytes; 2905 _total_capacity_bytes += capacity_bytes; 2906 _total_prev_live_bytes += prev_live_bytes; 2907 _total_next_live_bytes += next_live_bytes; 2908 _total_remset_bytes += remset_bytes; 2909 _total_strong_code_roots_bytes += strong_code_roots_bytes; 2910 2911 // Print a line for this particular region. 2912 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2913 G1PPRL_TYPE_FORMAT 2914 G1PPRL_ADDR_BASE_FORMAT 2915 G1PPRL_BYTE_FORMAT 2916 G1PPRL_BYTE_FORMAT 2917 G1PPRL_BYTE_FORMAT 2918 G1PPRL_DOUBLE_FORMAT 2919 G1PPRL_BYTE_FORMAT 2920 G1PPRL_STATE_FORMAT 2921 G1PPRL_BYTE_FORMAT, 2922 type, p2i(bottom), p2i(end), 2923 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 2924 remset_bytes, remset_type, strong_code_roots_bytes); 2925 2926 return false; 2927 } 2928 2929 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 2930 // add static memory usages to remembered set sizes 2931 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 2932 // Print the footer of the output. 
2933 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2934 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2935 " SUMMARY" 2936 G1PPRL_SUM_MB_FORMAT("capacity") 2937 G1PPRL_SUM_MB_PERC_FORMAT("used") 2938 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 2939 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 2940 G1PPRL_SUM_MB_FORMAT("remset") 2941 G1PPRL_SUM_MB_FORMAT("code-roots"), 2942 bytes_to_mb(_total_capacity_bytes), 2943 bytes_to_mb(_total_used_bytes), 2944 percent_of(_total_used_bytes, _total_capacity_bytes), 2945 bytes_to_mb(_total_prev_live_bytes), 2946 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 2947 bytes_to_mb(_total_next_live_bytes), 2948 percent_of(_total_next_live_bytes, _total_capacity_bytes), 2949 bytes_to_mb(_total_remset_bytes), 2950 bytes_to_mb(_total_strong_code_roots_bytes)); 2951 }
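// Typical driver for the closure above (a sketch; the actual call sites
// live in G1's liveness-printing paths, and heap_region_iterate() is the
// standard G1CollectedHeap region visitor):
//
//   G1PrintRegionLivenessInfoClosure cl("Post-Marking");
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//
// do_heap_region() always returns false, so every region is visited; the
// per-heap summary is emitted when the closure goes out of scope.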