/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}
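
// Note: add_chunk_to_list() and remove_chunk_from_list() do no locking of
// their own; their callers serialize access using MarkStackChunkList_lock
// and MarkStackFreeList_lock respectively.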
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}
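
// Survivor root regions are handed out lock-free: claim_next() atomically
// bumps _claimed_survivor_index, and a worker owns the survivor region at
// the returned index as long as that index is within the survivor array.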
void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
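// For example, ParallelGCThreads = 8 maps to (8 + 2) / 4 = 2 concurrent
// workers (integer division), while small values (1-5) still map to a
// single worker.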
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _cleanup_list("Concurrent Mark Cleanup List"),
  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap_start(_g1h->reserved_region().start()),
  _heap_end(_g1h->reserved_region().end()),

  _root_regions(),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0)
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue);

    _accum_task_vtime[i] = 0.0;
  }

  set_non_marking_state();
  _completed_initialization = true;
}
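
// Note the distinction between the two "reset" entry points below: reset()
// does the full once-per-cycle re-initialization during the initial-mark
// pause, while reset_marking_state() only clears the stacks and the finger
// and is also used when marking has to restart after a mark stack overflow.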
void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void G1ConcurrentMark::reset_marking_state() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();
  }

  clear_has_overflown();
  _finger = _heap_start;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _num_active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
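  // Clearing is chunked: each region is processed in chunk_size() (1M)
  // pieces so that a concurrently clearing worker can yield to a safepoint
  // between chunks.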
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_concurrent_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool do_heap_region(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
  CheckBitmapClearHRClosure cl(_next_mark_bitmap);
  _g1h->heap_region_iterate(&cl);
  return cl.is_complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpoint_roots_initial_pre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpoint_roots_initial_post() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended for a Full GC or for an evacuation
 * pause to occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state();

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial*/);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                      1, /* Minimum workers */
                                                      _num_concurrent_workers,
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_workers() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}
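
// Root region scanning runs concurrently with the mutator, but it must be
// finished before the next evacuation pause can start; the collector
// synchronizes on this via G1CMRootRegions::wait_until_scan_finished()
// above.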
void G1ConcurrentMark::scan_root_region(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this, cm_thread());
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpoint_roots_final_work();

  double mark_work_end = os::elapsedTime();

  weak_refs_work(clear_all_soft_refs);

  if (has_overflown()) {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool do_heap_region(HeapRegion *hr) {
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
    assert(g1_note_end.is_complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not be able to guarantee
      // that we only generate output for the newly-reclaimed regions
      // (the list might not be empty at the beginning of cleanup; we
      // might still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc)("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc)("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swap_mark_bitmaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  int _ref_counter_limit;
  int _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMtask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.
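
// Note that the timeout passed to do_marking_step() below is so large that
// the step will not give up for time reasons; the loop simply retries until
// the step completes without aborting, or the global mark stack overflows.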

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists. We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
1562 G1CMIsAliveClosure g1_is_alive(g1h);
1563
1564 // Inner scope to exclude the cleaning of the string and symbol
1565 // tables from the displayed time.
1566 {
1567 GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1568
1569 ReferenceProcessor* rp = g1h->ref_processor_cm();
1570
1571 // See the comment in G1CollectedHeap::ref_processing_init()
1572 // about how reference processing currently works in G1.
1573
1574 // Set the soft reference policy
1575 rp->setup_policy(clear_all_soft_refs);
1576 assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1577
1578 // Instances of the 'Keep Alive' and 'Complete GC' closures used
1579 // in serial reference processing. Note these closures are also
1580 // used for serially processing (by the current thread) the
1581 // JNI references during parallel reference processing.
1582 //
1583 // These closures do not need to synchronize with the worker
1584 // threads involved in parallel reference processing as these
1585 // instances are executed serially by the current thread (e.g.
1586 // reference processing is not multi-threaded and is thus
1587 // performed by the current thread instead of a gang worker).
1588 //
1589 // The gang tasks involved in parallel reference processing create
1590 // their own instances of these closures, which do their own
1591 // synchronization among themselves.
1592 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1593 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1594
1595 // We need at least one active thread. If reference processing
1596 // is not multi-threaded we use the current (VMThread) thread,
1597 // otherwise we use the work gang from the G1CollectedHeap and
1598 // we utilize all the worker threads we can.
1599 bool processing_is_mt = rp->processing_is_mt();
1600 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1601 active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1602
1603 // Parallel processing task executor.
1604 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1605 g1h->workers(), active_workers);
1606 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1607
1608 // Set the concurrency level. The phase was already set prior to
1609 // executing the remark task.
1610 set_concurrency(active_workers);
1611
1612 // Set the degree of MT processing here. If the discovery was done MT,
1613 // the number of threads involved during discovery could differ from
1614 // the number of active workers. This is OK as long as the discovered
1615 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1616 rp->set_active_mt_degree(active_workers);
1617
1618 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
1619
1620 // Process the weak references.
1621 const ReferenceProcessorStats& stats =
1622 rp->process_discovered_references(&g1_is_alive,
1623 &g1_keep_alive,
1624 &g1_drain_mark_stack,
1625 executor,
1626 &pt);
1627 _gc_tracer_cm->report_gc_reference_stats(stats);
1628 pt.print_all_references();
1629
1630 // The do_oop work routines of the keep_alive and drain_marking_stack
1631 // oop closures will set the has_overflown flag if we overflow the
1632 // global marking stack.
1633 1634 assert(has_overflown() || _global_mark_stack.is_empty(), 1635 "Mark stack should be empty (unless it has overflown)"); 1636 1637 assert(rp->num_q() == active_workers, "why not"); 1638 1639 rp->enqueue_discovered_references(executor, &pt); 1640 1641 rp->verify_no_references_recorded(); 1642 1643 pt.print_enqueue_phase(); 1644 1645 assert(!rp->discovery_enabled(), "Post condition"); 1646 } 1647 1648 assert(has_overflown() || _global_mark_stack.is_empty(), 1649 "Mark stack should be empty (unless it has overflown)"); 1650 1651 { 1652 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm); 1653 WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl); 1654 } 1655 1656 if (has_overflown()) { 1657 // We can not trust g1_is_alive if the marking stack overflowed 1658 return; 1659 } 1660 1661 assert(_global_mark_stack.is_empty(), "Marking should have completed"); 1662 1663 // Unload Klasses, String, Symbols, Code Cache, etc. 1664 if (ClassUnloadingWithConcurrentMark) { 1665 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); 1666 bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */); 1667 g1h->complete_cleaning(&g1_is_alive, purged_classes); 1668 } else { 1669 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm); 1670 // No need to clean string table and symbol table as they are treated as strong roots when 1671 // class unloading is disabled. 1672 g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled()); 1673 1674 } 1675 } 1676 1677 void G1ConcurrentMark::swap_mark_bitmaps() { 1678 G1CMBitMap* temp = _prev_mark_bitmap; 1679 _prev_mark_bitmap = _next_mark_bitmap; 1680 _next_mark_bitmap = temp; 1681 } 1682 1683 // Closure for marking entries in SATB buffers. 1684 class G1CMSATBBufferClosure : public SATBBufferClosure { 1685 private: 1686 G1CMTask* _task; 1687 G1CollectedHeap* _g1h; 1688 1689 // This is very similar to G1CMTask::deal_with_reference, but with 1690 // more relaxed requirements for the argument, so this must be more 1691 // circumspect about treating the argument as an object. 
1692 void do_entry(void* entry) const {
1693 _task->increment_refs_reached();
1694 oop const obj = static_cast<oop>(entry);
1695 _task->make_reference_grey(obj);
1696 }
1697
1698 public:
1699 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1700 : _task(task), _g1h(g1h) { }
1701
1702 virtual void do_buffer(void** buffer, size_t size) {
1703 for (size_t i = 0; i < size; ++i) {
1704 do_entry(buffer[i]);
1705 }
1706 }
1707 };
1708
1709 class G1RemarkThreadsClosure : public ThreadClosure {
1710 G1CMSATBBufferClosure _cm_satb_cl;
1711 G1CMOopClosure _cm_cl;
1712 MarkingCodeBlobClosure _code_cl;
1713 int _thread_parity;
1714
1715 public:
1716 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1717 _cm_satb_cl(task, g1h),
1718 _cm_cl(g1h, g1h->concurrent_mark(), task),
1719 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1720 _thread_parity(Threads::thread_claim_parity()) {}
1721
1722 void do_thread(Thread* thread) {
1723 if (thread->is_Java_thread()) {
1724 if (thread->claim_oops_do(true, _thread_parity)) {
1725 JavaThread* jt = (JavaThread*)thread;
1726
1727 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1728 // however, oops reachable from nmethods have very complex lifecycles:
1729 // * Alive if on the stack of an executing method
1730 // * Weakly reachable otherwise
1731 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1732 // live by the SATB invariant but other oops recorded in nmethods may behave differently.
1733 jt->nmethods_do(&_code_cl);
1734
1735 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1736 }
1737 } else if (thread->is_VM_thread()) {
1738 if (thread->claim_oops_do(true, _thread_parity)) {
1739 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1740 }
1741 }
1742 }
1743 };
1744
1745 class G1CMRemarkTask: public AbstractGangTask {
1746 private:
1747 G1ConcurrentMark* _cm;
1748 public:
1749 void work(uint worker_id) {
1750 G1CMTask* task = _cm->task(worker_id);
1751 task->record_start_time();
1752 {
1753 ResourceMark rm;
1754 HandleMark hm;
1755
1756 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1757 Threads::threads_do(&threads_f);
1758 }
1759
1760 do {
1761 task->do_marking_step(1000000000.0 /* something very large */,
1762 true /* do_termination */,
1763 false /* is_serial */);
1764 } while (task->has_aborted() && !_cm->has_overflown());
1765 // If we overflow, then we do not want to restart. We instead
1766 // want to abort remark and do concurrent marking again.
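// (The caller of the remark pause is then expected to detect has_overflown()
// and restart concurrent marking.)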
1767 task->record_end_time();
1768 }
1769
1770 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1771 AbstractGangTask("Par Remark"), _cm(cm) {
1772 _cm->terminator()->reset_for_reuse(active_workers);
1773 }
1774 };
1775
1776 void G1ConcurrentMark::checkpoint_roots_final_work() {
1777 ResourceMark rm;
1778 HandleMark hm;
1779 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1780
1781 GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1782
1783 g1h->ensure_parsability(false);
1784
1785 // this is remark, so we'll use up all active threads
1786 uint active_workers = g1h->workers()->active_workers();
1787 set_concurrency_and_phase(active_workers, false /* concurrent */);
1788 // Leave _parallel_marking_threads at its
1789 // value originally calculated in the G1ConcurrentMark
1790 // constructor and pass values of the active workers
1791 // through the gang in the task.
1792
1793 {
1794 StrongRootsScope srs(active_workers);
1795
1796 G1CMRemarkTask remarkTask(this, active_workers);
1797 // We will start all available threads, even if we decide that the
1798 // active_workers will be fewer. The extra ones will just bail out
1799 // immediately.
1800 g1h->workers()->run_task(&remarkTask);
1801 }
1802
1803 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1804 guarantee(has_overflown() ||
1805 satb_mq_set.completed_buffers_num() == 0,
1806 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1807 BOOL_TO_STR(has_overflown()),
1808 satb_mq_set.completed_buffers_num());
1809
1810 print_stats();
1811 }
1812
1813 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1814 _prev_mark_bitmap->clear_range(mr);
1815 }
1816
1817 HeapRegion*
1818 G1ConcurrentMark::claim_region(uint worker_id) {
1819 // "checkpoint" the finger
1820 HeapWord* finger = _finger;
1821
1822 // _heap_end will not change underneath our feet; it only changes at
1823 // yield points.
1824 while (finger < _heap_end) {
1825 assert(_g1h->is_in_g1_reserved(finger), "invariant");
1826
1827 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1828 // Make sure that the reads below do not float before loading curr_region.
1829 OrderAccess::loadload();
1830 // Above, heap_region_containing may return NULL as we always scan and claim
1831 // regions up to the end of the heap. In this case, just jump to the next region.
1832 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1833
1834 // Is the gap between reading the finger and doing the CAS too long?
1835 HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1836 if (res == finger && curr_region != NULL) {
1837 // we succeeded
1838 HeapWord* bottom = curr_region->bottom();
1839 HeapWord* limit = curr_region->next_top_at_mark_start();
1840
1841 // notice that _finger == end cannot be guaranteed here since
1842 // someone else might have moved the finger even further
1843 assert(_finger >= end, "the finger should have moved forward");
1844
1845 if (limit > bottom) {
1846 return curr_region;
1847 } else {
1848 assert(limit == bottom,
1849 "the region limit should be at bottom");
1850 // we return NULL and the caller should try calling
1851 // claim_region() again.
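// Note that returning NULL here does not necessarily mean that the heap is
// out of regions; callers distinguish the two cases via out_of_regions()
// (see the note in do_marking_step()).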
1852 return NULL; 1853 } 1854 } else { 1855 assert(_finger > finger, "the finger should have moved forward"); 1856 // read it again 1857 finger = _finger; 1858 } 1859 } 1860 1861 return NULL; 1862 } 1863 1864 #ifndef PRODUCT 1865 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 1866 private: 1867 G1CollectedHeap* _g1h; 1868 const char* _phase; 1869 int _info; 1870 1871 public: 1872 VerifyNoCSetOops(const char* phase, int info = -1) : 1873 _g1h(G1CollectedHeap::heap()), 1874 _phase(phase), 1875 _info(info) 1876 { } 1877 1878 void operator()(G1TaskQueueEntry task_entry) const { 1879 if (task_entry.is_array_slice()) { 1880 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1881 return; 1882 } 1883 guarantee(oopDesc::is_oop(task_entry.obj()), 1884 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1885 p2i(task_entry.obj()), _phase, _info); 1886 guarantee(!_g1h->is_in_cset(task_entry.obj()), 1887 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1888 p2i(task_entry.obj()), _phase, _info); 1889 } 1890 }; 1891 1892 void G1ConcurrentMark::verify_no_cset_oops() { 1893 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1894 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 1895 return; 1896 } 1897 1898 // Verify entries on the global mark stack 1899 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1900 1901 // Verify entries on the task queues 1902 for (uint i = 0; i < _max_num_tasks; ++i) { 1903 G1CMTaskQueue* queue = _task_queues->queue(i); 1904 queue->iterate(VerifyNoCSetOops("Queue", i)); 1905 } 1906 1907 // Verify the global finger 1908 HeapWord* global_finger = finger(); 1909 if (global_finger != NULL && global_finger < _heap_end) { 1910 // Since we always iterate over all regions, we might get a NULL HeapRegion 1911 // here. 1912 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1913 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1914 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1915 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1916 } 1917 1918 // Verify the task fingers 1919 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 1920 for (uint i = 0; i < _num_concurrent_workers; ++i) { 1921 G1CMTask* task = _tasks[i]; 1922 HeapWord* task_finger = task->finger(); 1923 if (task_finger != NULL && task_finger < _heap_end) { 1924 // See above note on the global finger verification. 
1925 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 1926 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 1927 !task_hr->in_collection_set(), 1928 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1929 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 1930 } 1931 } 1932 } 1933 #endif // PRODUCT 1934 void G1ConcurrentMark::create_live_data() { 1935 _g1h->g1_rem_set()->create_card_live_data(_concurrent_workers, _next_mark_bitmap); 1936 } 1937 1938 void G1ConcurrentMark::finalize_live_data() { 1939 _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _next_mark_bitmap); 1940 } 1941 1942 void G1ConcurrentMark::verify_live_data() { 1943 _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _next_mark_bitmap); 1944 } 1945 1946 void G1ConcurrentMark::clear_live_data(WorkGang* workers) { 1947 _g1h->g1_rem_set()->clear_card_live_data(workers); 1948 } 1949 1950 #ifdef ASSERT 1951 void G1ConcurrentMark::verify_live_data_clear() { 1952 _g1h->g1_rem_set()->verify_card_live_data_is_clear(); 1953 } 1954 #endif 1955 1956 void G1ConcurrentMark::print_stats() { 1957 if (!log_is_enabled(Debug, gc, stats)) { 1958 return; 1959 } 1960 log_debug(gc, stats)("---------------------------------------------------------------------"); 1961 for (size_t i = 0; i < _num_active_tasks; ++i) { 1962 _tasks[i]->print_stats(); 1963 log_debug(gc, stats)("---------------------------------------------------------------------"); 1964 } 1965 } 1966 1967 void G1ConcurrentMark::abort() { 1968 if (!cm_thread()->during_cycle() || _has_aborted) { 1969 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 1970 return; 1971 } 1972 1973 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 1974 // concurrent bitmap clearing. 1975 { 1976 GCTraceTime(Debug, gc)("Clear Next Bitmap"); 1977 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false); 1978 } 1979 // Note we cannot clear the previous marking bitmap here 1980 // since VerifyDuringGC verifies the objects marked during 1981 // a full GC against the previous bitmap. 1982 1983 { 1984 GCTraceTime(Debug, gc)("Clear Live Data"); 1985 clear_live_data(_g1h->workers()); 1986 } 1987 DEBUG_ONLY({ 1988 GCTraceTime(Debug, gc)("Verify Live Data Clear"); 1989 verify_live_data_clear(); 1990 }) 1991 // Empty mark stack 1992 reset_marking_state(); 1993 for (uint i = 0; i < _max_num_tasks; ++i) { 1994 _tasks[i]->clear_region_fields(); 1995 } 1996 _first_overflow_barrier_sync.abort(); 1997 _second_overflow_barrier_sync.abort(); 1998 _has_aborted = true; 1999 2000 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2001 satb_mq_set.abandon_partial_marking(); 2002 // This can be called either during or outside marking, we'll read 2003 // the expected_active value from the SATB queue set. 2004 satb_mq_set.set_active_all_threads( 2005 false, /* new active value */ 2006 satb_mq_set.is_active() /* expected_active */); 2007 } 2008 2009 static void print_ms_time_info(const char* prefix, const char* name, 2010 NumberSeq& ns) { 2011 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2012 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2013 if (ns.num() > 0) { 2014 log_trace(gc, marking)("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 2015 prefix, ns.sd(), ns.maximum()); 2016 } 2017 } 2018 2019 void G1ConcurrentMark::print_summary_info() { 2020 Log(gc, marking) log; 2021 if (!log.is_trace()) { 2022 return; 2023 } 2024 2025 log.trace(" Concurrent marking:"); 2026 print_ms_time_info(" ", "init marks", _init_times); 2027 print_ms_time_info(" ", "remarks", _remark_times); 2028 { 2029 print_ms_time_info(" ", "final marks", _remark_mark_times); 2030 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2031 2032 } 2033 print_ms_time_info(" ", "cleanups", _cleanup_times); 2034 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2035 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2036 if (G1ScrubRemSets) { 2037 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 2038 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2039 } 2040 log.trace(" Total stop_world time = %8.2f s.", 2041 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2042 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2043 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum()); 2044 } 2045 2046 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2047 _concurrent_workers->print_worker_threads_on(st); 2048 } 2049 2050 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2051 _concurrent_workers->threads_do(tc); 2052 } 2053 2054 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2055 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2056 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap)); 2057 _prev_mark_bitmap->print_on_error(st, " Prev Bits: "); 2058 _next_mark_bitmap->print_on_error(st, " Next Bits: "); 2059 } 2060 2061 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2062 ReferenceProcessor* result = g1h->ref_processor_cm(); 2063 assert(result != NULL, "CM reference processor should not be NULL"); 2064 return result; 2065 } 2066 2067 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2068 G1ConcurrentMark* cm, 2069 G1CMTask* task) 2070 : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)), 2071 _g1h(g1h), _cm(cm), _task(task) 2072 { } 2073 2074 void G1CMTask::setup_for_region(HeapRegion* hr) { 2075 assert(hr != NULL, 2076 "claim_region() should have filtered out NULL regions"); 2077 _curr_region = hr; 2078 _finger = hr->bottom(); 2079 update_region_limit(); 2080 } 2081 2082 void G1CMTask::update_region_limit() { 2083 HeapRegion* hr = _curr_region; 2084 HeapWord* bottom = hr->bottom(); 2085 HeapWord* limit = hr->next_top_at_mark_start(); 2086 2087 if (limit == bottom) { 2088 // The region was collected underneath our feet. 2089 // We set the finger to bottom to ensure that the bitmap 2090 // iteration that will follow this will not do anything. 2091 // (this is not a condition that holds when we set the region up, 2092 // as the region is not supposed to be empty in the first place) 2093 _finger = bottom; 2094 } else if (limit >= _region_limit) { 2095 assert(limit >= _finger, "peace of mind"); 2096 } else { 2097 assert(limit < _region_limit, "only way to get here"); 2098 // This can happen under some pretty unusual circumstances. An 2099 // evacuation pause empties the region underneath our feet (NTAMS 2100 // at bottom). 
We then do some allocation in the region (NTAMS 2101 // stays at bottom), followed by the region being used as a GC 2102 // alloc region (NTAMS will move to top() and the objects 2103 // originally below it will be grayed). All objects now marked in 2104 // the region are explicitly grayed, if below the global finger, 2105 // and we do not need in fact to scan anything else. So, we simply 2106 // set _finger to be limit to ensure that the bitmap iteration 2107 // doesn't do anything. 2108 _finger = limit; 2109 } 2110 2111 _region_limit = limit; 2112 } 2113 2114 void G1CMTask::giveup_current_region() { 2115 assert(_curr_region != NULL, "invariant"); 2116 clear_region_fields(); 2117 } 2118 2119 void G1CMTask::clear_region_fields() { 2120 // Values for these three fields that indicate that we're not 2121 // holding on to a region. 2122 _curr_region = NULL; 2123 _finger = NULL; 2124 _region_limit = NULL; 2125 } 2126 2127 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) { 2128 if (cm_oop_closure == NULL) { 2129 assert(_cm_oop_closure != NULL, "invariant"); 2130 } else { 2131 assert(_cm_oop_closure == NULL, "invariant"); 2132 } 2133 _cm_oop_closure = cm_oop_closure; 2134 } 2135 2136 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) { 2137 guarantee(next_mark_bitmap != NULL, "invariant"); 2138 _next_mark_bitmap = next_mark_bitmap; 2139 clear_region_fields(); 2140 2141 _calls = 0; 2142 _elapsed_time_ms = 0.0; 2143 _termination_time_ms = 0.0; 2144 _termination_start_time_ms = 0.0; 2145 } 2146 2147 bool G1CMTask::should_exit_termination() { 2148 regular_clock_call(); 2149 // This is called when we are in the termination protocol. We should 2150 // quit if, for some reason, this task wants to abort or the global 2151 // stack is not empty (this means that we can get work from it). 2152 return !_cm->mark_stack_empty() || has_aborted(); 2153 } 2154 2155 void G1CMTask::reached_limit() { 2156 assert(_words_scanned >= _words_scanned_limit || 2157 _refs_reached >= _refs_reached_limit , 2158 "shouldn't have been called otherwise"); 2159 regular_clock_call(); 2160 } 2161 2162 void G1CMTask::regular_clock_call() { 2163 if (has_aborted()) return; 2164 2165 // First, we need to recalculate the words scanned and refs reached 2166 // limits for the next clock call. 2167 recalculate_limits(); 2168 2169 // During the regular clock call we do the following 2170 2171 // (1) If an overflow has been flagged, then we abort. 2172 if (_cm->has_overflown()) { 2173 set_has_aborted(); 2174 return; 2175 } 2176 2177 // If we are not concurrent (i.e. we're doing remark) we don't need 2178 // to check anything else. The other steps are only needed during 2179 // the concurrent marking phase. 2180 if (!_concurrent) { 2181 return; 2182 } 2183 2184 // (2) If marking has been aborted for Full GC, then we also abort. 2185 if (_cm->has_aborted()) { 2186 set_has_aborted(); 2187 return; 2188 } 2189 2190 double curr_time_ms = os::elapsedVTime() * 1000.0; 2191 2192 // (4) We check whether we should yield. If we have to, then we abort. 2193 if (SuspendibleThreadSet::should_yield()) { 2194 // We should yield. To do this we abort the task. The caller is 2195 // responsible for yielding. 2196 set_has_aborted(); 2197 return; 2198 } 2199 2200 // (5) We check whether we've reached our time quota. If we have, 2201 // then we abort. 
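// (_time_target_ms is the target passed to do_marking_step(), reduced by the
// predicted overshoot; see the start of do_marking_step().)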
2202 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2203 if (elapsed_time_ms > _time_target_ms) {
2204 set_has_aborted();
2205 _has_timed_out = true;
2206 return;
2207 }
2208
2209 // (6) Finally, we check whether there are enough completed SATB
2210 // buffers available for processing. If there are, we abort.
2211 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2212 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2213 // we do need to process SATB buffers, so we'll abort and restart
2214 // the marking task to do so
2215 set_has_aborted();
2216 return;
2217 }
2218 }
2219
2220 void G1CMTask::recalculate_limits() {
2221 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2222 _words_scanned_limit = _real_words_scanned_limit;
2223
2224 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2225 _refs_reached_limit = _real_refs_reached_limit;
2226 }
2227
2228 void G1CMTask::decrease_limits() {
2229 // This is called when we believe that we're going to do an infrequent
2230 // operation which will increase the per byte scanned cost (i.e. move
2231 // entries to/from the global stack). It basically tries to decrease the
2232 // scanning limit so that the clock is called earlier.
2233
2234 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2235 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2236 }
2237
2238 void G1CMTask::move_entries_to_global_stack() {
2239 // Local array where we'll store the entries that will be popped
2240 // from the local queue.
2241 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2242
2243 size_t n = 0;
2244 G1TaskQueueEntry task_entry;
2245 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2246 buffer[n] = task_entry;
2247 ++n;
2248 }
2249 if (n < G1CMMarkStack::EntriesPerChunk) {
2250 buffer[n] = G1TaskQueueEntry();
2251 }
2252
2253 if (n > 0) {
2254 if (!_cm->mark_stack_push(buffer)) {
2255 set_has_aborted();
2256 }
2257 }
2258
2259 // This operation was quite expensive, so decrease the limits.
2260 decrease_limits();
2261 }
2262
2263 bool G1CMTask::get_entries_from_global_stack() {
2264 // Local array where we'll store the entries that will be popped
2265 // from the global stack.
2266 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2267
2268 if (!_cm->mark_stack_pop(buffer)) {
2269 return false;
2270 }
2271
2272 // We did actually pop at least one entry.
2273 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2274 G1TaskQueueEntry task_entry = buffer[i];
2275 if (task_entry.is_null()) {
2276 break;
2277 }
2278 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2279 bool success = _task_queue->push(task_entry);
2280 // We only call this when the local queue is empty or under a
2281 // given target limit. So, we do not expect this push to fail.
2282 assert(success, "invariant");
2283 }
2284
2285 // This operation was quite expensive, so decrease the limits
2286 decrease_limits();
2287 return true;
2288 }
2289
2290 void G1CMTask::drain_local_queue(bool partially) {
2291 if (has_aborted()) {
2292 return;
2293 }
2294
2295 // Decide what the target size is, depending on whether we're going to
2296 // drain it partially (so that other tasks can steal if they run out
2297 // of things to do) or totally (at the very end).
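// For a partial drain we keep roughly a third of the queue's capacity
// (bounded by GCDrainStackTargetSize) so that other tasks can still steal;
// a total drain empties the queue completely.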
2298 size_t target_size;
2299 if (partially) {
2300 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2301 } else {
2302 target_size = 0;
2303 }
2304
2305 if (_task_queue->size() > target_size) {
2306 G1TaskQueueEntry entry;
2307 bool ret = _task_queue->pop_local(entry);
2308 while (ret) {
2309 scan_task_entry(entry);
2310 if (_task_queue->size() <= target_size || has_aborted()) {
2311 ret = false;
2312 } else {
2313 ret = _task_queue->pop_local(entry);
2314 }
2315 }
2316 }
2317 }
2318
2319 void G1CMTask::drain_global_stack(bool partially) {
2320 if (has_aborted()) return;
2321
2322 // We have a policy to drain the local queue before we attempt to
2323 // drain the global stack.
2324 assert(partially || _task_queue->size() == 0, "invariant");
2325
2326 // Decide what the target size is, depending on whether we're going to
2327 // drain it partially (so that other tasks can steal if they run out
2328 // of things to do) or totally (at the very end).
2329 // Notice that when draining the global mark stack partially, due to the raciness
2330 // of the mark stack size update we might in fact drop below the target. But,
2331 // this is not a problem.
2332 // In case of total draining, we simply process until the global mark stack is
2333 // totally empty, disregarding the size counter.
2334 if (partially) {
2335 size_t const target_size = _cm->partial_mark_stack_size_target();
2336 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2337 if (get_entries_from_global_stack()) {
2338 drain_local_queue(partially);
2339 }
2340 }
2341 } else {
2342 while (!has_aborted() && get_entries_from_global_stack()) {
2343 drain_local_queue(partially);
2344 }
2345 }
2346 }
2347
2348 // The SATB queue code makes several assumptions about whether to call the par or
2349 // non-par versions of the methods. This is why some of the code is
2350 // replicated. We should really get rid of the single-threaded version
2351 // of the code to simplify things.
2352 void G1CMTask::drain_satb_buffers() {
2353 if (has_aborted()) return;
2354
2355 // We set this so that the regular clock knows that we're in the
2356 // middle of draining buffers and doesn't set the abort flag when it
2357 // notices that SATB buffers are available for draining. It'd be
2358 // very counterproductive if it did that. :-)
2359 _draining_satb_buffers = true;
2360
2361 G1CMSATBBufferClosure satb_cl(this, _g1h);
2362 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2363
2364 // This keeps claiming and applying the closure to completed buffers
2365 // until we run out of buffers or we need to abort.
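// regular_clock_call() is invoked after each buffer so that the usual abort
// conditions (yield requests, the time target, overflow) are still honored
// while draining.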
2366 while (!has_aborted() && 2367 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 2368 regular_clock_call(); 2369 } 2370 2371 _draining_satb_buffers = false; 2372 2373 assert(has_aborted() || 2374 _concurrent || 2375 satb_mq_set.completed_buffers_num() == 0, "invariant"); 2376 2377 // again, this was a potentially expensive operation, decrease the 2378 // limits to get the regular clock call early 2379 decrease_limits(); 2380 } 2381 2382 void G1CMTask::print_stats() { 2383 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", 2384 _worker_id, _calls); 2385 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 2386 _elapsed_time_ms, _termination_time_ms); 2387 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 2388 _step_times_ms.num(), _step_times_ms.avg(), 2389 _step_times_ms.sd()); 2390 log_debug(gc, stats)(" max = %1.2lfms, total = %1.2lfms", 2391 _step_times_ms.maximum(), _step_times_ms.sum()); 2392 } 2393 2394 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) { 2395 return _task_queues->steal(worker_id, hash_seed, task_entry); 2396 } 2397 2398 /***************************************************************************** 2399 2400 The do_marking_step(time_target_ms, ...) method is the building 2401 block of the parallel marking framework. It can be called in parallel 2402 with other invocations of do_marking_step() on different tasks 2403 (but only one per task, obviously) and concurrently with the 2404 mutator threads, or during remark, hence it eliminates the need 2405 for two versions of the code. When called during remark, it will 2406 pick up from where the task left off during the concurrent marking 2407 phase. Interestingly, tasks are also claimable during evacuation 2408 pauses too, since do_marking_step() ensures that it aborts before 2409 it needs to yield. 2410 2411 The data structures that it uses to do marking work are the 2412 following: 2413 2414 (1) Marking Bitmap. If there are gray objects that appear only 2415 on the bitmap (this happens either when dealing with an overflow 2416 or when the initial marking phase has simply marked the roots 2417 and didn't push them on the stack), then tasks claim heap 2418 regions whose bitmap they then scan to find gray objects. A 2419 global finger indicates where the end of the last claimed region 2420 is. A local finger indicates how far into the region a task has 2421 scanned. The two fingers are used to determine how to gray an 2422 object (i.e. whether simply marking it is OK, as it will be 2423 visited by a task in the future, or whether it needs to be also 2424 pushed on a stack). 2425 2426 (2) Local Queue. The local queue of the task which is accessed 2427 reasonably efficiently by the task. Other tasks can steal from 2428 it when they run out of work. Throughout the marking phase, a 2429 task attempts to keep its local queue short but not totally 2430 empty, so that entries are available for stealing by other 2431 tasks. Only when there is no more work, a task will totally 2432 drain its local queue. 2433 2434 (3) Global Mark Stack. This handles local queue overflow. During 2435 marking only sets of entries are moved between it and the local 2436 queues, as access to it requires a mutex and more fine-grain 2437 interaction with it which might cause contention. If it 2438 overflows, then the marking phase should restart and iterate 2439 over the bitmap to identify gray objects. 
Throughout the marking
2440 phase, tasks attempt to keep the global mark stack at a small
2441 length but not totally empty, so that entries are available for
2442 popping by other tasks. Only when there is no more work will tasks
2443 totally drain the global mark stack.
2444
2445 (4) SATB Buffer Queue. This is where completed SATB buffers are
2446 made available. Buffers are regularly removed from this queue
2447 and scanned for roots, so that the queue doesn't get too
2448 long. During remark, all completed buffers are processed, as
2449 well as the filled in parts of any uncompleted buffers.
2450
2451 The do_marking_step() method tries to abort when the time target
2452 has been reached. There are a few other cases when the
2453 do_marking_step() method also aborts:
2454
2455 (1) When the marking phase has been aborted (after a Full GC).
2456
2457 (2) When a global overflow (on the global stack) has been
2458 triggered. Before the task aborts, it will actually sync up with
2459 the other tasks to ensure that all the marking data structures
2460 (local queues, stacks, fingers etc.) are re-initialized so that
2461 when do_marking_step() completes, the marking phase can
2462 immediately restart.
2463
2464 (3) When enough completed SATB buffers are available. The
2465 do_marking_step() method only tries to drain SATB buffers right
2466 at the beginning. So, if enough buffers are available, the
2467 marking step aborts and the SATB buffers are processed at
2468 the beginning of the next invocation.
2469
2470 (4) To yield. When we have to yield, we abort and yield
2471 right at the end of do_marking_step(). This saves us from a lot
2472 of hassle as, by yielding, we might allow a Full GC. If this
2473 happens then objects will be compacted underneath our feet, the
2474 heap might shrink, etc. We save checking for this by just
2475 aborting and doing the yield right at the end.
2476
2477 From the above it follows that the do_marking_step() method should
2478 be called in a loop (or, otherwise, regularly) until it completes.
2479
2480 If a marking step completes without its has_aborted() flag being
2481 true, it means it has completed the current marking phase (and
2482 also all other marking tasks have done so and have all synced up).
2483
2484 A method called regular_clock_call() is invoked "regularly" (in
2485 sub-ms intervals) throughout marking. It is this clock method that
2486 checks all the abort conditions which were mentioned above and
2487 decides when the task should abort. A work-based scheme is used to
2488 trigger this clock method: when the number of object words the
2489 marking phase has scanned or the number of references the marking
2490 phase has visited reaches a given limit. Additional calls to
2491 the clock method have been planted in a few other strategic places
2492 too. The initial reason for the clock method was to avoid calling
2493 vtime too regularly, as it is quite expensive. So, once it was in
2494 place, it was natural to piggy-back all the other conditions on it
2495 too and not constantly check them throughout the code.
2496
2497 If do_termination is true then do_marking_step will enter its
2498 termination protocol.
2499
2500 The value of is_serial must be true when do_marking_step is being
2501 called serially (i.e. by the VMThread) and do_marking_step should
2502 skip any synchronization in the termination and overflow code.
2503 Examples include the serial remark code and the serial reference
2504 processing closures.
2505 2506 The value of is_serial must be false when do_marking_step is 2507 being called by any of the worker threads in a work gang. 2508 Examples include the concurrent marking code (CMMarkingTask), 2509 the MT remark code, and the MT reference processing closures. 2510 2511 *****************************************************************************/ 2512 2513 void G1CMTask::do_marking_step(double time_target_ms, 2514 bool do_termination, 2515 bool is_serial) { 2516 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2517 assert(_concurrent == _cm->concurrent(), "they should be the same"); 2518 2519 _start_time_ms = os::elapsedVTime() * 1000.0; 2520 2521 // If do_stealing is true then do_marking_step will attempt to 2522 // steal work from the other G1CMTasks. It only makes sense to 2523 // enable stealing when the termination protocol is enabled 2524 // and do_marking_step() is not being called serially. 2525 bool do_stealing = do_termination && !is_serial; 2526 2527 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 2528 _time_target_ms = time_target_ms - diff_prediction_ms; 2529 2530 // set up the variables that are used in the work-based scheme to 2531 // call the regular clock method 2532 _words_scanned = 0; 2533 _refs_reached = 0; 2534 recalculate_limits(); 2535 2536 // clear all flags 2537 clear_has_aborted(); 2538 _has_timed_out = false; 2539 _draining_satb_buffers = false; 2540 2541 ++_calls; 2542 2543 // Set up the bitmap and oop closures. Anything that uses them is 2544 // eventually called from this method, so it is OK to allocate these 2545 // statically. 2546 G1CMBitMapClosure bitmap_closure(this, _cm); 2547 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 2548 set_cm_oop_closure(&cm_oop_closure); 2549 2550 if (_cm->has_overflown()) { 2551 // This can happen if the mark stack overflows during a GC pause 2552 // and this task, after a yield point, restarts. We have to abort 2553 // as we need to get into the overflow protocol which happens 2554 // right at the end of this task. 2555 set_has_aborted(); 2556 } 2557 2558 // First drain any available SATB buffers. After this, we will not 2559 // look at SATB buffers before the next invocation of this method. 2560 // If enough completed SATB buffers are queued up, the regular clock 2561 // will abort this task so that it restarts. 2562 drain_satb_buffers(); 2563 // ...then partially drain the local queue and the global stack 2564 drain_local_queue(true); 2565 drain_global_stack(true); 2566 2567 do { 2568 if (!has_aborted() && _curr_region != NULL) { 2569 // This means that we're already holding on to a region. 2570 assert(_finger != NULL, "if region is not NULL, then the finger " 2571 "should not be NULL either"); 2572 2573 // We might have restarted this task after an evacuation pause 2574 // which might have evacuated the region we're holding on to 2575 // underneath our feet. Let's read its limit again to make sure 2576 // that we do not iterate over a region of the heap that 2577 // contains garbage (update_region_limit() will also move 2578 // _finger to the start of the region if it is found empty). 2579 update_region_limit(); 2580 // We will start from _finger not from the start of the region, 2581 // as we might be restarting this task after aborting half-way 2582 // through scanning this region. In this case, _finger points to 2583 // the address where we last found a marked object. If this is a 2584 // fresh region, _finger points to start(). 
2585 MemRegion mr = MemRegion(_finger, _region_limit);
2586
2587 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2588 "humongous regions should go around loop once only");
2589
2590 // Some special cases:
2591 // If the memory region is empty, we can just give up the region.
2592 // If the current region is humongous then we only need to check
2593 // the bitmap for the bit associated with the start of the object,
2594 // scan the object if it's live, and give up the region.
2595 // Otherwise, let's iterate over the bitmap of the part of the region
2596 // that is left.
2597 // If the iteration is successful, give up the region.
2598 if (mr.is_empty()) {
2599 giveup_current_region();
2600 regular_clock_call();
2601 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2602 if (_next_mark_bitmap->is_marked(mr.start())) {
2603 // The object is marked - apply the closure
2604 bitmap_closure.do_addr(mr.start());
2605 }
2606 // Even if this task aborted while scanning the humongous object
2607 // we can (and should) give up the current region.
2608 giveup_current_region();
2609 regular_clock_call();
2610 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2611 giveup_current_region();
2612 regular_clock_call();
2613 } else {
2614 assert(has_aborted(), "currently the only way to do so");
2615 // The only way to abort the bitmap iteration is to return
2616 // false from the do_addr() method. However, inside the
2617 // do_addr() method we move the _finger to point to the
2618 // object currently being looked at. So, if we bail out, we
2619 // have definitely set _finger to something non-null.
2620 assert(_finger != NULL, "invariant");
2621
2622 // Region iteration was actually aborted. So now _finger
2623 // points to the address of the object we last scanned. If we
2624 // leave it there, when we restart this task, we will rescan
2625 // the object. It is easy to avoid this. We move the finger by
2626 // enough to point to the next possible object header.
2627 assert(_finger < _region_limit, "invariant");
2628 HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2629 // Check if bitmap iteration was aborted while scanning the last object
2630 if (new_finger >= _region_limit) {
2631 giveup_current_region();
2632 } else {
2633 move_finger_to(new_finger);
2634 }
2635 }
2636 }
2637 // At this point we have either completed iterating over the
2638 // region we were holding on to, or we have aborted.
2639
2640 // We then partially drain the local queue and the global stack.
2641 // (Do we really need this?)
2642 drain_local_queue(true);
2643 drain_global_stack(true);
2644
2645 // Read the note on the claim_region() method on why it might
2646 // return NULL with potentially more regions available for
2647 // claiming and why we have to check out_of_regions() to determine
2648 // whether we're done or not.
2649 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2650 // We are going to try to claim a new region. We should have
2651 // given up on the previous one.
2652 // Separated the asserts so that we know which one fires.
2653 assert(_curr_region == NULL, "invariant"); 2654 assert(_finger == NULL, "invariant"); 2655 assert(_region_limit == NULL, "invariant"); 2656 HeapRegion* claimed_region = _cm->claim_region(_worker_id); 2657 if (claimed_region != NULL) { 2658 // Yes, we managed to claim one 2659 setup_for_region(claimed_region); 2660 assert(_curr_region == claimed_region, "invariant"); 2661 } 2662 // It is important to call the regular clock here. It might take 2663 // a while to claim a region if, for example, we hit a large 2664 // block of empty regions. So we need to call the regular clock 2665 // method once round the loop to make sure it's called 2666 // frequently enough. 2667 regular_clock_call(); 2668 } 2669 2670 if (!has_aborted() && _curr_region == NULL) { 2671 assert(_cm->out_of_regions(), 2672 "at this point we should be out of regions"); 2673 } 2674 } while ( _curr_region != NULL && !has_aborted()); 2675 2676 if (!has_aborted()) { 2677 // We cannot check whether the global stack is empty, since other 2678 // tasks might be pushing objects to it concurrently. 2679 assert(_cm->out_of_regions(), 2680 "at this point we should be out of regions"); 2681 // Try to reduce the number of available SATB buffers so that 2682 // remark has less work to do. 2683 drain_satb_buffers(); 2684 } 2685 2686 // Since we've done everything else, we can now totally drain the 2687 // local queue and global stack. 2688 drain_local_queue(false); 2689 drain_global_stack(false); 2690 2691 // Attempt at work stealing from other task's queues. 2692 if (do_stealing && !has_aborted()) { 2693 // We have not aborted. This means that we have finished all that 2694 // we could. Let's try to do some stealing... 2695 2696 // We cannot check whether the global stack is empty, since other 2697 // tasks might be pushing objects to it concurrently. 2698 assert(_cm->out_of_regions() && _task_queue->size() == 0, 2699 "only way to reach here"); 2700 while (!has_aborted()) { 2701 G1TaskQueueEntry entry; 2702 if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) { 2703 scan_task_entry(entry); 2704 2705 // And since we're towards the end, let's totally drain the 2706 // local queue and global stack. 2707 drain_local_queue(false); 2708 drain_global_stack(false); 2709 } else { 2710 break; 2711 } 2712 } 2713 } 2714 2715 // We still haven't aborted. Now, let's try to get into the 2716 // termination protocol. 2717 if (do_termination && !has_aborted()) { 2718 // We cannot check whether the global stack is empty, since other 2719 // tasks might be concurrently pushing objects on it. 2720 // Separated the asserts so that we know which one fires. 2721 assert(_cm->out_of_regions(), "only way to reach here"); 2722 assert(_task_queue->size() == 0, "only way to reach here"); 2723 _termination_start_time_ms = os::elapsedVTime() * 1000.0; 2724 2725 // The G1CMTask class also extends the TerminatorTerminator class, 2726 // hence its should_exit_termination() method will also decide 2727 // whether to exit the termination protocol or not. 2728 bool finished = (is_serial || 2729 _cm->terminator()->offer_termination(this)); 2730 double termination_end_time_ms = os::elapsedVTime() * 1000.0; 2731 _termination_time_ms += 2732 termination_end_time_ms - _termination_start_time_ms; 2733 2734 if (finished) { 2735 // We're all done. 2736 2737 if (_worker_id == 0) { 2738 // Let's allow task 0 to do this 2739 if (_concurrent) { 2740 assert(_cm->concurrent_marking_in_progress(), "invariant"); 2741 // We need to set this to false before the next 2742 // safepoint. 
This way we ensure that the marking phase 2743 // doesn't observe any more heap expansions. 2744 _cm->clear_concurrent_marking_in_progress(); 2745 } 2746 } 2747 2748 // We can now guarantee that the global stack is empty, since 2749 // all other tasks have finished. We separated the guarantees so 2750 // that, if a condition is false, we can immediately find out 2751 // which one. 2752 guarantee(_cm->out_of_regions(), "only way to reach here"); 2753 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 2754 guarantee(_task_queue->size() == 0, "only way to reach here"); 2755 guarantee(!_cm->has_overflown(), "only way to reach here"); 2756 } else { 2757 // Apparently there's more work to do. Let's abort this task. It 2758 // will restart it and we can hopefully find more things to do. 2759 set_has_aborted(); 2760 } 2761 } 2762 2763 // Mainly for debugging purposes to make sure that a pointer to the 2764 // closure which was statically allocated in this frame doesn't 2765 // escape it by accident. 2766 set_cm_oop_closure(NULL); 2767 double end_time_ms = os::elapsedVTime() * 1000.0; 2768 double elapsed_time_ms = end_time_ms - _start_time_ms; 2769 // Update the step history. 2770 _step_times_ms.add(elapsed_time_ms); 2771 2772 if (has_aborted()) { 2773 // The task was aborted for some reason. 2774 if (_has_timed_out) { 2775 double diff_ms = elapsed_time_ms - _time_target_ms; 2776 // Keep statistics of how well we did with respect to hitting 2777 // our target only if we actually timed out (if we aborted for 2778 // other reasons, then the results might get skewed). 2779 _marking_step_diffs_ms.add(diff_ms); 2780 } 2781 2782 if (_cm->has_overflown()) { 2783 // This is the interesting one. We aborted because a global 2784 // overflow was raised. This means we have to restart the 2785 // marking phase and start iterating over regions. However, in 2786 // order to do this we have to make sure that all tasks stop 2787 // what they are doing and re-initialize in a safe manner. We 2788 // will achieve this with the use of two barrier sync points. 2789 2790 if (!is_serial) { 2791 // We only need to enter the sync barrier if being called 2792 // from a parallel context 2793 _cm->enter_first_sync_barrier(_worker_id); 2794 2795 // When we exit this sync barrier we know that all tasks have 2796 // stopped doing marking work. So, it's now safe to 2797 // re-initialize our data structures. At the end of this method, 2798 // task 0 will clear the global data structures. 2799 } 2800 2801 // We clear the local state of this task... 2802 clear_region_fields(); 2803 2804 if (!is_serial) { 2805 // ...and enter the second barrier. 2806 _cm->enter_second_sync_barrier(_worker_id); 2807 } 2808 // At this point, if we're during the concurrent phase of 2809 // marking, everything has been re-initialized and we're 2810 // ready to restart. 
2811 } 2812 } 2813 } 2814 2815 G1CMTask::G1CMTask(uint worker_id, G1ConcurrentMark* cm, G1CMTaskQueue* task_queue) : 2816 _objArray_processor(this), 2817 _worker_id(worker_id), 2818 _g1h(G1CollectedHeap::heap()), 2819 _cm(cm), 2820 _next_mark_bitmap(NULL), 2821 _task_queue(task_queue), 2822 _calls(0), 2823 _time_target_ms(0.0), 2824 _start_time_ms(0.0), 2825 _cm_oop_closure(NULL), 2826 _curr_region(NULL), 2827 _finger(NULL), 2828 _region_limit(NULL), 2829 _words_scanned(0), 2830 _words_scanned_limit(0), 2831 _real_words_scanned_limit(0), 2832 _refs_reached(0), 2833 _refs_reached_limit(0), 2834 _real_refs_reached_limit(0), 2835 _hash_seed(17), 2836 _has_aborted(false), 2837 _has_timed_out(false), 2838 _draining_satb_buffers(false), 2839 _step_times_ms(), 2840 _elapsed_time_ms(0.0), 2841 _termination_time_ms(0.0), 2842 _termination_start_time_ms(0.0), 2843 _concurrent(false), 2844 _marking_step_diffs_ms() 2845 { 2846 guarantee(task_queue != NULL, "invariant"); 2847 2848 _marking_step_diffs_ms.add(0.5); 2849 } 2850 2851 // These are formatting macros that are used below to ensure 2852 // consistent formatting. The *_H_* versions are used to format the 2853 // header for a particular value and they should be kept consistent 2854 // with the corresponding macro. Also note that most of the macros add 2855 // the necessary white space (as a prefix) which makes them a bit 2856 // easier to compose. 2857 2858 // All the output lines are prefixed with this string to be able to 2859 // identify them easily in a large log file. 2860 #define G1PPRL_LINE_PREFIX "###" 2861 2862 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2863 #ifdef _LP64 2864 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2865 #else // _LP64 2866 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2867 #endif // _LP64 2868 2869 // For per-region info 2870 #define G1PPRL_TYPE_FORMAT " %-4s" 2871 #define G1PPRL_TYPE_H_FORMAT " %4s" 2872 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2873 #define G1PPRL_BYTE_H_FORMAT " %9s" 2874 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2875 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2876 2877 // For summary info 2878 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2879 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2880 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2881 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2882 2883 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2884 _total_used_bytes(0), _total_capacity_bytes(0), 2885 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2886 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2887 { 2888 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2889 MemRegion g1_reserved = g1h->g1_reserved(); 2890 double now = os::elapsedTime(); 2891 2892 // Print the header of the output. 
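// The header is followed by one data row per region from do_heap_region()
// and a summary footer printed by the destructor.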
2893 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2894 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2895 G1PPRL_SUM_ADDR_FORMAT("reserved") 2896 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2897 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2898 HeapRegion::GrainBytes); 2899 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2900 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2901 G1PPRL_TYPE_H_FORMAT 2902 G1PPRL_ADDR_BASE_H_FORMAT 2903 G1PPRL_BYTE_H_FORMAT 2904 G1PPRL_BYTE_H_FORMAT 2905 G1PPRL_BYTE_H_FORMAT 2906 G1PPRL_DOUBLE_H_FORMAT 2907 G1PPRL_BYTE_H_FORMAT 2908 G1PPRL_BYTE_H_FORMAT, 2909 "type", "address-range", 2910 "used", "prev-live", "next-live", "gc-eff", 2911 "remset", "code-roots"); 2912 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2913 G1PPRL_TYPE_H_FORMAT 2914 G1PPRL_ADDR_BASE_H_FORMAT 2915 G1PPRL_BYTE_H_FORMAT 2916 G1PPRL_BYTE_H_FORMAT 2917 G1PPRL_BYTE_H_FORMAT 2918 G1PPRL_DOUBLE_H_FORMAT 2919 G1PPRL_BYTE_H_FORMAT 2920 G1PPRL_BYTE_H_FORMAT, 2921 "", "", 2922 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 2923 "(bytes)", "(bytes)"); 2924 } 2925 2926 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { 2927 const char* type = r->get_type_str(); 2928 HeapWord* bottom = r->bottom(); 2929 HeapWord* end = r->end(); 2930 size_t capacity_bytes = r->capacity(); 2931 size_t used_bytes = r->used(); 2932 size_t prev_live_bytes = r->live_bytes(); 2933 size_t next_live_bytes = r->next_live_bytes(); 2934 double gc_eff = r->gc_efficiency(); 2935 size_t remset_bytes = r->rem_set()->mem_size(); 2936 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 2937 2938 _total_used_bytes += used_bytes; 2939 _total_capacity_bytes += capacity_bytes; 2940 _total_prev_live_bytes += prev_live_bytes; 2941 _total_next_live_bytes += next_live_bytes; 2942 _total_remset_bytes += remset_bytes; 2943 _total_strong_code_roots_bytes += strong_code_roots_bytes; 2944 2945 // Print a line for this particular region. 2946 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2947 G1PPRL_TYPE_FORMAT 2948 G1PPRL_ADDR_BASE_FORMAT 2949 G1PPRL_BYTE_FORMAT 2950 G1PPRL_BYTE_FORMAT 2951 G1PPRL_BYTE_FORMAT 2952 G1PPRL_DOUBLE_FORMAT 2953 G1PPRL_BYTE_FORMAT 2954 G1PPRL_BYTE_FORMAT, 2955 type, p2i(bottom), p2i(end), 2956 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 2957 remset_bytes, strong_code_roots_bytes); 2958 2959 return false; 2960 } 2961 2962 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 2963 // add static memory usages to remembered set sizes 2964 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 2965 // Print the footer of the output. 2966 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2967 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2968 " SUMMARY" 2969 G1PPRL_SUM_MB_FORMAT("capacity") 2970 G1PPRL_SUM_MB_PERC_FORMAT("used") 2971 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 2972 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 2973 G1PPRL_SUM_MB_FORMAT("remset") 2974 G1PPRL_SUM_MB_FORMAT("code-roots"), 2975 bytes_to_mb(_total_capacity_bytes), 2976 bytes_to_mb(_total_used_bytes), 2977 percent_of(_total_used_bytes, _total_capacity_bytes), 2978 bytes_to_mb(_total_prev_live_bytes), 2979 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 2980 bytes_to_mb(_total_next_live_bytes), 2981 percent_of(_total_next_live_bytes, _total_capacity_bytes), 2982 bytes_to_mb(_total_remset_bytes), 2983 bytes_to_mb(_total_strong_code_roots_bytes)); 2984 }