/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

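// The global overflow mark stack below is organized as a list of fixed-size
// chunks carved out of an mmap'ed backing array. Chunks in use are linked on
// _chunk_list, recycled chunks are kept on _free_list, and fresh chunks are
// bump-allocated from the backing array via the _hwm index. Tasks always push
// and pop whole chunks, never individual entries (see par_push_chunk() and
// par_pop_chunk()).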
G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}
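// Note: mark stack capacities are expressed in numbers of G1TaskQueueEntry
// elements, so capacity_alignment() converts the OS allocation granularity
// and the chunk size into an entry count; capacities rounded up to this
// alignment translate into a whole number of mmap'able chunks.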

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

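// Doubles the chunk capacity, up to _max_chunk_capacity. Called from
// reset_marking_state() after marking has overflowed the stack.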
void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

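// par_push_chunk() and par_pop_chunk() transfer exactly EntriesPerChunk
// entries between the caller's buffer and a chunk. Pushing prefers a recycled
// chunk from the free list before bump-allocating a new one from the backing
// memory. The list locks are only held while linking and unlinking chunks;
// the bulk copy itself happens outside any lock.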
bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

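// Root regions are scanned concurrently, and that scan has to be finished
// before regions may be evacuated (see wait_until_scan_finished()). Currently
// only the survivor regions from the initial-mark pause act as root regions;
// worker threads claim them one at a time via an atomically incremented index
// (see claim_next()).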
G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

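// Waits for the concurrent root region scan to complete. Returns true if a
// scan was in progress and we had to wait for it to finish, false if scanning
// had already finished (or never started).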
bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
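// For example, num_gc_workers = 8 yields (8 + 2) / 4 = 2 concurrent workers;
// the MAX2 keeps the result at least one for very small GC worker counts.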
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _cleanup_list("Concurrent Mark Cleanup List"),
  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap_start(_g1h->reserved_region().start()),
  _heap_end(_g1h->reserved_region().end()),

  _root_regions(),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0)
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue);

    _accum_task_vtime[i] = 0.0;
  }

  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void G1ConcurrentMark::reset_marking_state() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();
  }

  clear_has_overflown();
  _finger = _heap_start;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _num_active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

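// Clears the given bitmap in chunks of G1ClearBitMapTask::chunk_size() bytes
// so that, when yielding is allowed, workers can react to safepoint requests
// between chunks instead of holding up a safepoint for a whole region's
// bitmap. The number of workers is capped by the number of chunks to clear.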
void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_concurrent_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently with the mutator, so we must make sure
    // that the result of the get_next_marked_addr() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
  CheckBitmapClearHRClosure cl(_next_mark_bitmap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpoint_roots_initial_pre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpoint_roots_initial_post() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

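// The two barriers below implement the restart-for-overflow protocol: all
// tasks first rendezvous at the first barrier so that no task is still
// mutating shared marking state, worker 0 then resets the global marking
// state (when executing concurrently), and every task re-initializes its own
// local data structures before meeting again at the second barrier and
// resuming.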
void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state();

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark*     _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial */);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                      1, /* Minimum workers */
                                                      _num_concurrent_workers,
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_worker_threads() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this, cm_thread());
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpoint_roots_final_work();

  double mark_work_end = os::elapsedTime();

  weak_refs_work(clear_all_soft_refs);

  if (has_overflown()) {
    // We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

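// Closure applied to every heap region at the end of marking (during the
// cleanup pause). It notes the end of marking in each region and immediately
// frees regions that marking found completely dead (used() > 0 but no live
// bytes, excluding young and archive regions), counting freed old and
// humongous regions separately; all other regions get their remembered sets
// cleaned up.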
class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
      AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc) trace("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc) trace("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swap_mark_bitmaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the remembered sets here, before the
  // record_concurrent_mark_cleanup_end() call below, since scrubbing affects
  // the metric by which we sort the heap regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// state. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask*         _task;
  int               _ref_counter_limit;
  int               _ref_counter;
  bool              _is_serial;
 public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task),
    _ref_counter_limit(G1RefProcDrainInterval), _is_serial(is_serial) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false      /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.
1422 
1423 class G1CMDrainMarkingStackClosure: public VoidClosure {
1424   G1ConcurrentMark* _cm;
1425   G1CMTask*         _task;
1426   bool              _is_serial;
1427  public:
1428   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1429     _cm(cm), _task(task), _is_serial(is_serial) {
1430     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1431   }
1432 
1433   void do_void() {
1434     do {
1435       // We call G1CMTask::do_marking_step() to completely drain the local
1436       // and global marking stacks of entries pushed by the 'keep alive'
1437       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1438       //
1439       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1440       // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
1442       // closure to the entries on the discovered ref lists) or we overflow
1443       // the global marking stack.
1444       //
1445       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1446       // flag while there may still be some work to do. (See the comment at
1447       // the beginning of G1CMTask::do_marking_step() for those conditions -
1448       // one of which is reaching the specified time target.) It is only
1449       // when G1CMTask::do_marking_step() returns without setting the
1450       // has_aborted() flag that the marking step has completed.
1451 
1452       _task->do_marking_step(1000000000.0 /* something very large */,
1453                              true         /* do_termination */,
1454                              _is_serial);
1455     } while (_task->has_aborted() && !_cm->has_overflown());
1456   }
1457 };
1458 
1459 // Implementation of AbstractRefProcTaskExecutor for parallel
1460 // reference processing at the end of G1 concurrent marking
1461 
1462 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
1463 private:
1464   G1CollectedHeap*  _g1h;
1465   G1ConcurrentMark* _cm;
1466   WorkGang*         _workers;
1467   uint              _active_workers;
1468 
1469 public:
1470   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1471                           G1ConcurrentMark* cm,
1472                           WorkGang* workers,
1473                           uint n_workers) :
1474     _g1h(g1h), _cm(cm),
1475     _workers(workers), _active_workers(n_workers) { }
1476 
1477   // Executes the given task using concurrent marking worker threads.
1478   virtual void execute(ProcessTask& task);
1479   virtual void execute(EnqueueTask& task);
1480 };
1481 
1482 class G1CMRefProcTaskProxy: public AbstractGangTask {
1483   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1484   ProcessTask&      _proc_task;
1485   G1CollectedHeap*  _g1h;
1486   G1ConcurrentMark* _cm;
1487 
1488 public:
1489   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1490                        G1CollectedHeap* g1h,
1491                        G1ConcurrentMark* cm) :
1492     AbstractGangTask("Process reference objects in parallel"),
1493     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1494     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1495     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1496   }
1497 
1498   virtual void work(uint worker_id) {
1499     ResourceMark rm;
1500     HandleMark hm;
1501     G1CMTask* task = _cm->task(worker_id);
1502     G1CMIsAliveClosure g1_is_alive(_g1h);
1503     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1504     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1505 
1506     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1507   }
1508 };
1509 
1510 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
1511   assert(_workers != NULL, "Need parallel worker threads.");
1512   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1513 
1514   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1515 
1516   // We need to reset the concurrency level before each
1517   // proxy task execution, so that the termination protocol
1518   // and overflow handling in G1CMTask::do_marking_step() knows
1519   // how many workers to wait for.
1520   _cm->set_concurrency(_active_workers);
1521   _workers->run_task(&proc_task_proxy);
1522 }
1523 
1524 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
1525   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
1526   EnqueueTask& _enq_task;
1527 
1528 public:
1529   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
1530     AbstractGangTask("Enqueue reference objects in parallel"),
1531     _enq_task(enq_task) { }
1532 
1533   virtual void work(uint worker_id) {
1534     _enq_task.work(worker_id);
1535   }
1536 };
1537 
1538 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1539   assert(_workers != NULL, "Need parallel worker threads.");
1540   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1541 
1542   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1543 
1544   // Not strictly necessary but...
1545   //
1546   // We need to reset the concurrency level before each
1547   // proxy task execution, so that the termination protocol
1548   // and overflow handling in G1CMTask::do_marking_step() knows
1549   // how many workers to wait for.
1550   _cm->set_concurrency(_active_workers);
1551   _workers->run_task(&enq_task_proxy);
1552 }
1553 
1554 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1555   if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once, so it is OK not to
    // de-populate the discovered reference lists. We could have
    // done so, but the only benefit would be that, when marking
    // restarts, fewer reference objects are discovered.
1562     return;
1563   }
1564 
1565   ResourceMark rm;
1566   HandleMark   hm;
1567 
1568   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1569 
1570   // Is alive closure.
1571   G1CMIsAliveClosure g1_is_alive(g1h);
1572 
1573   // Inner scope to exclude the cleaning of the string and symbol
1574   // tables from the displayed time.
1575   {
1576     GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1577 
1578     ReferenceProcessor* rp = g1h->ref_processor_cm();
1579 
1580     // See the comment in G1CollectedHeap::ref_processing_init()
1581     // about how reference processing currently works in G1.
1582 
1583     // Set the soft reference policy
1584     rp->setup_policy(clear_all_soft_refs);
1585     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1586 
1587     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1588     // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
1590     // JNI references during parallel reference processing.
1591     //
1592     // These closures do not need to synchronize with the worker
1593     // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // reference processing is not multi-threaded and is thus
    // performed by the current thread instead of a gang worker).
1597     //
1598     // The gang tasks involved in parallel reference processing create
1599     // their own instances of these closures, which do their own
1600     // synchronization among themselves.
1601     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1602     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1603 
1604     // We need at least one active thread. If reference processing
1605     // is not multi-threaded we use the current (VMThread) thread,
1606     // otherwise we use the work gang from the G1CollectedHeap and
1607     // we utilize all the worker threads we can.
1608     bool processing_is_mt = rp->processing_is_mt();
1609     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1610     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1611 
1612     // Parallel processing task executor.
1613     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1614                                               g1h->workers(), active_workers);
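    // Passing a NULL executor to process_discovered_references() below
    // makes the reference processor perform the processing serially on
    // the current thread.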
1615     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1616 
1617     // Set the concurrency level. The phase was already set prior to
1618     // executing the remark task.
1619     set_concurrency(active_workers);
1620 
1621     // Set the degree of MT processing here.  If the discovery was done MT,
1622     // the number of threads involved during discovery could differ from
1623     // the number of active workers.  This is OK as long as the discovered
1624     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1625     rp->set_active_mt_degree(active_workers);
1626 
1627     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
1628 
1629     // Process the weak references.
1630     const ReferenceProcessorStats& stats =
1631         rp->process_discovered_references(&g1_is_alive,
1632                                           &g1_keep_alive,
1633                                           &g1_drain_mark_stack,
1634                                           executor,
1635                                           &pt);
1636     _gc_tracer_cm->report_gc_reference_stats(stats);
1637     pt.print_all_references();
1638 
1639     // The do_oop work routines of the keep_alive and drain_marking_stack
1640     // oop closures will set the has_overflown flag if we overflow the
1641     // global marking stack.
1642 
1643     assert(has_overflown() || _global_mark_stack.is_empty(),
1644             "Mark stack should be empty (unless it has overflown)");
1645 
    assert(rp->num_q() == active_workers, "Reference queue count should match the number of active workers");
1647 
1648     rp->enqueue_discovered_references(executor, &pt);
1649 
1650     rp->verify_no_references_recorded();
1651 
1652     pt.print_enqueue_phase();
1653 
1654     assert(!rp->discovery_enabled(), "Post condition");
1655   }
1656 
1657   assert(has_overflown() || _global_mark_stack.is_empty(),
1658           "Mark stack should be empty (unless it has overflown)");
1659 
1660   {
1661     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1662     WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl);
1663   }
1664 
1665   if (has_overflown()) {
1666     // We can not trust g1_is_alive if the marking stack overflowed
1667     return;
1668   }
1669 
1670   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1671 
1672   // Unload Klasses, String, Symbols, Code Cache, etc.
1673   if (ClassUnloadingWithConcurrentMark) {
1674     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1675     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1676     g1h->complete_cleaning(&g1_is_alive, purged_classes);
1677   } else {
1678     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1679     // No need to clean string table and symbol table as they are treated as strong roots when
1680     // class unloading is disabled.
    g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
  }
1684 }
1685 
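// At the end of a marking cycle the "next" bitmap, which holds the freshly
// completed marking information, becomes the "prev" bitmap, and the old
// "prev" bitmap is reused (after being cleared) for the next cycle.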
1686 void G1ConcurrentMark::swap_mark_bitmaps() {
1687   G1CMBitMap* temp = _prev_mark_bitmap;
1688   _prev_mark_bitmap = _next_mark_bitmap;
1689   _next_mark_bitmap = temp;
1690 }
1691 
1692 // Closure for marking entries in SATB buffers.
1693 class G1CMSATBBufferClosure : public SATBBufferClosure {
1694 private:
1695   G1CMTask* _task;
1696   G1CollectedHeap* _g1h;
1697 
1698   // This is very similar to G1CMTask::deal_with_reference, but with
1699   // more relaxed requirements for the argument, so this must be more
1700   // circumspect about treating the argument as an object.
1701   void do_entry(void* entry) const {
1702     _task->increment_refs_reached();
1703     oop const obj = static_cast<oop>(entry);
1704     _task->make_reference_grey(obj);
1705   }
1706 
1707 public:
1708   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1709     : _task(task), _g1h(g1h) { }
1710 
1711   virtual void do_buffer(void** buffer, size_t size) {
1712     for (size_t i = 0; i < size; ++i) {
1713       do_entry(buffer[i]);
1714     }
1715   }
1716 };
1717 
1718 class G1RemarkThreadsClosure : public ThreadClosure {
1719   G1CMSATBBufferClosure _cm_satb_cl;
1720   G1CMOopClosure _cm_cl;
1721   MarkingCodeBlobClosure _code_cl;
1722   int _thread_parity;
1723 
1724  public:
1725   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1726     _cm_satb_cl(task, g1h),
1727     _cm_cl(g1h, g1h->concurrent_mark(), task),
1728     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1729     _thread_parity(Threads::thread_claim_parity()) {}
1730 
1731   void do_thread(Thread* thread) {
1732     if (thread->is_Java_thread()) {
1733       if (thread->claim_oops_do(true, _thread_parity)) {
1734         JavaThread* jt = (JavaThread*)thread;
1735 
        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking.
        // However, oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should
        // be kept live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1742         jt->nmethods_do(&_code_cl);
1743 
1744         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1745       }
1746     } else if (thread->is_VM_thread()) {
1747       if (thread->claim_oops_do(true, _thread_parity)) {
1748         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1749       }
1750     }
1751   }
1752 };
1753 
1754 class G1CMRemarkTask: public AbstractGangTask {
1755 private:
1756   G1ConcurrentMark* _cm;
1757 public:
1758   void work(uint worker_id) {
1759     // Since all available tasks are actually started, we should
1760     // only proceed if we're supposed to be active.
1761     if (worker_id < _cm->active_tasks()) {
1762       G1CMTask* task = _cm->task(worker_id);
1763       task->record_start_time();
1764       {
1765         ResourceMark rm;
1766         HandleMark hm;
1767 
1768         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1769         Threads::threads_do(&threads_f);
1770       }
1771 
1772       do {
1773         task->do_marking_step(1000000000.0 /* something very large */,
1774                               true         /* do_termination       */,
1775                               false        /* is_serial            */);
1776       } while (task->has_aborted() && !_cm->has_overflown());
1777       // If we overflow, then we do not want to restart. We instead
1778       // want to abort remark and do concurrent marking again.
1779       task->record_end_time();
1780     }
1781   }
1782 
1783   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1784     AbstractGangTask("Par Remark"), _cm(cm) {
1785     _cm->terminator()->reset_for_reuse(active_workers);
1786   }
1787 };
1788 
1789 void G1ConcurrentMark::checkpoint_roots_final_work() {
1790   ResourceMark rm;
1791   HandleMark   hm;
1792   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1793 
1794   GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1795 
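  // Make the heap parsable, e.g. by filling in the unused parts of TLABs,
  // before the remark task scans it below.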
1796   g1h->ensure_parsability(false);
1797 
1798   // this is remark, so we'll use up all active threads
1799   uint active_workers = g1h->workers()->active_workers();
1800   set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the G1ConcurrentMark
  // constructor and pass the number of active workers
  // through the gang in the task.
1805 
1806   {
1807     StrongRootsScope srs(active_workers);
1808 
1809     G1CMRemarkTask remarkTask(this, active_workers);
1810     // We will start all available threads, even if we decide that the
1811     // active_workers will be fewer. The extra ones will just bail out
1812     // immediately.
1813     g1h->workers()->run_task(&remarkTask);
1814   }
1815 
1816   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1817   guarantee(has_overflown() ||
1818             satb_mq_set.completed_buffers_num() == 0,
1819             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1820             BOOL_TO_STR(has_overflown()),
1821             satb_mq_set.completed_buffers_num());
1822 
1823   print_stats();
1824 }
1825 
1826 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1827   _prev_mark_bitmap->clear_range(mr);
1828 }
1829 
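// Claims the region containing the global finger, if any, by atomically
// advancing the finger to the end of that region with a CAS. Returns the
// claimed region if it has something to scan, or NULL if the caller should
// retry (or stop once out_of_regions() holds).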
HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
1832   // "checkpoint" the finger
1833   HeapWord* finger = _finger;
1834 
1835   // _heap_end will not change underneath our feet; it only changes at
1836   // yield points.
1837   while (finger < _heap_end) {
1838     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1839 
1840     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1841     // Make sure that the reads below do not float before loading curr_region.
1842     OrderAccess::loadload();
    // The above heap_region_containing() may return NULL as we always scan and
    // claim until the end of the heap. In this case, just jump to the next region.
1845     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1846 
1847     // Is the gap between reading the finger and doing the CAS too long?
1848     HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1849     if (res == finger && curr_region != NULL) {
1850       // we succeeded
1851       HeapWord*   bottom        = curr_region->bottom();
1852       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1853 
      // Notice that _finger == end cannot be guaranteed here, since
      // someone else might have moved the finger even further.
1856       assert(_finger >= end, "the finger should have moved forward");
1857 
1858       if (limit > bottom) {
1859         return curr_region;
1860       } else {
1861         assert(limit == bottom,
1862                "the region limit should be at bottom");
1863         // we return NULL and the caller should try calling
1864         // claim_region() again.
1865         return NULL;
1866       }
1867     } else {
1868       assert(_finger > finger, "the finger should have moved forward");
1869       // read it again
1870       finger = _finger;
1871     }
1872   }
1873 
1874   return NULL;
1875 }
1876 
1877 #ifndef PRODUCT
1878 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
1879 private:
1880   G1CollectedHeap* _g1h;
1881   const char* _phase;
1882   int _info;
1883 
1884 public:
1885   VerifyNoCSetOops(const char* phase, int info = -1) :
1886     _g1h(G1CollectedHeap::heap()),
1887     _phase(phase),
1888     _info(info)
1889   { }
1890 
1891   void operator()(G1TaskQueueEntry task_entry) const {
1892     if (task_entry.is_array_slice()) {
1893       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1894       return;
1895     }
1896     guarantee(oopDesc::is_oop(task_entry.obj()),
1897               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1898               p2i(task_entry.obj()), _phase, _info);
1899     guarantee(!_g1h->is_in_cset(task_entry.obj()),
1900               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
1901               p2i(task_entry.obj()), _phase, _info);
1902   }
1903 };
1904 
1905 void G1ConcurrentMark::verify_no_cset_oops() {
1906   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1907   if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
1908     return;
1909   }
1910 
1911   // Verify entries on the global mark stack
1912   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1913 
1914   // Verify entries on the task queues
1915   for (uint i = 0; i < _max_num_tasks; ++i) {
1916     G1CMTaskQueue* queue = _task_queues->queue(i);
1917     queue->iterate(VerifyNoCSetOops("Queue", i));
1918   }
1919 
1920   // Verify the global finger
1921   HeapWord* global_finger = finger();
1922   if (global_finger != NULL && global_finger < _heap_end) {
1923     // Since we always iterate over all regions, we might get a NULL HeapRegion
1924     // here.
1925     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1926     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1927               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1928               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1929   }
1930 
1931   // Verify the task fingers
1932   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1933   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1934     G1CMTask* task = _tasks[i];
1935     HeapWord* task_finger = task->finger();
1936     if (task_finger != NULL && task_finger < _heap_end) {
1937       // See above note on the global finger verification.
1938       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
1939       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
1940                 !task_hr->in_collection_set(),
1941                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1942                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
1943     }
1944   }
1945 }
#endif // PRODUCT

void G1ConcurrentMark::create_live_data() {
1948   _g1h->g1_rem_set()->create_card_live_data(_concurrent_workers, _next_mark_bitmap);
1949 }
1950 
1951 void G1ConcurrentMark::finalize_live_data() {
1952   _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _next_mark_bitmap);
1953 }
1954 
1955 void G1ConcurrentMark::verify_live_data() {
1956   _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _next_mark_bitmap);
1957 }
1958 
1959 void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
1960   _g1h->g1_rem_set()->clear_card_live_data(workers);
1961 }
1962 
1963 #ifdef ASSERT
1964 void G1ConcurrentMark::verify_live_data_clear() {
1965   _g1h->g1_rem_set()->verify_card_live_data_is_clear();
1966 }
1967 #endif
1968 
1969 void G1ConcurrentMark::print_stats() {
1970   if (!log_is_enabled(Debug, gc, stats)) {
1971     return;
1972   }
1973   log_debug(gc, stats)("---------------------------------------------------------------------");
1974   for (size_t i = 0; i < _num_active_tasks; ++i) {
1975     _tasks[i]->print_stats();
1976     log_debug(gc, stats)("---------------------------------------------------------------------");
1977   }
1978 }
1979 
1980 void G1ConcurrentMark::abort() {
1981   if (!cm_thread()->during_cycle() || _has_aborted) {
1982     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
1983     return;
1984   }
1985 
1986   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
1987   // concurrent bitmap clearing.
1988   {
    GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
1990     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
1991   }
1992   // Note we cannot clear the previous marking bitmap here
1993   // since VerifyDuringGC verifies the objects marked during
1994   // a full GC against the previous bitmap.
1995 
1996   {
    GCTraceTime(Debug, gc) debug("Clear Live Data");
1998     clear_live_data(_g1h->workers());
1999   }
2000   DEBUG_ONLY({
    GCTraceTime(Debug, gc) debug("Verify Live Data Clear");
2002     verify_live_data_clear();
2003   })
2004   // Empty mark stack
2005   reset_marking_state();
2006   for (uint i = 0; i < _max_num_tasks; ++i) {
2007     _tasks[i]->clear_region_fields();
2008   }
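  // Abort the overflow barriers so that any tasks currently blocked in the
  // restart protocol wake up and can observe the abort.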
2009   _first_overflow_barrier_sync.abort();
2010   _second_overflow_barrier_sync.abort();
2011   _has_aborted = true;
2012 
2013   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2014   satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking; we'll read
  // the expected_active value from the SATB queue set.
2017   satb_mq_set.set_active_all_threads(
2018                                  false, /* new active value */
2019                                  satb_mq_set.is_active() /* expected_active */);
2020 }
2021 
2022 static void print_ms_time_info(const char* prefix, const char* name,
2023                                NumberSeq& ns) {
2024   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2025                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2026   if (ns.num() > 0) {
2027     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2028                            prefix, ns.sd(), ns.maximum());
2029   }
2030 }
2031 
2032 void G1ConcurrentMark::print_summary_info() {
2033   Log(gc, marking) log;
2034   if (!log.is_trace()) {
2035     return;
2036   }
2037 
2038   log.trace(" Concurrent marking:");
2039   print_ms_time_info("  ", "init marks", _init_times);
2040   print_ms_time_info("  ", "remarks", _remark_times);
2041   {
2042     print_ms_time_info("     ", "final marks", _remark_mark_times);
    print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
  }
2046   print_ms_time_info("  ", "cleanups", _cleanup_times);
2047   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2048             _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2049   if (G1ScrubRemSets) {
2050     log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
2051               _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2052   }
2053   log.trace("  Total stop_world time = %8.2f s.",
2054             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2055   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2056             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2057 }
2058 
2059 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2060   _concurrent_workers->print_worker_threads_on(st);
2061 }
2062 
2063 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2064   _concurrent_workers->threads_do(tc);
2065 }
2066 
2067 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2068   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2069                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2070   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2071   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2072 }
2073 
2074 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2075   ReferenceProcessor* result = g1h->ref_processor_cm();
2076   assert(result != NULL, "CM reference processor should not be NULL");
2077   return result;
2078 }
2079 
2080 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2081                                G1ConcurrentMark* cm,
2082                                G1CMTask* task)
2083   : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
2084     _g1h(g1h), _cm(cm), _task(task)
2085 { }
2086 
2087 void G1CMTask::setup_for_region(HeapRegion* hr) {
2088   assert(hr != NULL,
2089         "claim_region() should have filtered out NULL regions");
2090   _curr_region  = hr;
2091   _finger       = hr->bottom();
2092   update_region_limit();
2093 }
2094 
2095 void G1CMTask::update_region_limit() {
2096   HeapRegion* hr            = _curr_region;
2097   HeapWord* bottom          = hr->bottom();
2098   HeapWord* limit           = hr->next_top_at_mark_start();
2099 
2100   if (limit == bottom) {
2101     // The region was collected underneath our feet.
2102     // We set the finger to bottom to ensure that the bitmap
2103     // iteration that will follow this will not do anything.
2104     // (this is not a condition that holds when we set the region up,
2105     // as the region is not supposed to be empty in the first place)
2106     _finger = bottom;
2107   } else if (limit >= _region_limit) {
2108     assert(limit >= _finger, "peace of mind");
2109   } else {
2110     assert(limit < _region_limit, "only way to get here");
2111     // This can happen under some pretty unusual circumstances.  An
2112     // evacuation pause empties the region underneath our feet (NTAMS
2113     // at bottom). We then do some allocation in the region (NTAMS
2114     // stays at bottom), followed by the region being used as a GC
2115     // alloc region (NTAMS will move to top() and the objects
2116     // originally below it will be grayed). All objects now marked in
2117     // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
    // set _finger to limit to ensure that the bitmap iteration
2120     // doesn't do anything.
2121     _finger = limit;
2122   }
2123 
2124   _region_limit = limit;
2125 }
2126 
2127 void G1CMTask::giveup_current_region() {
2128   assert(_curr_region != NULL, "invariant");
2129   clear_region_fields();
2130 }
2131 
2132 void G1CMTask::clear_region_fields() {
2133   // Values for these three fields that indicate that we're not
2134   // holding on to a region.
2135   _curr_region   = NULL;
2136   _finger        = NULL;
2137   _region_limit  = NULL;
2138 }
2139 
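// The closure is installed at the start of a marking step and cleared (set
// to NULL) at its end; the asserts below catch installing or clearing twice.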
2140 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2141   if (cm_oop_closure == NULL) {
2142     assert(_cm_oop_closure != NULL, "invariant");
2143   } else {
2144     assert(_cm_oop_closure == NULL, "invariant");
2145   }
2146   _cm_oop_closure = cm_oop_closure;
2147 }
2148 
2149 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2150   guarantee(next_mark_bitmap != NULL, "invariant");
2151   _next_mark_bitmap              = next_mark_bitmap;
2152   clear_region_fields();
2153 
2154   _calls                         = 0;
2155   _elapsed_time_ms               = 0.0;
2156   _termination_time_ms           = 0.0;
2157   _termination_start_time_ms     = 0.0;
2158 }
2159 
2160 bool G1CMTask::should_exit_termination() {
2161   regular_clock_call();
2162   // This is called when we are in the termination protocol. We should
2163   // quit if, for some reason, this task wants to abort or the global
2164   // stack is not empty (this means that we can get work from it).
2165   return !_cm->mark_stack_empty() || has_aborted();
2166 }
2167 
2168 void G1CMTask::reached_limit() {
2169   assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
2171          "shouldn't have been called otherwise");
2172   regular_clock_call();
2173 }
2174 
2175 void G1CMTask::regular_clock_call() {
2176   if (has_aborted()) return;
2177 
2178   // First, we need to recalculate the words scanned and refs reached
2179   // limits for the next clock call.
2180   recalculate_limits();
2181 
  // During the regular clock call we do the following:
2183 
2184   // (1) If an overflow has been flagged, then we abort.
2185   if (_cm->has_overflown()) {
2186     set_has_aborted();
2187     return;
2188   }
2189 
2190   // If we are not concurrent (i.e. we're doing remark) we don't need
2191   // to check anything else. The other steps are only needed during
2192   // the concurrent marking phase.
2193   if (!_concurrent) {
2194     return;
2195   }
2196 
2197   // (2) If marking has been aborted for Full GC, then we also abort.
2198   if (_cm->has_aborted()) {
2199     set_has_aborted();
2200     return;
2201   }
2202 
2203   double curr_time_ms = os::elapsedVTime() * 1000.0;
2204 
  // (3) We check whether we should yield. If we have to, then we abort.
2206   if (SuspendibleThreadSet::should_yield()) {
2207     // We should yield. To do this we abort the task. The caller is
2208     // responsible for yielding.
2209     set_has_aborted();
2210     return;
2211   }
2212 
  // (4) We check whether we've reached our time quota. If we have,
2214   // then we abort.
2215   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2216   if (elapsed_time_ms > _time_target_ms) {
2217     set_has_aborted();
2218     _has_timed_out = true;
2219     return;
2220   }
2221 
  // (5) Finally, we check whether there are enough completed SATB
2223   // buffers available for processing. If there are, we abort.
2224   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2225   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2226     // we do need to process SATB buffers, we'll abort and restart
2227     // the marking task to do so
2228     set_has_aborted();
2229     return;
2230   }
2231 }
2232 
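// The "real" limits are the true work-based targets for the next clock call;
// decrease_limits() may lower the working copies below them to force the
// clock to be called earlier.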
2233 void G1CMTask::recalculate_limits() {
2234   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2235   _words_scanned_limit      = _real_words_scanned_limit;
2236 
2237   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2238   _refs_reached_limit       = _real_refs_reached_limit;
2239 }
2240 
2241 void G1CMTask::decrease_limits() {
2242   // This is called when we believe that we're going to do an infrequent
2243   // operation which will increase the per byte scanned cost (i.e. move
2244   // entries to/from the global stack). It basically tries to decrease the
2245   // scanning limit so that the clock is called earlier.
2246 
2247   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2248   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2249 }
2250 
2251 void G1CMTask::move_entries_to_global_stack() {
2252   // Local array where we'll store the entries that will be popped
2253   // from the local queue.
2254   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2255 
2256   size_t n = 0;
2257   G1TaskQueueEntry task_entry;
2258   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2259     buffer[n] = task_entry;
2260     ++n;
2261   }
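  // Terminate a partially filled buffer with a null entry so that the
  // consumer (get_entries_from_global_stack()) knows where it ends.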
2262   if (n < G1CMMarkStack::EntriesPerChunk) {
2263     buffer[n] = G1TaskQueueEntry();
2264   }
2265 
2266   if (n > 0) {
2267     if (!_cm->mark_stack_push(buffer)) {
2268       set_has_aborted();
2269     }
2270   }
2271 
2272   // This operation was quite expensive, so decrease the limits.
2273   decrease_limits();
2274 }
2275 
2276 bool G1CMTask::get_entries_from_global_stack() {
2277   // Local array where we'll store the entries that will be popped
2278   // from the global stack.
2279   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2280 
2281   if (!_cm->mark_stack_pop(buffer)) {
2282     return false;
2283   }
2284 
2285   // We did actually pop at least one entry.
2286   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2287     G1TaskQueueEntry task_entry = buffer[i];
2288     if (task_entry.is_null()) {
2289       break;
2290     }
2291     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2292     bool success = _task_queue->push(task_entry);
2293     // We only call this when the local queue is empty or under a
2294     // given target limit. So, we do not expect this push to fail.
2295     assert(success, "invariant");
2296   }
2297 
2298   // This operation was quite expensive, so decrease the limits
2299   decrease_limits();
2300   return true;
2301 }
2302 
2303 void G1CMTask::drain_local_queue(bool partially) {
2304   if (has_aborted()) {
2305     return;
2306   }
2307 
  // Decide what the target size is, depending on whether we're going to
2309   // drain it partially (so that other tasks can steal if they run out
2310   // of things to do) or totally (at the very end).
2311   size_t target_size;
2312   if (partially) {
2313     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2314   } else {
2315     target_size = 0;
2316   }
2317 
2318   if (_task_queue->size() > target_size) {
2319     G1TaskQueueEntry entry;
2320     bool ret = _task_queue->pop_local(entry);
2321     while (ret) {
2322       scan_task_entry(entry);
2323       if (_task_queue->size() <= target_size || has_aborted()) {
2324         ret = false;
2325       } else {
2326         ret = _task_queue->pop_local(entry);
2327       }
2328     }
2329   }
2330 }
2331 
2332 void G1CMTask::drain_global_stack(bool partially) {
2333   if (has_aborted()) return;
2334 
2335   // We have a policy to drain the local queue before we attempt to
2336   // drain the global stack.
2337   assert(partially || _task_queue->size() == 0, "invariant");
2338 
  // Decide what the target size is, depending on whether we're going to
2340   // drain it partially (so that other tasks can steal if they run out
2341   // of things to do) or totally (at the very end).
  // Notice that when draining the global mark stack partially, due to the raciness
  // of the mark stack size update, we might in fact drop below the target. But
  // this is not a problem.
2345   // In case of total draining, we simply process until the global mark stack is
2346   // totally empty, disregarding the size counter.
2347   if (partially) {
2348     size_t const target_size = _cm->partial_mark_stack_size_target();
2349     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2350       if (get_entries_from_global_stack()) {
2351         drain_local_queue(partially);
2352       }
2353     }
2354   } else {
2355     while (!has_aborted() && get_entries_from_global_stack()) {
2356       drain_local_queue(partially);
2357     }
2358   }
2359 }
2360 
// The SATB queue code makes several assumptions about whether to call the par
// or non-par versions of its methods. This is why some of the code is
2363 // replicated. We should really get rid of the single-threaded version
2364 // of the code to simplify things.
2365 void G1CMTask::drain_satb_buffers() {
2366   if (has_aborted()) return;
2367 
2368   // We set this so that the regular clock knows that we're in the
2369   // middle of draining buffers and doesn't set the abort flag when it
2370   // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
2372   _draining_satb_buffers = true;
2373 
2374   G1CMSATBBufferClosure satb_cl(this, _g1h);
2375   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2376 
2377   // This keeps claiming and applying the closure to completed buffers
2378   // until we run out of buffers or we need to abort.
2379   while (!has_aborted() &&
2380          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2381     regular_clock_call();
2382   }
2383 
2384   _draining_satb_buffers = false;
2385 
2386   assert(has_aborted() ||
2387          _concurrent ||
2388          satb_mq_set.completed_buffers_num() == 0, "invariant");
2389 
  // Again, this was a potentially expensive operation; decrease the
  // limits to get the regular clock call early.
2392   decrease_limits();
2393 }
2394 
2395 void G1CMTask::print_stats() {
2396   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u",
2397                        _worker_id, _calls);
2398   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2399                        _elapsed_time_ms, _termination_time_ms);
2400   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
2401                        _step_times_ms.num(), _step_times_ms.avg(),
2402                        _step_times_ms.sd());
2403   log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
2404                        _step_times_ms.maximum(), _step_times_ms.sum());
2405 }
2406 
2407 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) {
2408   return _task_queues->steal(worker_id, hash_seed, task_entry);
2409 }
2410 
2411 /*****************************************************************************
2412 
2413     The do_marking_step(time_target_ms, ...) method is the building
2414     block of the parallel marking framework. It can be called in parallel
2415     with other invocations of do_marking_step() on different tasks
2416     (but only one per task, obviously) and concurrently with the
2417     mutator threads, or during remark, hence it eliminates the need
2418     for two versions of the code. When called during remark, it will
2419     pick up from where the task left off during the concurrent marking
2420     phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
2422     it needs to yield.
2423 
2424     The data structures that it uses to do marking work are the
2425     following:
2426 
2427       (1) Marking Bitmap. If there are gray objects that appear only
2428       on the bitmap (this happens either when dealing with an overflow
2429       or when the initial marking phase has simply marked the roots
2430       and didn't push them on the stack), then tasks claim heap
2431       regions whose bitmap they then scan to find gray objects. A
2432       global finger indicates where the end of the last claimed region
2433       is. A local finger indicates how far into the region a task has
2434       scanned. The two fingers are used to determine how to gray an
2435       object (i.e. whether simply marking it is OK, as it will be
2436       visited by a task in the future, or whether it needs to be also
2437       pushed on a stack).
2438 
2439       (2) Local Queue. The local queue of the task which is accessed
2440       reasonably efficiently by the task. Other tasks can steal from
2441       it when they run out of work. Throughout the marking phase, a
2442       task attempts to keep its local queue short but not totally
2443       empty, so that entries are available for stealing by other
2444       tasks. Only when there is no more work, a task will totally
2445       drain its local queue.
2446 
2447       (3) Global Mark Stack. This handles local queue overflow. During
      marking, only sets of entries are moved between it and the local
      queues, as access to it requires a mutex, and more fine-grained
      interaction with it might cause contention. If it
2451       overflows, then the marking phase should restart and iterate
2452       over the bitmap to identify gray objects. Throughout the marking
2453       phase, tasks attempt to keep the global mark stack at a small
2454       length but not totally empty, so that entries are available for
2455       popping by other tasks. Only when there is no more work, tasks
2456       will totally drain the global mark stack.
2457 
2458       (4) SATB Buffer Queue. This is where completed SATB buffers are
2459       made available. Buffers are regularly removed from this queue
2460       and scanned for roots, so that the queue doesn't get too
2461       long. During remark, all completed buffers are processed, as
2462       well as the filled in parts of any uncompleted buffers.
2463 
2464     The do_marking_step() method tries to abort when the time target
2465     has been reached. There are a few other cases when the
2466     do_marking_step() method also aborts:
2467 
2468       (1) When the marking phase has been aborted (after a Full GC).
2469 
2470       (2) When a global overflow (on the global stack) has been
2471       triggered. Before the task aborts, it will actually sync up with
2472       the other tasks to ensure that all the marking data structures
2473       (local queues, stacks, fingers etc.)  are re-initialized so that
2474       when do_marking_step() completes, the marking phase can
2475       immediately restart.
2476 
2477       (3) When enough completed SATB buffers are available. The
2478       do_marking_step() method only tries to drain SATB buffers right
2479       at the beginning. So, if enough buffers are available, the
2480       marking step aborts and the SATB buffers are processed at
2481       the beginning of the next invocation.
2482 
      (4) To yield. When we have to yield, we abort the marking step and
      yield right at the end of do_marking_step(). This saves us from a
      lot of hassle because, by yielding, we might allow a Full GC. If
      this happens then objects will be compacted underneath our feet,
      the heap might shrink, etc. We save ourselves from checking for
      all this by just aborting and doing the yield right at the end.
2489 
2490     From the above it follows that the do_marking_step() method should
2491     be called in a loop (or, otherwise, regularly) until it completes.
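
    For example, the callers in this file (G1CMDrainMarkingStackClosure
    and G1CMRemarkTask) all follow the pattern:

      do {
        task->do_marking_step(target_ms, do_termination, is_serial);
      } while (task->has_aborted() && !cm->has_overflown());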
2492 
2493     If a marking step completes without its has_aborted() flag being
2494     true, it means it has completed the current marking phase (and
2495     also all other marking tasks have done so and have all synced up).
2496 
2497     A method called regular_clock_call() is invoked "regularly" (in
2498     sub ms intervals) throughout marking. It is this clock method that
2499     checks all the abort conditions which were mentioned above and
2500     decides when the task should abort. A work-based scheme is used to
2501     trigger this clock method: when the number of object words the
2502     marking phase has scanned or the number of references the marking
    phase has visited reach a given limit. Additional invocations of
    the clock method have been planted in a few other strategic places
2505     too. The initial reason for the clock method was to avoid calling
2506     vtime too regularly, as it is quite expensive. So, once it was in
2507     place, it was natural to piggy-back all the other conditions on it
2508     too and not constantly check them throughout the code.
2509 
2510     If do_termination is true then do_marking_step will enter its
2511     termination protocol.
2512 
2513     The value of is_serial must be true when do_marking_step is being
2514     called serially (i.e. by the VMThread) and do_marking_step should
2515     skip any synchronization in the termination and overflow code.
2516     Examples include the serial remark code and the serial reference
2517     processing closures.
2518 
2519     The value of is_serial must be false when do_marking_step is
2520     being called by any of the worker threads in a work gang.
2521     Examples include the concurrent marking code (CMMarkingTask),
2522     the MT remark code, and the MT reference processing closures.
2523 
2524  *****************************************************************************/
2525 
2526 void G1CMTask::do_marking_step(double time_target_ms,
2527                                bool do_termination,
2528                                bool is_serial) {
2529   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2530   assert(_concurrent == _cm->concurrent(), "they should be the same");
2531 
2532   _start_time_ms = os::elapsedVTime() * 1000.0;
2533 
2534   // If do_stealing is true then do_marking_step will attempt to
2535   // steal work from the other G1CMTasks. It only makes sense to
2536   // enable stealing when the termination protocol is enabled
2537   // and do_marking_step() is not being called serially.
2538   bool do_stealing = do_termination && !is_serial;
2539 
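  // Shave the predicted overshoot (based on the history of previous
  // marking steps) off the time target so that, on average, a step
  // finishes within the requested target.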
2540   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2541   _time_target_ms = time_target_ms - diff_prediction_ms;
2542 
2543   // set up the variables that are used in the work-based scheme to
2544   // call the regular clock method
2545   _words_scanned = 0;
2546   _refs_reached  = 0;
2547   recalculate_limits();
2548 
2549   // clear all flags
2550   clear_has_aborted();
2551   _has_timed_out = false;
2552   _draining_satb_buffers = false;
2553 
2554   ++_calls;
2555 
2556   // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate them
  // on the stack of this frame.
2559   G1CMBitMapClosure bitmap_closure(this, _cm);
2560   G1CMOopClosure    cm_oop_closure(_g1h, _cm, this);
2561   set_cm_oop_closure(&cm_oop_closure);
2562 
2563   if (_cm->has_overflown()) {
2564     // This can happen if the mark stack overflows during a GC pause
2565     // and this task, after a yield point, restarts. We have to abort
2566     // as we need to get into the overflow protocol which happens
2567     // right at the end of this task.
2568     set_has_aborted();
2569   }
2570 
2571   // First drain any available SATB buffers. After this, we will not
2572   // look at SATB buffers before the next invocation of this method.
2573   // If enough completed SATB buffers are queued up, the regular clock
2574   // will abort this task so that it restarts.
2575   drain_satb_buffers();
2576   // ...then partially drain the local queue and the global stack
2577   drain_local_queue(true);
2578   drain_global_stack(true);
2579 
2580   do {
2581     if (!has_aborted() && _curr_region != NULL) {
2582       // This means that we're already holding on to a region.
2583       assert(_finger != NULL, "if region is not NULL, then the finger "
2584              "should not be NULL either");
2585 
2586       // We might have restarted this task after an evacuation pause
2587       // which might have evacuated the region we're holding on to
2588       // underneath our feet. Let's read its limit again to make sure
2589       // that we do not iterate over a region of the heap that
2590       // contains garbage (update_region_limit() will also move
2591       // _finger to the start of the region if it is found empty).
2592       update_region_limit();
2593       // We will start from _finger not from the start of the region,
2594       // as we might be restarting this task after aborting half-way
2595       // through scanning this region. In this case, _finger points to
2596       // the address where we last found a marked object. If this is a
2597       // fresh region, _finger points to start().
2598       MemRegion mr = MemRegion(_finger, _region_limit);
2599 
2600       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2601              "humongous regions should go around loop once only");
2602 
2603       // Some special cases:
2604       // If the memory region is empty, we can just give up the region.
2605       // If the current region is humongous then we only need to check
2606       // the bitmap for the bit associated with the start of the object,
2607       // scan the object if it's live, and give up the region.
2608       // Otherwise, let's iterate over the bitmap of the part of the region
2609       // that is left.
2610       // If the iteration is successful, give up the region.
2611       if (mr.is_empty()) {
2612         giveup_current_region();
2613         regular_clock_call();
2614       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2615         if (_next_mark_bitmap->is_marked(mr.start())) {
2616           // The object is marked - apply the closure
2617           bitmap_closure.do_addr(mr.start());
2618         }
2619         // Even if this task aborted while scanning the humongous object
2620         // we can (and should) give up the current region.
2621         giveup_current_region();
2622         regular_clock_call();
2623       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2624         giveup_current_region();
2625         regular_clock_call();
2626       } else {
2627         assert(has_aborted(), "currently the only way to do so");
2628         // The only way to abort the bitmap iteration is to return
        // false from the do_addr() method. However, inside the
        // do_addr() method we move the _finger to point to the
2631         // object currently being looked at. So, if we bail out, we
2632         // have definitely set _finger to something non-null.
2633         assert(_finger != NULL, "invariant");
2634 
2635         // Region iteration was actually aborted. So now _finger
2636         // points to the address of the object we last scanned. If we
2637         // leave it there, when we restart this task, we will rescan
2638         // the object. It is easy to avoid this. We move the finger by
2639         // enough to point to the next possible object header.
2640         assert(_finger < _region_limit, "invariant");
2641         HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2642         // Check if bitmap iteration was aborted while scanning the last object
2643         if (new_finger >= _region_limit) {
2644           giveup_current_region();
2645         } else {
2646           move_finger_to(new_finger);
2647         }
2648       }
2649     }
2650     // At this point we have either completed iterating over the
2651     // region we were holding on to, or we have aborted.
2652 
2653     // We then partially drain the local queue and the global stack.
2654     // (Do we really need this?)
2655     drain_local_queue(true);
2656     drain_global_stack(true);
2657 
2658     // Read the note on the claim_region() method on why it might
2659     // return NULL with potentially more regions available for
2660     // claiming and why we have to check out_of_regions() to determine
2661     // whether we're done or not.
2662     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2663       // We are going to try to claim a new region. We should have
2664       // given up on the previous one.
2665       // Separated the asserts so that we know which one fires.
2666       assert(_curr_region  == NULL, "invariant");
2667       assert(_finger       == NULL, "invariant");
2668       assert(_region_limit == NULL, "invariant");
2669       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2670       if (claimed_region != NULL) {
2671         // Yes, we managed to claim one
2672         setup_for_region(claimed_region);
2673         assert(_curr_region == claimed_region, "invariant");
2674       }
2675       // It is important to call the regular clock here. It might take
2676       // a while to claim a region if, for example, we hit a large
2677       // block of empty regions. So we need to call the regular clock
2678       // method once round the loop to make sure it's called
2679       // frequently enough.
2680       regular_clock_call();
2681     }
2682 
2683     if (!has_aborted() && _curr_region == NULL) {
2684       assert(_cm->out_of_regions(),
2685              "at this point we should be out of regions");
2686     }
2687   } while ( _curr_region != NULL && !has_aborted());
2688 
2689   if (!has_aborted()) {
2690     // We cannot check whether the global stack is empty, since other
2691     // tasks might be pushing objects to it concurrently.
2692     assert(_cm->out_of_regions(),
2693            "at this point we should be out of regions");
2694     // Try to reduce the number of available SATB buffers so that
2695     // remark has less work to do.
2696     drain_satb_buffers();
2697   }
2698 
2699   // Since we've done everything else, we can now totally drain the
2700   // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt to steal work from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");
    while (!has_aborted()) {
      G1TaskQueueEntry entry;
      if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) {
        scan_task_entry(entry);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // If we still haven't aborted, try to get into the termination
  // protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");
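    // Note: os::elapsedVTime() reports this thread's consumed CPU time
    // in seconds, hence the conversion to ms below.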
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The G1CMTask class also extends the TerminatorTerminator class,
    // so its should_exit_termination() method also takes part in
    // deciding whether to exit the termination protocol.
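    // offer_termination() should block until either all workers have
    // offered termination (it then returns true: marking is globally
    // done) or more work appears and this worker must resume (false).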
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // Let's allow task 0 to do this
        if (_concurrent) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // We need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
    } else {
      // Apparently there's more work to do. Let's abort this task. The
      // caller will then restart it, and hopefully we will find more
      // things to do.
      set_has_aborted();
    }
  }

  // Mainly for debugging purposes, to make sure that a pointer to the
  // closure, which was stack-allocated in this frame, doesn't escape
  // it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.
    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
  }
}

G1CMTask::G1CMTask(uint worker_id, G1ConcurrentMark* cm, G1CMTaskQueue* task_queue) :
  _objArray_processor(this),
  _worker_id(worker_id),
  _g1h(G1CollectedHeap::heap()),
  _cm(cm),
  _next_mark_bitmap(NULL),
  _task_queue(task_queue),
  _calls(0),
  _time_target_ms(0.0),
  _start_time_ms(0.0),
  _cm_oop_closure(NULL),
  _curr_region(NULL),
  _finger(NULL),
  _region_limit(NULL),
  _words_scanned(0),
  _words_scanned_limit(0),
  _real_words_scanned_limit(0),
  _refs_reached(0),
  _refs_reached_limit(0),
  _real_refs_reached_limit(0),
  _hash_seed(17),
  _has_aborted(false),
  _has_timed_out(false),
  _draining_satb_buffers(false),
  _step_times_ms(),
  _elapsed_time_ms(0.0),
  _termination_time_ms(0.0),
  _termination_start_time_ms(0.0),
  _concurrent(false),
  _marking_step_diffs_ms()
{
  guarantee(task_queue != NULL, "invariant");

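  // Seed the step-diff statistics with a small positive value, presumably
  // so that early predictions are not derived from an empty sequence.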
  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX            "###"
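
// As an illustrative note on composition: adjacent C string literals
// concatenate, so G1PPRL_LINE_PREFIX G1PPRL_TYPE_H_FORMAT (defined below)
// expands to "###" "   %4s", i.e. "###   %4s".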

#define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT            "   %-4s"
#define G1PPRL_TYPE_H_FORMAT          "   %4s"
#define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT          "  %9s"
#define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT        "  %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
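
// For instance, G1PPRL_SUM_MB_PERC_FORMAT("used") expands to
// "  used: %1.2f MB / %1.2f %%".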

G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
  _total_used_bytes(0), _total_capacity_bytes(0),
  _total_prev_live_bytes(0), _total_next_live_bytes(0),
  _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
{
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX " PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX " HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "(bytes)");
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type       = r->get_type_str();
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, strong_code_roots_bytes);

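  // Returning false means "keep going": the caller will continue the
  // iteration with the next region. Returning true would terminate it.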
  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // Add the static memory usage to the remembered set sizes.
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                         " SUMMARY"
                         G1PPRL_SUM_MB_FORMAT("capacity")
                         G1PPRL_SUM_MB_PERC_FORMAT("used")
                         G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                         G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                         G1PPRL_SUM_MB_FORMAT("remset")
                         G1PPRL_SUM_MB_FORMAT("code-roots"),
                         bytes_to_mb(_total_capacity_bytes),
                         bytes_to_mb(_total_used_bytes),
                         percent_of(_total_used_bytes, _total_capacity_bytes),
                         bytes_to_mb(_total_prev_live_bytes),
                         percent_of(_total_prev_live_bytes, _total_capacity_bytes),
                         bytes_to_mb(_total_next_live_bytes),
                         percent_of(_total_next_live_bytes, _total_capacity_bytes),
                         bytes_to_mb(_total_remset_bytes),
                         bytes_to_mb(_total_strong_code_roots_bytes));
}