1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorState.hpp"
  31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1HeapVerifier.hpp"
  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1Policy.hpp"
  36 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/g1ThreadLocalData.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionRemSet.hpp"
  41 #include "gc/g1/heapRegionSet.inline.hpp"
  42 #include "gc/shared/adaptiveSizePolicy.hpp"
  43 #include "gc/shared/gcId.hpp"
  44 #include "gc/shared/gcTimer.hpp"
  45 #include "gc/shared/gcTrace.hpp"
  46 #include "gc/shared/gcTraceTime.inline.hpp"
  47 #include "gc/shared/genOopClosures.inline.hpp"
  48 #include "gc/shared/referencePolicy.hpp"
  49 #include "gc/shared/strongRootsScope.hpp"
  50 #include "gc/shared/suspendibleThreadSet.hpp"
  51 #include "gc/shared/taskqueue.inline.hpp"
  52 #include "gc/shared/vmGCOperations.hpp"
  53 #include "gc/shared/weakProcessor.inline.hpp"
  54 #include "include/jvm.h"
  55 #include "logging/log.hpp"
  56 #include "memory/allocation.hpp"
  57 #include "memory/resourceArea.hpp"
  58 #include "oops/access.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "runtime/atomic.hpp"
  61 #include "runtime/handles.inline.hpp"
  62 #include "runtime/java.hpp"
  63 #include "runtime/prefetch.inline.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "utilities/align.hpp"
  66 #include "utilities/growableArray.hpp"
  67 
  68 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  69   assert(addr < _cm->finger(), "invariant");
  70   assert(addr >= _task->finger(), "invariant");
  71 
  72   // We move that task's local finger along.
  73   _task->move_finger_to(addr);
  74 
  75   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  76   // we only partially drain the local queue and global stack
  77   _task->drain_local_queue(true);
  78   _task->drain_global_stack(true);
  79 
  80   // if the has_aborted flag has been raised, we need to bail out of
  81   // the iteration
  82   return !_task->has_aborted();
  83 }
  84 
  85 G1CMMarkStack::G1CMMarkStack() :
  86   _max_chunk_capacity(0),
  87   _base(NULL),
  88   _chunk_capacity(0) {
  89   set_empty();
  90 }
  91 
  92 bool G1CMMarkStack::resize(size_t new_capacity) {
  93   assert(is_empty(), "Only resize when stack is empty.");
  94   assert(new_capacity <= _max_chunk_capacity,
  95          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
  96 
  97   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
  98 
  99   if (new_base == NULL) {
 100     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
 101     return false;
 102   }
 103   // Release old mapping.
 104   if (_base != NULL) {
 105     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 106   }
 107 
 108   _base = new_base;
 109   _chunk_capacity = new_capacity;
 110   set_empty();
 111 
 112   return true;
 113 }
 114 
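     // Alignment, in number of task queue entries, applied to the capacities passed to
     // initialize(): it is chosen so that the backing memory covers a whole number of
     // chunks and respects the OS allocation granularity.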
 115 size_t G1CMMarkStack::capacity_alignment() {
 116   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 117 }
 118 
 119 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 120   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 121 
 122   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 123 
 124   _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 125   size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 126 
 127   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 128             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 129             _max_chunk_capacity,
 130             initial_chunk_capacity);
 131 
 132   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 133                 initial_chunk_capacity, _max_chunk_capacity);
 134 
 135   return resize(initial_chunk_capacity);
 136 }
 137 
 138 void G1CMMarkStack::expand() {
 139   if (_chunk_capacity == _max_chunk_capacity) {
 140     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 141     return;
 142   }
 143   size_t old_capacity = _chunk_capacity;
 144   // Double capacity if possible
 145   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 146 
 147   if (resize(new_capacity)) {
 148     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 149                   old_capacity, new_capacity);
 150   } else {
 151     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 152                     old_capacity, new_capacity);
 153   }
 154 }
 155 
 156 G1CMMarkStack::~G1CMMarkStack() {
 157   if (_base != NULL) {
 158     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 159   }
 160 }
 161 
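     // Prepends elem to the given list. Callers serialize access to each list via the
     // corresponding MarkStackChunkList/MarkStackFreeList lock.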
 162 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
 163   elem->next = *list;
 164   *list = elem;
 165 }
 166 
 167 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
 168   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 169   add_chunk_to_list(&_chunk_list, elem);
 170   _chunks_in_chunk_list++;
 171 }
 172 
 173 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
 174   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 175   add_chunk_to_list(&_free_list, elem);
 176 }
 177 
 178 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
 179   TaskQueueEntryChunk* result = *list;
 180   if (result != NULL) {
 181     *list = (*list)->next;
 182   }
 183   return result;
 184 }
 185 
 186 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 187   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 188   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
 189   if (result != NULL) {
 190     _chunks_in_chunk_list--;
 191   }
 192   return result;
 193 }
 194 
 195 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 196   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 197   return remove_chunk_from_list(&_free_list);
 198 }
 199 
 200 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
 201   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 202   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 203   // wraparound of _hwm.
 204   if (_hwm >= _chunk_capacity) {
 205     return NULL;
 206   }
 207 
 208   size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
 209   if (cur_idx >= _chunk_capacity) {
 210     return NULL;
 211   }
 212 
 213   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
 214   result->next = NULL;
 215   return result;
 216 }
 217 
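     // Pushes a buffer of EntriesPerChunk entries from ptr_arr onto the global stack.
     // A chunk is reused from the free list if possible, otherwise carved out of the
     // backing memory; returns false if no chunk could be obtained.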
 218 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
 219   // Get a new chunk.
 220   TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
 221 
 222   if (new_chunk == NULL) {
 223     // Did not get a chunk from the free list. Allocate from backing memory.
 224     new_chunk = allocate_new_chunk();
 225 
 226     if (new_chunk == NULL) {
 227       return false;
 228     }
 229   }
 230 
 231   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 232 
 233   add_chunk_to_chunk_list(new_chunk);
 234 
 235   return true;
 236 }
 237 
 238 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
 239   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 240 
 241   if (cur == NULL) {
 242     return false;
 243   }
 244 
 245   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 246 
 247   add_chunk_to_free_list(cur);
 248   return true;
 249 }
 250 
 251 void G1CMMarkStack::set_empty() {
 252   _chunks_in_chunk_list = 0;
 253   _hwm = 0;
 254   _chunk_list = NULL;
 255   _free_list = NULL;
 256 }
 257 
 258 G1CMRootRegions::G1CMRootRegions() :
 259   _survivors(NULL), _cm(NULL), _scan_in_progress(false),
 260   _should_abort(false), _claimed_survivor_index(0) { }
 261 
 262 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
 263   _survivors = survivors;
 264   _cm = cm;
 265 }
 266 
 267 void G1CMRootRegions::prepare_for_scan() {
 268   assert(!scan_in_progress(), "pre-condition");
 269 
 270   // Currently, only survivors can be root regions.
 271   _claimed_survivor_index = 0;
 272   _scan_in_progress = _survivors->regions()->is_nonempty();
 273   _should_abort = false;
 274 }
 275 
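     // Atomically claims the next unscanned survivor region, or returns NULL when all
     // root regions have been claimed or the scan should be aborted.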
 276 HeapRegion* G1CMRootRegions::claim_next() {
 277   if (_should_abort) {
 278     // If someone has set the should_abort flag, we return NULL to
 279     // force the caller to bail out of their loop.
 280     return NULL;
 281   }
 282 
 283   // Currently, only survivors can be root regions.
 284   const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();
 285 
 286   int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
 287   if (claimed_index < survivor_regions->length()) {
 288     return survivor_regions->at(claimed_index);
 289   }
 290   return NULL;
 291 }
 292 
 293 uint G1CMRootRegions::num_root_regions() const {
 294   return (uint)_survivors->regions()->length();
 295 }
 296 
 297 void G1CMRootRegions::notify_scan_done() {
 298   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 299   _scan_in_progress = false;
 300   RootRegionScan_lock->notify_all();
 301 }
 302 
 303 void G1CMRootRegions::cancel_scan() {
 304   notify_scan_done();
 305 }
 306 
 307 void G1CMRootRegions::scan_finished() {
 308   assert(scan_in_progress(), "pre-condition");
 309 
 310   // Currently, only survivors can be root regions.
 311   if (!_should_abort) {
 312     assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
 313     assert((uint)_claimed_survivor_index >= _survivors->length(),
 314            "we should have claimed all survivors, claimed index = %u, length = %u",
 315            (uint)_claimed_survivor_index, _survivors->length());
 316   }
 317 
 318   notify_scan_done();
 319 }
 320 
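     // Blocks on RootRegionScan_lock until the root region scan has finished.
     // Returns true if the caller actually had to wait.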
 321 bool G1CMRootRegions::wait_until_scan_finished() {
 322   if (!scan_in_progress()) {
 323     return false;
 324   }
 325 
 326   {
 327     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 328     while (scan_in_progress()) {
 329       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 330     }
 331   }
 332   return true;
 333 }
 334 
 335 // Returns the maximum number of workers to be used in a concurrent
 336 // phase based on the number of GC workers being used in a STW
 337 // phase.
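     // For example, ParallelGCThreads = 8 yields (8 + 2) / 4 = 2 concurrent workers.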
 338 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
 339   return MAX2((num_gc_workers + 2) / 4, 1U);
 340 }
 341 
 342 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
 343                                    G1RegionToSpaceMapper* prev_bitmap_storage,
 344                                    G1RegionToSpaceMapper* next_bitmap_storage) :
 345   // _cm_thread set inside the constructor
 346   _g1h(g1h),
 347   _completed_initialization(false),
 348 
 349   _mark_bitmap_1(),
 350   _mark_bitmap_2(),
 351   _prev_mark_bitmap(&_mark_bitmap_1),
 352   _next_mark_bitmap(&_mark_bitmap_2),
 353 
 354   _heap(_g1h->reserved_region()),
 355 
 356   _root_regions(),
 357 
 358   _global_mark_stack(),
 359 
 360   // _finger set in set_non_marking_state
 361 
 362   _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
 363   _max_num_tasks(ParallelGCThreads),
 364   // _num_active_tasks set in set_non_marking_state()
 365   // _tasks set inside the constructor
 366 
 367   _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
 368   _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),
 369 
 370   _first_overflow_barrier_sync(),
 371   _second_overflow_barrier_sync(),
 372 
 373   _has_overflown(false),
 374   _concurrent(false),
 375   _has_aborted(false),
 376   _restart_for_overflow(false),
 377   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 378   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 379 
 380   // _verbose_level set below
 381 
 382   _init_times(),
 383   _remark_times(),
 384   _remark_mark_times(),
 385   _remark_weak_ref_times(),
 386   _cleanup_times(),
 387   _total_cleanup_time(0.0),
 388 
 389   _accum_task_vtime(NULL),
 390 
 391   _concurrent_workers(NULL),
 392   _num_concurrent_workers(0),
 393   _max_concurrent_workers(0),
 394 
 395   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 396   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 397 {
 398   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 399   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 400 
 401   // Create & start ConcurrentMark thread.
 402   _cm_thread = new G1ConcurrentMarkThread(this);
 403   if (_cm_thread->osthread() == NULL) {
 404     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 405   }
 406 
 407   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 408 
 409   SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
 410   satb_qs.set_buffer_size(G1SATBBufferSize);
 411 
 412   _root_regions.init(_g1h->survivor(), this);
 413 
 414   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 415     // Calculate the number of concurrent worker threads by scaling
 416     // the number of parallel GC threads.
 417     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 418     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 419   }
 420 
 421   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 422   if (ConcGCThreads > ParallelGCThreads) {
 423     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 424                     ConcGCThreads, ParallelGCThreads);
 425     return;
 426   }
 427 
 428   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 429   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 430 
 431   _num_concurrent_workers = ConcGCThreads;
 432   _max_concurrent_workers = _num_concurrent_workers;
 433 
 434   _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
 435   _concurrent_workers->initialize_workers();
 436 
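       // If MarkStackSize was not set explicitly, size it ergonomically: room for at
       // least one full task queue per concurrent worker, capped at MarkStackSizeMax.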
 437   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 438     size_t mark_stack_size =
 439       MIN2(MarkStackSizeMax,
 440           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
 441     // Verify that the calculated value for MarkStackSize is in range.
 442     // It would be nice to use the private utility routine from Arguments.
 443     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 444       log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 445                       "must be between 1 and " SIZE_FORMAT,
 446                       mark_stack_size, MarkStackSizeMax);
 447       return;
 448     }
 449     FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
 450   } else {
 451     // Verify MarkStackSize is in range.
 452     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 453       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 454         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 455           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 456                           "must be between 1 and " SIZE_FORMAT,
 457                           MarkStackSize, MarkStackSizeMax);
 458           return;
 459         }
 460       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 461         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 462           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 463                           " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 464                           MarkStackSize, MarkStackSizeMax);
 465           return;
 466         }
 467       }
 468     }
 469   }
 470 
 471   if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
 472     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
 473   }
 474 
 475   _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
 476   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
 477 
 478   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 479   _num_active_tasks = _max_num_tasks;
 480 
 481   for (uint i = 0; i < _max_num_tasks; ++i) {
 482     G1CMTaskQueue* task_queue = new G1CMTaskQueue();
 483     task_queue->initialize();
 484     _task_queues->register_queue(i, task_queue);
 485 
 486     _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
 487 
 488     _accum_task_vtime[i] = 0.0;
 489   }
 490 
 491   reset_at_marking_complete();
 492   _completed_initialization = true;
 493 }
 494 
 495 void G1ConcurrentMark::reset() {
 496   _has_aborted = false;
 497 
 498   reset_marking_for_restart();
 499 
 500   // Reset all tasks, since different phases will use different numbers of active
 501   // threads. So, it's easiest to have all of them ready.
 502   for (uint i = 0; i < _max_num_tasks; ++i) {
 503     _tasks[i]->reset(_next_mark_bitmap);
 504   }
 505 
 506   uint max_regions = _g1h->max_regions();
 507   for (uint i = 0; i < max_regions; i++) {
 508     _top_at_rebuild_starts[i] = NULL;
 509     _region_mark_stats[i].clear();
 510   }
 511 }
 512 
 513 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
 514   for (uint j = 0; j < _max_num_tasks; ++j) {
 515     _tasks[j]->clear_mark_stats_cache(region_idx);
 516   }
 517   _top_at_rebuild_starts[region_idx] = NULL;
 518   _region_mark_stats[region_idx].clear();
 519 }
 520 
 521 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
 522   uint const region_idx = r->hrm_index();
 523   if (r->is_humongous()) {
 524     assert(r->is_starts_humongous(), "Got continues humongous region here");
 525     uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
 526     for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
 527       clear_statistics_in_region(j);
 528     }
 529   } else {
 530     clear_statistics_in_region(region_idx);
 531   }
 532 }
 533 
 534 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
 535   if (bitmap->is_marked(addr)) {
 536     bitmap->clear(addr);
 537   }
 538 }
 539 
 540 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
 541   assert_at_safepoint_on_vm_thread();
 542 
 543   // Need to clear all mark bits of the humongous object.
 544   clear_mark_if_set(_prev_mark_bitmap, r->bottom());
 545   clear_mark_if_set(_next_mark_bitmap, r->bottom());
 546 
 547   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
 548     return;
 549   }
 550 
 551   // Clear any statistics about the region gathered so far.
 552   clear_statistics(r);
 553 }
 554 
 555 void G1ConcurrentMark::reset_marking_for_restart() {
 556   _global_mark_stack.set_empty();
 557 
 558   // Expand the marking stack, if we have to and if we can.
 559   if (has_overflown()) {
 560     _global_mark_stack.expand();
 561 
 562     uint max_regions = _g1h->max_regions();
 563     for (uint i = 0; i < max_regions; i++) {
 564       _region_mark_stats[i].clear_during_overflow();
 565     }
 566   }
 567 
 568   clear_has_overflown();
 569   _finger = _heap.start();
 570 
 571   for (uint i = 0; i < _max_num_tasks; ++i) {
 572     G1CMTaskQueue* queue = _task_queues->queue(i);
 573     queue->set_empty();
 574   }
 575 }
 576 
 577 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
 578   assert(active_tasks <= _max_num_tasks, "we should not have more");
 579 
 580   _num_active_tasks = active_tasks;
 581   // Need to update the three data structures below according to the
 582   // number of active threads for this phase.
 583   _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
 584   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 585   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 586 }
 587 
 588 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 589   set_concurrency(active_tasks);
 590 
 591   _concurrent = concurrent;
 592 
 593   if (!concurrent) {
 594     // At this point we should be in a STW phase, and completed marking.
 595     assert_at_safepoint_on_vm_thread();
 596     assert(out_of_regions(),
 597            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 598            p2i(_finger), p2i(_heap.end()));
 599   }
 600 }
 601 
 602 void G1ConcurrentMark::reset_at_marking_complete() {
 603   // We set the global marking state to some default values when we're
 604   // not doing marking.
 605   reset_marking_for_restart();
 606   _num_active_tasks = 0;
 607 }
 608 
 609 G1ConcurrentMark::~G1ConcurrentMark() {
 610   FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
 611   FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
 612   // The G1ConcurrentMark instance is never freed.
 613   ShouldNotReachHere();
 614 }
 615 
 616 class G1ClearBitMapTask : public AbstractGangTask {
 617 public:
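       // Bitmap clearing is done in pieces of this many bytes per claimed region, with a
       // yield and abort check after each piece when the task is suspendible.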
 618   static size_t chunk_size() { return M; }
 619 
 620 private:
 621   // Heap region closure used for clearing the given mark bitmap.
 622   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 623   private:
 624     G1CMBitMap* _bitmap;
 625     G1ConcurrentMark* _cm;
 626   public:
 627     G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
 628     }
 629 
 630     virtual bool do_heap_region(HeapRegion* r) {
 631       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 632 
 633       HeapWord* cur = r->bottom();
 634       HeapWord* const end = r->end();
 635 
 636       while (cur < end) {
 637         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 638         _bitmap->clear_range(mr);
 639 
 640         cur += chunk_size_in_words;
 641 
 642         // Abort iteration if after yielding the marking has been aborted.
 643         if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
 644           return true;
 645         }
 646         // Repeat the asserts from before the start of the closure. We will do them
 647         // as asserts here to minimize their overhead on the product. However, we
 648         // will have them as guarantees at the beginning / end of the bitmap
 649         // clearing to get some checking in the product.
 650         assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
 651         assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 652       }
 653       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 654 
 655       return false;
 656     }
 657   };
 658 
 659   G1ClearBitmapHRClosure _cl;
 660   HeapRegionClaimer _hr_claimer;
 661   bool _suspendible; // If the task is suspendible, workers must join the STS.
 662 
 663 public:
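       // A non-suspendible task (used when clearing at a safepoint) passes NULL as the
       // G1ConcurrentMark* so that the closure skips the yield and abort checks.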
 664   G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 665     AbstractGangTask("G1 Clear Bitmap"),
 666     _cl(bitmap, suspendible ? cm : NULL),
 667     _hr_claimer(n_workers),
 668     _suspendible(suspendible)
 669   { }
 670 
 671   void work(uint worker_id) {
 672     SuspendibleThreadSetJoiner sts_join(_suspendible);
 673     G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
 674   }
 675 
 676   bool is_complete() {
 677     return _cl.is_complete();
 678   }
 679 };
 680 
 681 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 682   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 683 
 684   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 685   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 686 
 687   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 688 
 689   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 690 
 691   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 692   workers->run_task(&cl, num_workers);
 693   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 694 }
 695 
 696 void G1ConcurrentMark::cleanup_for_next_mark() {
 697   // Make sure that the concurrent mark thread still appears to be in
 698   // the current cycle.
 699   guarantee(cm_thread()->during_cycle(), "invariant");
 700 
 701   // We are finishing up the current cycle by clearing the next
 702   // marking bitmap and getting it ready for the next cycle. During
 703   // this time no other cycle can start. So, let's make sure that this
 704   // is the case.
 705   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 706 
 707   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 708 
 709   // Repeat the asserts from above.
 710   guarantee(cm_thread()->during_cycle(), "invariant");
 711   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 712 }
 713 
 714 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 715   assert_at_safepoint_on_vm_thread();
 716   clear_bitmap(_prev_mark_bitmap, workers, false);
 717 }
 718 
 719 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 720 public:
 721   bool do_heap_region(HeapRegion* r) {
 722     r->note_start_of_marking();
 723     return false;
 724   }
 725 };
 726 
 727 void G1ConcurrentMark::pre_initial_mark() {
 728   // Initialize marking structures. This has to be done in a STW phase.
 729   reset();
 730 
 731   // For each region note start of marking.
 732   NoteStartOfMarkHRClosure startcl;
 733   _g1h->heap_region_iterate(&startcl);
 734 }
 735 
 736 
 737 void G1ConcurrentMark::post_initial_mark() {
 738   // Start Concurrent Marking weak-reference discovery.
 739   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 740   // enable ("weak") refs discovery
 741   rp->enable_discovery();
 742   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 743 
 744   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
 745   // This is the start of the marking cycle; we expect all
 746   // threads to have SATB queues with active set to false.
 747   satb_mq_set.set_active_all_threads(true, /* new active value */
 748                                      false /* expected_active */);
 749 
 750   _root_regions.prepare_for_scan();
 751 
 752   // update_g1_committed() will be called at the end of an evac pause
 753   // when marking is on. So, it's also called at the end of the
 754   // initial-mark pause to update the heap end, if the heap expands
 755   // during it. No need to call it here.
 756 }
 757 
 758 /*
 759  * Notice that in the next two methods, we actually leave the STS
 760  * during the barrier sync and join it immediately afterwards. If we
 761  * do not do this, the following deadlock can occur: one thread could
 762  * be in the barrier sync code, waiting for the other thread to also
 763  * sync up, whereas another one could be trying to yield, while also
 764  * waiting for the other threads to sync up too.
 765  *
 766  * Note, however, that this code is also used during remark and in
 767  * this case we should not attempt to leave / enter the STS, otherwise
 768  * we'll either hit an assert (debug / fastdebug) or deadlock
 769  * (product). So we should only leave / enter the STS if we are
 770  * operating concurrently.
 771  *
 772  * Because the thread that does the sync barrier has left the STS, it
 773  * is possible for it to be suspended while a Full GC or an evacuation
 774  * pause occurs. This is actually safe, since entering the sync
 775  * barrier is one of the last things do_marking_step() does, and it
 776  * doesn't manipulate any data structures afterwards.
 777  */
 778 
 779 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 780   bool barrier_aborted;
 781   {
 782     SuspendibleThreadSetLeaver sts_leave(concurrent());
 783     barrier_aborted = !_first_overflow_barrier_sync.enter();
 784   }
 785 
 786   // at this point everyone should have synced up and not be doing any
 787   // more work
 788 
 789   if (barrier_aborted) {
 790     // If the barrier aborted we ignore the overflow condition and
 791     // just abort the whole marking phase as quickly as possible.
 792     return;
 793   }
 794 }
 795 
 796 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 797   SuspendibleThreadSetLeaver sts_leave(concurrent());
 798   _second_overflow_barrier_sync.enter();
 799 
 800   // at this point everything should be re-initialized and ready to go
 801 }
 802 
 803 class G1CMConcurrentMarkingTask : public AbstractGangTask {
 804   G1ConcurrentMark*     _cm;
 805 
 806 public:
 807   void work(uint worker_id) {
 808     assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
 809     ResourceMark rm;
 810 
 811     double start_vtime = os::elapsedVTime();
 812 
 813     {
 814       SuspendibleThreadSetJoiner sts_join;
 815 
 816       assert(worker_id < _cm->active_tasks(), "invariant");
 817 
 818       G1CMTask* task = _cm->task(worker_id);
 819       task->record_start_time();
 820       if (!_cm->has_aborted()) {
 821         do {
 822           task->do_marking_step(G1ConcMarkStepDurationMillis,
 823                                 true  /* do_termination */,
 824                                 false /* is_serial*/);
 825 
 826           _cm->do_yield_check();
 827         } while (!_cm->has_aborted() && task->has_aborted());
 828       }
 829       task->record_end_time();
 830       guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
 831     }
 832 
 833     double end_vtime = os::elapsedVTime();
 834     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 835   }
 836 
 837   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
 838       AbstractGangTask("Concurrent Mark"), _cm(cm) { }
 839 
 840   ~G1CMConcurrentMarkingTask() { }
 841 };
 842 
 843 uint G1ConcurrentMark::calc_active_marking_workers() {
 844   uint result = 0;
 845   if (!UseDynamicNumberOfGCThreads ||
 846       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 847        !ForceDynamicNumberOfGCThreads)) {
 848     result = _max_concurrent_workers;
 849   } else {
 850     result =
 851       AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
 852                                                       1, /* Minimum workers */
 853                                                       _num_concurrent_workers,
 854                                                       Threads::number_of_non_daemon_threads());
 855     // Don't scale the result down by scale_concurrent_worker_threads() because
 856     // that scaling has already gone into "_max_concurrent_workers".
 857   }
 858   assert(result > 0 && result <= _max_concurrent_workers,
 859          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 860          _max_concurrent_workers, result);
 861   return result;
 862 }
 863 
 864 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
 865   // Currently, only survivors can be root regions.
 866   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
 867   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 868 
 869   const uintx interval = PrefetchScanIntervalInBytes;
 870   HeapWord* curr = hr->bottom();
 871   const HeapWord* end = hr->top();
 872   while (curr < end) {
 873     Prefetch::read(curr, interval);
 874     oop obj = oop(curr);
 875     int size = obj->oop_iterate_size(&cl);
 876     assert(size == obj->size(), "sanity");
 877     curr += size;
 878   }
 879 }
 880 
 881 class G1CMRootRegionScanTask : public AbstractGangTask {
 882   G1ConcurrentMark* _cm;
 883 public:
 884   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 885     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
 886 
 887   void work(uint worker_id) {
 888     assert(Thread::current()->is_ConcurrentGC_thread(),
 889            "this should only be done by a conc GC thread");
 890 
 891     G1CMRootRegions* root_regions = _cm->root_regions();
 892     HeapRegion* hr = root_regions->claim_next();
 893     while (hr != NULL) {
 894       _cm->scan_root_region(hr, worker_id);
 895       hr = root_regions->claim_next();
 896     }
 897   }
 898 };
 899 
 900 void G1ConcurrentMark::scan_root_regions() {
 901   // scan_in_progress() will have been set to true only if there was
 902   // at least one root region to scan. So, if it's false, we
 903   // should not attempt to do any further work.
 904   if (root_regions()->scan_in_progress()) {
 905     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 906 
 907     _num_concurrent_workers = MIN2(calc_active_marking_workers(),
 908                                    // We distribute work on a per-region basis, so starting
 909                                    // more threads than that is useless.
 910                                    root_regions()->num_root_regions());
 911     assert(_num_concurrent_workers <= _max_concurrent_workers,
 912            "Maximum number of marking threads exceeded");
 913 
 914     G1CMRootRegionScanTask task(this);
 915     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
 916                         task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
 917     _concurrent_workers->run_task(&task, _num_concurrent_workers);
 918 
 919     // It's possible that has_aborted() is true here without actually
 920     // aborting the survivor scan earlier. This is OK as it's
 921     // mainly used for sanity checking.
 922     root_regions()->scan_finished();
 923   }
 924 }
 925 
 926 void G1ConcurrentMark::concurrent_cycle_start() {
 927   _gc_timer_cm->register_gc_start();
 928 
 929   _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
 930 
 931   _g1h->trace_heap_before_gc(_gc_tracer_cm);
 932 }
 933 
 934 void G1ConcurrentMark::concurrent_cycle_end() {
 935   _g1h->collector_state()->set_clearing_next_bitmap(false);
 936 
 937   _g1h->trace_heap_after_gc(_gc_tracer_cm);
 938 
 939   if (has_aborted()) {
 940     log_info(gc, marking)("Concurrent Mark Abort");
 941     _gc_tracer_cm->report_concurrent_mode_failure();
 942   }
 943 
 944   _gc_timer_cm->register_gc_end();
 945 
 946   _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 947 }
 948 
 949 void G1ConcurrentMark::mark_from_roots() {
 950   _restart_for_overflow = false;
 951 
 952   _num_concurrent_workers = calc_active_marking_workers();
 953 
 954   uint active_workers = MAX2(1U, _num_concurrent_workers);
 955 
 956   // Setting active workers is not guaranteed since fewer
 957   // worker threads may currently exist and more may not be
 958   // available.
 959   active_workers = _concurrent_workers->update_active_workers(active_workers);
 960   log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
 961 
 962   // Parallel task terminator is set in "set_concurrency_and_phase()"
 963   set_concurrency_and_phase(active_workers, true /* concurrent */);
 964 
 965   G1CMConcurrentMarkingTask marking_task(this);
 966   _concurrent_workers->run_task(&marking_task);
 967   print_stats();
 968 }
 969 
 970 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
 971   G1HeapVerifier* verifier = _g1h->verifier();
 972 
 973   verifier->verify_region_sets_optional();
 974 
 975   if (VerifyDuringGC) {
 976     GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);
 977 
 978     size_t const BufLen = 512;
 979     char buffer[BufLen];
 980 
 981     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
 982     verifier->verify(type, vo, buffer);
 983   }
 984 
 985   verifier->check_bitmaps(caller);
 986 }
 987 
 988 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
 989   G1CollectedHeap* _g1h;
 990   G1ConcurrentMark* _cm;
 991   HeapRegionClaimer _hrclaimer;
 992   uint volatile _total_selected_for_rebuild;
 993 
 994   G1PrintRegionLivenessInfoClosure _cl;
 995 
 996   class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
 997     G1CollectedHeap* _g1h;
 998     G1ConcurrentMark* _cm;
 999 
1000     G1PrintRegionLivenessInfoClosure* _cl;
1001 
1002     uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1003 
1004     void update_remset_before_rebuild(HeapRegion* hr) {
1005       G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1006 
1007       bool selected_for_rebuild;
1008       if (hr->is_humongous()) {
1009         bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
1010         selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1011       } else {
1012         size_t const live_bytes = _cm->liveness(hr->hrm_index());
1013         selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1014       }
1015       if (selected_for_rebuild) {
1016         _num_regions_selected_for_rebuild++;
1017       }
1018       _cm->update_top_at_rebuild_start(hr);
1019     }
1020 
1021     // Distribute the given words across the humongous object starting with hr and
1022     // note end of marking.
1023     void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1024       uint const region_idx = hr->hrm_index();
1025       size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
1026       uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
1027 
1028       // "Distributing" zero words means that we only note end of marking for these
1029       // regions.
1030       assert(marked_words == 0 || obj_size_in_words == marked_words,
1031              "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
1032              obj_size_in_words, marked_words);
1033 
1034       for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1035         HeapRegion* const r = _g1h->region_at(i);
1036         size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1037 
1038         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
1039                                words_to_add, i, r->get_type_str());
1040         add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
1041         marked_words -= words_to_add;
1042       }
1043       assert(marked_words == 0,
1044              SIZE_FORMAT " words left after distributing space across %u regions",
1045              marked_words, num_regions_in_humongous);
1046     }
1047 
1048     void update_marked_bytes(HeapRegion* hr) {
1049       uint const region_idx = hr->hrm_index();
1050       size_t const marked_words = _cm->liveness(region_idx);
1051       // The marking attributes the object's size completely to the humongous starts
1052       // region. We need to distribute this value across the entire set of regions a
1053       // humongous object spans.
1054       if (hr->is_humongous()) {
1055         assert(hr->is_starts_humongous() || marked_words == 0,
1056                "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1057                marked_words, region_idx, hr->get_type_str());
1058         if (hr->is_starts_humongous()) {
1059           distribute_marked_bytes(hr, marked_words);
1060         }
1061       } else {
1062         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1063         add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
1064       }
1065     }
1066 
1067     void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
1068       hr->add_to_marked_bytes(marked_bytes);
1069       _cl->do_heap_region(hr);
1070       hr->note_end_of_marking();
1071     }
1072 
1073   public:
1074     G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
1075       _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
1076 
1077     virtual bool do_heap_region(HeapRegion* r) {
1078       update_remset_before_rebuild(r);
1079       update_marked_bytes(r);
1080 
1081       return false;
1082     }
1083 
1084     uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1085   };
1086 
1087 public:
1088   G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
1089     AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
1090     _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1091 
1092   virtual void work(uint worker_id) {
1093     G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1094     _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1095     Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
1096   }
1097 
1098   uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1099 
1100   // Number of regions for which roughly one thread should be spawned for this work.
1101   static const uint RegionsPerThread = 384;
1102 };
1103 
1104 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1105   G1CollectedHeap* _g1h;
1106 public:
1107   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1108 
1109   virtual bool do_heap_region(HeapRegion* r) {
1110     _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1111     return false;
1112   }
1113 };
1114 
1115 void G1ConcurrentMark::remark() {
1116   assert_at_safepoint_on_vm_thread();
1117 
1118   // If a full collection has happened, we should not continue. However, we might
1119   // have ended up here because the Remark VM operation was already scheduled.
1120   if (has_aborted()) {
1121     return;
1122   }
1123 
1124   G1Policy* g1p = _g1h->g1_policy();
1125   g1p->record_concurrent_mark_remark_start();
1126 
1127   double start = os::elapsedTime();
1128 
1129   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1130 
1131   {
1132     GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1133     finalize_marking();
1134   }
1135 
1136   double mark_work_end = os::elapsedTime();
1137 
1138   bool const mark_finished = !has_overflown();
1139   if (mark_finished) {
1140     weak_refs_work(false /* clear_all_soft_refs */);
1141 
1142     SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1143     // We're done with marking.
1144     // This is the end of the marking cycle; we expect all
1145     // threads to have SATB queues with active set to true.
1146     satb_mq_set.set_active_all_threads(false, /* new active value */
1147                                        true /* expected_active */);
1148 
1149     {
1150       GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1151       flush_all_task_caches();
1152     }
1153 
1154     // Install newly created mark bitmap as "prev".
1155     swap_mark_bitmaps();
1156     {
1157       GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1158 
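           // Aim for roughly one worker per RegionsPerThread regions, rounded up, but
           // never more than the currently active workers.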
1159       uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1160                                        G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1161       uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1162 
1163       G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1164       log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1165       _g1h->workers()->run_task(&cl, num_workers);
1166 
1167       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1168                                       _g1h->num_regions(), cl.total_selected_for_rebuild());
1169     }
1170     {
1171       GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1172       reclaim_empty_regions();
1173     }
1174 
1175     // Clean out dead classes
1176     if (ClassUnloadingWithConcurrentMark) {
1177       GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1178       ClassLoaderDataGraph::purge();
1179     }
1180 
1181     _g1h->resize_heap_if_necessary();
1182 
1183     compute_new_sizes();
1184 
1185     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1186 
1187     assert(!restart_for_overflow(), "sanity");
1188     // Completely reset the marking state since marking completed
1189     reset_at_marking_complete();
1190   } else {
1191     // We overflowed.  Restart concurrent marking.
1192     _restart_for_overflow = true;
1193 
1194     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1195 
1196     // Clear the marking state because we will be restarting
1197     // marking due to overflowing the global mark stack.
1198     reset_marking_for_restart();
1199   }
1200 
1201   {
1202     GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1203     report_object_count(mark_finished);
1204   }
1205 
1206   // Statistics
1207   double now = os::elapsedTime();
1208   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1209   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1210   _remark_times.add((now - start) * 1000.0);
1211 
1212   g1p->record_concurrent_mark_remark_end();
1213 }
1214 
1215 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1216   // Per-region work during the Cleanup pause.
1217   class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1218     G1CollectedHeap* _g1h;
1219     size_t _freed_bytes;
1220     FreeRegionList* _local_cleanup_list;
1221     uint _old_regions_removed;
1222     uint _humongous_regions_removed;
1223     HRRSCleanupTask* _hrrs_cleanup_task;
1224 
1225   public:
1226     G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
1227                                  FreeRegionList* local_cleanup_list,
1228                                  HRRSCleanupTask* hrrs_cleanup_task) :
1229       _g1h(g1h),
1230       _freed_bytes(0),
1231       _local_cleanup_list(local_cleanup_list),
1232       _old_regions_removed(0),
1233       _humongous_regions_removed(0),
1234       _hrrs_cleanup_task(hrrs_cleanup_task) { }
1235 
1236     size_t freed_bytes() { return _freed_bytes; }
1237     const uint old_regions_removed() { return _old_regions_removed; }
1238     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1239 
1240     bool do_heap_region(HeapRegion *hr) {
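           // Reclaim regions that are in use but had nothing marked live, skipping
           // young and archive regions.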
1241       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1242         _freed_bytes += hr->used();
1243         hr->set_containing_set(NULL);
1244         if (hr->is_humongous()) {
1245           _humongous_regions_removed++;
1246           _g1h->free_humongous_region(hr, _local_cleanup_list);
1247         } else {
1248           _old_regions_removed++;
1249           _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1250         }
1251         hr->clear_cardtable();
1252         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1253         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1254       } else {
1255         hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1256       }
1257 
1258       return false;
1259     }
1260   };
1261 
1262   G1CollectedHeap* _g1h;
1263   FreeRegionList* _cleanup_list;
1264   HeapRegionClaimer _hrclaimer;
1265 
1266 public:
1267   G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1268     AbstractGangTask("G1 Cleanup"),
1269     _g1h(g1h),
1270     _cleanup_list(cleanup_list),
1271     _hrclaimer(n_workers) {
1272 
1273     HeapRegionRemSet::reset_for_cleanup_tasks();
1274   }
1275 
1276   void work(uint worker_id) {
1277     FreeRegionList local_cleanup_list("Local Cleanup List");
1278     HRRSCleanupTask hrrs_cleanup_task;
1279     G1ReclaimEmptyRegionsClosure cl(_g1h,
1280                                     &local_cleanup_list,
1281                                     &hrrs_cleanup_task);
1282     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1283     assert(cl.is_complete(), "Shouldn't have aborted!");
1284 
1285     // Now update the old/humongous region sets
1286     _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1287     {
1288       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1289       _g1h->decrement_summary_bytes(cl.freed_bytes());
1290 
1291       _cleanup_list->add_ordered(&local_cleanup_list);
1292       assert(local_cleanup_list.is_empty(), "post-condition");
1293 
1294       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1295     }
1296   }
1297 };
1298 
1299 void G1ConcurrentMark::reclaim_empty_regions() {
1300   WorkGang* workers = _g1h->workers();
1301   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1302 
1303   G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1304   workers->run_task(&cl);
1305 
1306   if (!empty_regions_list.is_empty()) {
1307     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1308     // Now print the empty regions list.
1309     G1HRPrinter* hrp = _g1h->hr_printer();
1310     if (hrp->is_active()) {
1311       FreeRegionListIterator iter(&empty_regions_list);
1312       while (iter.more_available()) {
1313         HeapRegion* hr = iter.get_next();
1314         hrp->cleanup(hr);
1315       }
1316     }
1317     // And actually make them available.
1318     _g1h->prepend_to_freelist(&empty_regions_list);
1319   }
1320 }
1321 
1322 void G1ConcurrentMark::compute_new_sizes() {
1323   MetaspaceGC::compute_new_size();
1324 
1325   // Cleanup will have freed any regions completely full of garbage.
1326   // Update the soft reference policy with the new heap occupancy.
1327   Universe::update_heap_info_at_gc();
1328 
1329   // We reclaimed old regions so we should calculate the sizes to make
1330   // sure we update the old gen/space data.
1331   _g1h->g1mm()->update_sizes();
1332 }
1333 
1334 void G1ConcurrentMark::cleanup() {
1335   assert_at_safepoint_on_vm_thread();
1336 
1337   // If a full collection has happened, we shouldn't do this.
1338   if (has_aborted()) {
1339     return;
1340   }
1341 
1342   G1Policy* g1p = _g1h->g1_policy();
1343   g1p->record_concurrent_mark_cleanup_start();
1344 
1345   double start = os::elapsedTime();
1346 
1347   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1348 
1349   {
1350     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1351     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1352     _g1h->heap_region_iterate(&cl);
1353   }
1354 
1355   if (log_is_enabled(Trace, gc, liveness)) {
1356     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1357     _g1h->heap_region_iterate(&cl);
1358   }
1359 
1360   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1361 
1362   // We need to make this a "collection" so any collection pause that
1363   // races with it goes around and waits for Cleanup to finish.
1364   _g1h->increment_total_collections();
1365 
1366   // Local statistics
1367   double recent_cleanup_time = (os::elapsedTime() - start);
1368   _total_cleanup_time += recent_cleanup_time;
1369   _cleanup_times.add(recent_cleanup_time);
1370 
1371   {
1372     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1373     _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1374   }
1375 }
1376 
1377 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1378 // Uses the G1CMTask associated with a worker thread (for serial reference
1379 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1380 // trace referent objects.
1381 //
1382 // Using the G1CMTask and embedded local queues avoids having the worker
1383 // threads operating on the global mark stack. This reduces the risk
1384 // of overflowing the stack - which we would rather avoid at this late
1385 // stage. Also, using the tasks' local queues removes the potential
1386 // for the workers to interfere with each other, as they could when
1387 // operating on the global stack.
1388 
1389 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1390   G1ConcurrentMark* _cm;
1391   G1CMTask*         _task;
1392   uint              _ref_counter_limit;
1393   uint              _ref_counter;
1394   bool              _is_serial;
1395 public:
1396   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1397     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1398     _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1399     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1400   }
1401 
1402   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1403   virtual void do_oop(      oop* p) { do_oop_work(p); }
1404 
1405   template <class T> void do_oop_work(T* p) {
1406     if (_cm->has_overflown()) {
1407       return;
1408     }
1409     if (!_task->deal_with_reference(p)) {
1410       // We did not add anything to the mark bitmap (or mark stack), so there is
1411       // no point trying to drain it.
1412       return;
1413     }
1414     _ref_counter--;
1415 
1416     if (_ref_counter == 0) {
1417       // We have dealt with _ref_counter_limit references, pushing them
1418       // and objects reachable from them on to the local stack (and
1419       // possibly the global stack). Call G1CMTask::do_marking_step() to
1420       // process these entries.
1421       //
1422       // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1423       // there's nothing more to do (i.e. we're done with the entries that
1424       // were pushed as a result of the G1CMTask::deal_with_reference() calls
1425       // above) or we overflow.
1426       //
1427       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1428       // flag while there may still be some work to do. (See the comment at
1429       // the beginning of G1CMTask::do_marking_step() for those conditions -
1430       // one of which is reaching the specified time target.) It is only
1431       // when G1CMTask::do_marking_step() returns without setting the
1432       // has_aborted() flag that the marking step has completed.
1433       do {
1434         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1435         _task->do_marking_step(mark_step_duration_ms,
1436                                false      /* do_termination */,
1437                                _is_serial);
1438       } while (_task->has_aborted() && !_cm->has_overflown());
1439       _ref_counter = _ref_counter_limit;
1440     }
1441   }
1442 };
1443 
1444 // 'Drain' oop closure used by both serial and parallel reference processing.
1445 // Uses the G1CMTask associated with a given worker thread (for serial
1446 // reference processing the G1CMTask for worker 0 is used). Calls the
1447 // do_marking_step routine, with an unbelievably large timeout value,
1448 // to drain the marking data structures of the remaining entries
1449 // added by the 'keep alive' oop closure above.
1450 
1451 class G1CMDrainMarkingStackClosure : public VoidClosure {
1452   G1ConcurrentMark* _cm;
1453   G1CMTask*         _task;
1454   bool              _is_serial;
1455  public:
1456   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1457     _cm(cm), _task(task), _is_serial(is_serial) {
1458     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1459   }
1460 
1461   void do_void() {
1462     do {
1463       // We call G1CMTask::do_marking_step() to completely drain the local
1464       // and global marking stacks of entries pushed by the 'keep alive'
1465       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1466       //
1467       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1468       // if there's nothing more to do (i.e. we've completely drained the
1469       // entries that were pushed as a result of applying the 'keep alive'
1470       // closure to the entries on the discovered ref lists) or we overflow
1471       // the global marking stack.
1472       //
1473       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1474       // flag while there may still be some work to do. (See the comment at
1475       // the beginning of G1CMTask::do_marking_step() for those conditions -
1476       // one of which is reaching the specified time target.) It is only
1477       // when G1CMTask::do_marking_step() returns without setting the
1478       // has_aborted() flag that the marking step has completed.
1479 
1480       _task->do_marking_step(1000000000.0 /* something very large */,
1481                              true         /* do_termination */,
1482                              _is_serial);
1483     } while (_task->has_aborted() && !_cm->has_overflown());
1484   }
1485 };
1486 
1487 // Implementation of AbstractRefProcTaskExecutor for parallel
1488 // reference processing at the end of G1 concurrent marking
1489 
1490 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1491 private:
1492   G1CollectedHeap*  _g1h;
1493   G1ConcurrentMark* _cm;
1494   WorkGang*         _workers;
1495   uint              _active_workers;
1496 
1497 public:
1498   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1499                           G1ConcurrentMark* cm,
1500                           WorkGang* workers,
1501                           uint n_workers) :
1502     _g1h(g1h), _cm(cm),
1503     _workers(workers), _active_workers(n_workers) { }
1504 
1505   virtual void execute(ProcessTask& task, uint ergo_workers);
1506 };
1507 
1508 class G1CMRefProcTaskProxy : public AbstractGangTask {
1509   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1510   ProcessTask&      _proc_task;
1511   G1CollectedHeap*  _g1h;
1512   G1ConcurrentMark* _cm;
1513 
1514 public:
1515   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1516                        G1CollectedHeap* g1h,
1517                        G1ConcurrentMark* cm) :
1518     AbstractGangTask("Process reference objects in parallel"),
1519     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1520     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1521     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1522   }
1523 
1524   virtual void work(uint worker_id) {
1525     ResourceMark rm;
1526     HandleMark hm;
1527     G1CMTask* task = _cm->task(worker_id);
1528     G1CMIsAliveClosure g1_is_alive(_g1h);
1529     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1530     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1531 
1532     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1533   }
1534 };
1535 
1536 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
1537   assert(_workers != NULL, "Need parallel worker threads.");
1538   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1539   assert(_workers->active_workers() >= ergo_workers,
1540          "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
1541          ergo_workers, _workers->active_workers());
1542 
1543   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1544 
1545   // We need to reset the concurrency level before each
1546   // proxy task execution, so that the termination protocol
1547   // and overflow handling in G1CMTask::do_marking_step() knows
1548   // how many workers to wait for.
1549   _cm->set_concurrency(ergo_workers);
1550   _workers->run_task(&proc_task_proxy, ergo_workers);
1551 }
1552 
1553 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1554   ResourceMark rm;
1555   HandleMark   hm;
1556 
1557   // Is alive closure.
1558   G1CMIsAliveClosure g1_is_alive(_g1h);
1559 
1560   // Inner scope to exclude the cleaning of the string table
1561   // from the displayed time.
1562   {
1563     GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1564 
1565     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1566 
1567     // See the comment in G1CollectedHeap::ref_processing_init()
1568     // about how reference processing currently works in G1.
1569 
1570     // Set the soft reference policy
1571     rp->setup_policy(clear_all_soft_refs);
1572     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1573 
1574     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1575     // in serial reference processing. Note these closures are also
1576     // used for serially processing (by the current thread) the
1577     // JNI references during parallel reference processing.
1578     //
1579     // These closures do not need to synchronize with the worker
1580     // threads involved in parallel reference processing as these
1581     // instances are executed serially by the current thread (e.g.
1582     // reference processing is not multi-threaded and is thus
1583     // performed by the current thread instead of a gang worker).
1584     //
1585     // The gang tasks involved in parallel reference processing create
1586     // their own instances of these closures, which do their own
1587     // synchronization among themselves.
1588     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1589     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1590 
1591     // We need at least one active thread. If reference processing
1592     // is not multi-threaded we use the current (VMThread) thread,
1593     // otherwise we use the work gang from the G1CollectedHeap and
1594     // we utilize all the worker threads we can.
1595     bool processing_is_mt = rp->processing_is_mt();
1596     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1597     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
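         // For example, with 8 active gang workers but only 4 marking tasks
         // (_max_num_tasks == 4) this clamps active_workers to 4, while with
         // single-threaded processing it stays at 1.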
1598 
1599     // Parallel processing task executor.
1600     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1601                                               _g1h->workers(), active_workers);
1602     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1603 
1604     // Set the concurrency level. The phase was already set prior to
1605     // executing the remark task.
1606     set_concurrency(active_workers);
1607 
1608     // Set the degree of MT processing here.  If the discovery was done MT,
1609     // the number of threads involved during discovery could differ from
1610     // the number of active workers.  This is OK as long as the discovered
1611     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1612     rp->set_active_mt_degree(active_workers);
1613 
1614     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1615 
1616     // Process the weak references.
1617     const ReferenceProcessorStats& stats =
1618         rp->process_discovered_references(&g1_is_alive,
1619                                           &g1_keep_alive,
1620                                           &g1_drain_mark_stack,
1621                                           executor,
1622                                           &pt);
1623     _gc_tracer_cm->report_gc_reference_stats(stats);
1624     pt.print_all_references();
1625 
1626     // The do_oop work routines of the keep_alive and drain_marking_stack
1627     // oop closures will set the has_overflown flag if we overflow the
1628     // global marking stack.
1629 
1630     assert(has_overflown() || _global_mark_stack.is_empty(),
1631            "Mark stack should be empty (unless it has overflown)");
1632 
1633     assert(rp->num_queues() == active_workers, "why not");
1634 
1635     rp->verify_no_references_recorded();
1636     assert(!rp->discovery_enabled(), "Post condition");
1637   }
1638 
1639   if (has_overflown()) {
1640     // We can not trust g1_is_alive and the contents of the heap if the marking stack
1641     // overflowed while processing references. Exit the VM.
1642     fatal("Overflow during reference processing, can not continue. Please "
1643           "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1644           "restart.", MarkStackSizeMax);
1645     return;
1646   }
1647 
1648   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1649 
1650   {
1651     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1652     WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1653   }
1654 
1655   // Unload Klasses, String, Code Cache, etc.
1656   if (ClassUnloadingWithConcurrentMark) {
1657     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1658     bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm, false /* Defer cleaning */);
1659     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1660   } else {
1661     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1662     // No need to clean string table as it is treated as strong roots when
1663     // class unloading is disabled.
1664     _g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled());
1665   }
1666 }
1667 
1668 class G1PrecleanYieldClosure : public YieldClosure {
1669   G1ConcurrentMark* _cm;
1670 
1671 public:
1672   G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1673 
1674   virtual bool should_return() {
1675     return _cm->has_aborted();
1676   }
1677 
1678   virtual bool should_return_fine_grain() {
1679     _cm->do_yield_check();
1680     return _cm->has_aborted();
1681   }
1682 };
1683 
1684 void G1ConcurrentMark::preclean() {
1685   assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1686 
1687   SuspendibleThreadSetJoiner joiner;
1688 
1689   G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1690   G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1691 
1692   set_concurrency_and_phase(1, true);
1693 
1694   G1PrecleanYieldClosure yield_cl(this);
1695 
1696   ReferenceProcessor* rp = _g1h->ref_processor_cm();
1697   // Precleaning is single threaded. Temporarily disable MT discovery.
1698   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1699   rp->preclean_discovered_references(rp->is_alive_non_header(),
1700                                      &keep_alive,
1701                                      &drain_mark_stack,
1702                                      &yield_cl,
1703                                      _gc_timer_cm);
1704 }
1705 
1706 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1707 // the prev bitmap to determine liveness.
1708 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1709   G1CollectedHeap* _g1h;
1710 public:
1711   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1712 
1713   bool do_object_b(oop obj) {
1714     HeapWord* addr = (HeapWord*)obj;
1715     return addr != NULL &&
1716            (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1717   }
1718 };
1719 
1720 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1721   // Depending on whether marking has completed, liveness needs to be determined
1722   // using either the next or prev bitmap.
1723   if (mark_completed) {
1724     G1ObjectCountIsAliveClosure is_alive(_g1h);
1725     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1726   } else {
1727     G1CMIsAliveClosure is_alive(_g1h);
1728     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1729   }
1730 }
1731 
1732 
1733 void G1ConcurrentMark::swap_mark_bitmaps() {
1734   G1CMBitMap* temp = _prev_mark_bitmap;
1735   _prev_mark_bitmap = _next_mark_bitmap;
1736   _next_mark_bitmap = temp;
1737   _g1h->collector_state()->set_clearing_next_bitmap(true);
1738 }
1739 
1740 // Closure for marking entries in SATB buffers.
1741 class G1CMSATBBufferClosure : public SATBBufferClosure {
1742 private:
1743   G1CMTask* _task;
1744   G1CollectedHeap* _g1h;
1745 
1746   // This is very similar to G1CMTask::deal_with_reference, but with
1747   // more relaxed requirements for the argument, so this must be more
1748   // circumspect about treating the argument as an object.
1749   void do_entry(void* entry) const {
1750     _task->increment_refs_reached();
1751     oop const obj = static_cast<oop>(entry);
1752     _task->make_reference_grey(obj);
1753   }
1754 
1755 public:
1756   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1757     : _task(task), _g1h(g1h) { }
1758 
1759   virtual void do_buffer(void** buffer, size_t size) {
1760     for (size_t i = 0; i < size; ++i) {
1761       do_entry(buffer[i]);
1762     }
1763   }
1764 };
1765 
1766 class G1RemarkThreadsClosure : public ThreadClosure {
1767   G1CMSATBBufferClosure _cm_satb_cl;
1768   G1CMOopClosure _cm_cl;
1769   MarkingCodeBlobClosure _code_cl;
1770   int _thread_parity;
1771 
1772  public:
1773   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1774     _cm_satb_cl(task, g1h),
1775     _cm_cl(g1h, task),
1776     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1777     _thread_parity(Threads::thread_claim_parity()) {}
1778 
1779   void do_thread(Thread* thread) {
1780     if (thread->is_Java_thread()) {
1781       if (thread->claim_oops_do(true, _thread_parity)) {
1782         JavaThread* jt = (JavaThread*)thread;
1783 
1784         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1785         // however, oops reachable from nmethods have very complex lifecycles:
1786         // * Alive if on the stack of an executing method
1787         // * Weakly reachable otherwise
1788         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
1789         // live by the SATB invariant but other oops recorded in nmethods may behave differently.
1790         jt->nmethods_do(&_code_cl);
1791 
1792         G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl);
1793       }
1794     } else if (thread->is_VM_thread()) {
1795       if (thread->claim_oops_do(true, _thread_parity)) {
1796         G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1797       }
1798     }
1799   }
1800 };
1801 
1802 class G1CMRemarkTask : public AbstractGangTask {
1803   G1ConcurrentMark* _cm;
1804 public:
1805   void work(uint worker_id) {
1806     G1CMTask* task = _cm->task(worker_id);
1807     task->record_start_time();
1808     {
1809       ResourceMark rm;
1810       HandleMark hm;
1811 
1812       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1813       Threads::threads_do(&threads_f);
1814     }
1815 
1816     do {
1817       task->do_marking_step(1000000000.0 /* something very large */,
1818                             true         /* do_termination       */,
1819                             false        /* is_serial            */);
1820     } while (task->has_aborted() && !_cm->has_overflown());
1821     // If we overflow, then we do not want to restart. We instead
1822     // want to abort remark and do concurrent marking again.
1823     task->record_end_time();
1824   }
1825 
1826   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1827     AbstractGangTask("Par Remark"), _cm(cm) {
1828     _cm->terminator()->reset_for_reuse(active_workers);
1829   }
1830 };
1831 
1832 void G1ConcurrentMark::finalize_marking() {
1833   ResourceMark rm;
1834   HandleMark   hm;
1835 
1836   _g1h->ensure_parsability(false);
1837 
1838   // this is remark, so we'll use up all active threads
1839   uint active_workers = _g1h->workers()->active_workers();
1840   set_concurrency_and_phase(active_workers, false /* concurrent */);
1841   // Leave _parallel_marking_threads at its
1842   // value originally calculated in the G1ConcurrentMark
1843   // constructor and pass values of the active workers
1844   // through the gang in the task.
1845 
1846   {
1847     StrongRootsScope srs(active_workers);
1848 
1849     G1CMRemarkTask remarkTask(this, active_workers);
1850     // We will start all available threads, even if we decide that the
1851     // active_workers will be fewer. The extra ones will just bail out
1852     // immediately.
1853     _g1h->workers()->run_task(&remarkTask);
1854   }
1855 
1856   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1857   guarantee(has_overflown() ||
1858             satb_mq_set.completed_buffers_num() == 0,
1859             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1860             BOOL_TO_STR(has_overflown()),
1861             satb_mq_set.completed_buffers_num());
1862 
1863   print_stats();
1864 }
1865 
1866 void G1ConcurrentMark::flush_all_task_caches() {
1867   size_t hits = 0;
1868   size_t misses = 0;
1869   for (uint i = 0; i < _max_num_tasks; i++) {
1870     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1871     hits += stats.first;
1872     misses += stats.second;
1873   }
1874   size_t sum = hits + misses;
1875   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1876                        hits, misses, percent_of(hits, sum));
1877 }
1878 
1879 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1880   _prev_mark_bitmap->clear_range(mr);
1881 }
1882 
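     // Note on claim_region(): it claims the next region to scan starting at the
     // global finger. It may return NULL even though more regions remain to be
     // claimed, namely when the region it just claimed turns out to be empty
     // (limit == bottom). Callers should then simply call claim_region() again,
     // and use out_of_regions() to determine whether marking has really run out
     // of regions (see the loop in do_marking_step()).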
1883 HeapRegion*
1884 G1ConcurrentMark::claim_region(uint worker_id) {
1885   // "checkpoint" the finger
1886   HeapWord* finger = _finger;
1887 
1888   while (finger < _heap.end()) {
1889     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1890 
1891     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1892     // Make sure that the reads below do not float before loading curr_region.
1893     OrderAccess::loadload();
1894     // Above heap_region_containing may return NULL as we always scan and claim
1895     // until the end of the heap. In this case, just jump to the next region.
1896     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1897 
1898     // Is the gap between reading the finger and doing the CAS too long?
1899     HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1900     if (res == finger && curr_region != NULL) {
1901       // we succeeded
1902       HeapWord*   bottom        = curr_region->bottom();
1903       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1904 
1905       // notice that _finger == end cannot be guaranteed here since
1906       // someone else might have moved the finger even further
1907       assert(_finger >= end, "the finger should have moved forward");
1908 
1909       if (limit > bottom) {
1910         return curr_region;
1911       } else {
1912         assert(limit == bottom,
1913                "the region limit should be at bottom");
1914         // we return NULL and the caller should try calling
1915         // claim_region() again.
1916         return NULL;
1917       }
1918     } else {
1919       assert(_finger > finger, "the finger should have moved forward");
1920       // read it again
1921       finger = _finger;
1922     }
1923   }
1924 
1925   return NULL;
1926 }
1927 
1928 #ifndef PRODUCT
1929 class VerifyNoCSetOops {
1930   G1CollectedHeap* _g1h;
1931   const char* _phase;
1932   int _info;
1933 
1934 public:
1935   VerifyNoCSetOops(const char* phase, int info = -1) :
1936     _g1h(G1CollectedHeap::heap()),
1937     _phase(phase),
1938     _info(info)
1939   { }
1940 
1941   void operator()(G1TaskQueueEntry task_entry) const {
1942     if (task_entry.is_array_slice()) {
1943       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1944       return;
1945     }
1946     guarantee(oopDesc::is_oop(task_entry.obj()),
1947               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1948               p2i(task_entry.obj()), _phase, _info);
1949     guarantee(!_g1h->is_in_cset(task_entry.obj()),
1950               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
1951               p2i(task_entry.obj()), _phase, _info);
1952   }
1953 };
1954 
1955 void G1ConcurrentMark::verify_no_cset_oops() {
1956   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1957   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1958     return;
1959   }
1960 
1961   // Verify entries on the global mark stack
1962   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1963 
1964   // Verify entries on the task queues
1965   for (uint i = 0; i < _max_num_tasks; ++i) {
1966     G1CMTaskQueue* queue = _task_queues->queue(i);
1967     queue->iterate(VerifyNoCSetOops("Queue", i));
1968   }
1969 
1970   // Verify the global finger
1971   HeapWord* global_finger = finger();
1972   if (global_finger != NULL && global_finger < _heap.end()) {
1973     // Since we always iterate over all regions, we might get a NULL HeapRegion
1974     // here.
1975     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1976     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1977               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1978               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1979   }
1980 
1981   // Verify the task fingers
1982   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1983   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1984     G1CMTask* task = _tasks[i];
1985     HeapWord* task_finger = task->finger();
1986     if (task_finger != NULL && task_finger < _heap.end()) {
1987       // See above note on the global finger verification.
1988       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
1989       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
1990                 !task_hr->in_collection_set(),
1991                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1992                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
1993     }
1994   }
1995 }
1996 #endif // PRODUCT
1997 
1998 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
1999   _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
2000 }
2001 
2002 void G1ConcurrentMark::print_stats() {
2003   if (!log_is_enabled(Debug, gc, stats)) {
2004     return;
2005   }
2006   log_debug(gc, stats)("---------------------------------------------------------------------");
2007   for (size_t i = 0; i < _num_active_tasks; ++i) {
2008     _tasks[i]->print_stats();
2009     log_debug(gc, stats)("---------------------------------------------------------------------");
2010   }
2011 }
2012 
2013 void G1ConcurrentMark::concurrent_cycle_abort() {
2014   if (!cm_thread()->during_cycle() || _has_aborted) {
2015     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2016     return;
2017   }
2018 
2019   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2020   // concurrent bitmap clearing.
2021   {
2022     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2023     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2024   }
2025   // Note we cannot clear the previous marking bitmap here
2026   // since VerifyDuringGC verifies the objects marked during
2027   // a full GC against the previous bitmap.
2028 
2029   // Empty mark stack
2030   reset_marking_for_restart();
2031   for (uint i = 0; i < _max_num_tasks; ++i) {
2032     _tasks[i]->clear_region_fields();
2033   }
2034   _first_overflow_barrier_sync.abort();
2035   _second_overflow_barrier_sync.abort();
2036   _has_aborted = true;
2037 
2038   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2039   satb_mq_set.abandon_partial_marking();
2040   // This can be called either during or outside marking, we'll read
2041   // the expected_active value from the SATB queue set.
2042   satb_mq_set.set_active_all_threads(
2043                                  false, /* new active value */
2044                                  satb_mq_set.is_active() /* expected_active */);
2045 }
2046 
2047 static void print_ms_time_info(const char* prefix, const char* name,
2048                                NumberSeq& ns) {
2049   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2050                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2051   if (ns.num() > 0) {
2052     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2053                            prefix, ns.sd(), ns.maximum());
2054   }
2055 }
2056 
2057 void G1ConcurrentMark::print_summary_info() {
2058   Log(gc, marking) log;
2059   if (!log.is_trace()) {
2060     return;
2061   }
2062 
2063   log.trace(" Concurrent marking:");
2064   print_ms_time_info("  ", "init marks", _init_times);
2065   print_ms_time_info("  ", "remarks", _remark_times);
2066   {
2067     print_ms_time_info("     ", "final marks", _remark_mark_times);
2068     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2069 
2070   }
2071   print_ms_time_info("  ", "cleanups", _cleanup_times);
2072   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2073             _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2074   log.trace("  Total stop_world time = %8.2f s.",
2075             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2076   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2077             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2078 }
2079 
2080 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2081   _concurrent_workers->print_worker_threads_on(st);
2082 }
2083 
2084 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2085   _concurrent_workers->threads_do(tc);
2086 }
2087 
2088 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2089   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2090                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2091   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2092   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2093 }
2094 
2095 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2096   ReferenceProcessor* result = g1h->ref_processor_cm();
2097   assert(result != NULL, "CM reference processor should not be NULL");
2098   return result;
2099 }
2100 
2101 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2102                                G1CMTask* task)
2103   : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2104     _g1h(g1h), _task(task)
2105 { }
2106 
2107 void G1CMTask::setup_for_region(HeapRegion* hr) {
2108   assert(hr != NULL,
2109         "claim_region() should have filtered out NULL regions");
2110   _curr_region  = hr;
2111   _finger       = hr->bottom();
2112   update_region_limit();
2113 }
2114 
2115 void G1CMTask::update_region_limit() {
2116   HeapRegion* hr            = _curr_region;
2117   HeapWord* bottom          = hr->bottom();
2118   HeapWord* limit           = hr->next_top_at_mark_start();
2119 
2120   if (limit == bottom) {
2121     // The region was collected underneath our feet.
2122     // We set the finger to bottom to ensure that the bitmap
2123     // iteration that will follow this will not do anything.
2124     // (this is not a condition that holds when we set the region up,
2125     // as the region is not supposed to be empty in the first place)
2126     _finger = bottom;
2127   } else if (limit >= _region_limit) {
2128     assert(limit >= _finger, "peace of mind");
2129   } else {
2130     assert(limit < _region_limit, "only way to get here");
2131     // This can happen under some pretty unusual circumstances.  An
2132     // evacuation pause empties the region underneath our feet (NTAMS
2133     // at bottom). We then do some allocation in the region (NTAMS
2134     // stays at bottom), followed by the region being used as a GC
2135     // alloc region (NTAMS will move to top() and the objects
2136     // originally below it will be grayed). All objects now marked in
2137     // the region are explicitly grayed, if below the global finger,
2138     // and in fact we do not need to scan anything else. So, we simply
2139     // set _finger to be limit to ensure that the bitmap iteration
2140     // doesn't do anything.
2141     _finger = limit;
2142   }
2143 
2144   _region_limit = limit;
2145 }
2146 
2147 void G1CMTask::giveup_current_region() {
2148   assert(_curr_region != NULL, "invariant");
2149   clear_region_fields();
2150 }
2151 
2152 void G1CMTask::clear_region_fields() {
2153   // Values for these three fields that indicate that we're not
2154   // holding on to a region.
2155   _curr_region   = NULL;
2156   _finger        = NULL;
2157   _region_limit  = NULL;
2158 }
2159 
2160 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2161   if (cm_oop_closure == NULL) {
2162     assert(_cm_oop_closure != NULL, "invariant");
2163   } else {
2164     assert(_cm_oop_closure == NULL, "invariant");
2165   }
2166   _cm_oop_closure = cm_oop_closure;
2167 }
2168 
2169 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2170   guarantee(next_mark_bitmap != NULL, "invariant");
2171   _next_mark_bitmap              = next_mark_bitmap;
2172   clear_region_fields();
2173 
2174   _calls                         = 0;
2175   _elapsed_time_ms               = 0.0;
2176   _termination_time_ms           = 0.0;
2177   _termination_start_time_ms     = 0.0;
2178 
2179   _mark_stats_cache.reset();
2180 }
2181 
2182 bool G1CMTask::should_exit_termination() {
2183   regular_clock_call();
2184   // This is called when we are in the termination protocol. We should
2185   // quit if, for some reason, this task wants to abort or the global
2186   // stack is not empty (this means that we can get work from it).
2187   return !_cm->mark_stack_empty() || has_aborted();
2188 }
2189 
2190 void G1CMTask::reached_limit() {
2191   assert(_words_scanned >= _words_scanned_limit ||
2192          _refs_reached >= _refs_reached_limit ,
2193          "shouldn't have been called otherwise");
2194   regular_clock_call();
2195 }
2196 
2197 void G1CMTask::regular_clock_call() {
2198   if (has_aborted()) {
2199     return;
2200   }
2201 
2202   // First, we need to recalculate the words scanned and refs reached
2203   // limits for the next clock call.
2204   recalculate_limits();
2205 
2206   // During the regular clock call we do the following
2207 
2208   // (1) If an overflow has been flagged, then we abort.
2209   if (_cm->has_overflown()) {
2210     set_has_aborted();
2211     return;
2212   }
2213 
2214   // If we are not concurrent (i.e. we're doing remark) we don't need
2215   // to check anything else. The other steps are only needed during
2216   // the concurrent marking phase.
2217   if (!_cm->concurrent()) {
2218     return;
2219   }
2220 
2221   // (2) If marking has been aborted for Full GC, then we also abort.
2222   if (_cm->has_aborted()) {
2223     set_has_aborted();
2224     return;
2225   }
2226 
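       // os::elapsedVTime() reports seconds; convert to milliseconds to match
       // _time_target_ms below.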
2227   double curr_time_ms = os::elapsedVTime() * 1000.0;
2228 
2229   // (3) We check whether we should yield. If we have to, then we abort.
2230   if (SuspendibleThreadSet::should_yield()) {
2231     // We should yield. To do this we abort the task. The caller is
2232     // responsible for yielding.
2233     set_has_aborted();
2234     return;
2235   }
2236 
2237   // (4) We check whether we've reached our time quota. If we have,
2238   // then we abort.
2239   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2240   if (elapsed_time_ms > _time_target_ms) {
2241     set_has_aborted();
2242     _has_timed_out = true;
2243     return;
2244   }
2245 
2246   // (5) Finally, we check whether there are enough completed SATB
2247   // buffers available for processing. If there are, we abort.
2248   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2249   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2250     // we do need to process SATB buffers, we'll abort and restart
2251     // the marking task to do so
2252     set_has_aborted();
2253     return;
2254   }
2255 }
2256 
2257 void G1CMTask::recalculate_limits() {
2258   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2259   _words_scanned_limit      = _real_words_scanned_limit;
2260 
2261   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2262   _refs_reached_limit       = _real_refs_reached_limit;
2263 }
2264 
2265 void G1CMTask::decrease_limits() {
2266   // This is called when we believe that we're going to do an infrequent
2267   // operation which will increase the per byte scanned cost (i.e. move
2268   // entries to/from the global stack). It basically tries to decrease the
2269   // scanning limit so that the clock is called earlier.
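       //
       // For example, since recalculate_limits() set _real_words_scanned_limit to
       // _words_scanned + words_scanned_period, lowering the limit by 3/4 of the
       // period means the next clock call is due after at most a quarter of the
       // usual scanning period (and analogously for refs reached).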
2270 
2271   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2272   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2273 }
2274 
2275 void G1CMTask::move_entries_to_global_stack() {
2276   // Local array where we'll store the entries that will be popped
2277   // from the local queue.
2278   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2279 
2280   size_t n = 0;
2281   G1TaskQueueEntry task_entry;
2282   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2283     buffer[n] = task_entry;
2284     ++n;
2285   }
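       // If the chunk is not completely filled, terminate it with a
       // default-constructed (null) entry; get_entries_from_global_stack()
       // relies on this sentinel when unpacking a chunk.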
2286   if (n < G1CMMarkStack::EntriesPerChunk) {
2287     buffer[n] = G1TaskQueueEntry();
2288   }
2289 
2290   if (n > 0) {
2291     if (!_cm->mark_stack_push(buffer)) {
2292       set_has_aborted();
2293     }
2294   }
2295 
2296   // This operation was quite expensive, so decrease the limits.
2297   decrease_limits();
2298 }
2299 
2300 bool G1CMTask::get_entries_from_global_stack() {
2301   // Local array where we'll store the entries that will be popped
2302   // from the global stack.
2303   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2304 
2305   if (!_cm->mark_stack_pop(buffer)) {
2306     return false;
2307   }
2308 
2309   // We did actually pop at least one entry.
2310   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2311     G1TaskQueueEntry task_entry = buffer[i];
2312     if (task_entry.is_null()) {
2313       break;
2314     }
2315     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2316     bool success = _task_queue->push(task_entry);
2317     // We only call this when the local queue is empty or under a
2318     // given target limit. So, we do not expect this push to fail.
2319     assert(success, "invariant");
2320   }
2321 
2322   // This operation was quite expensive, so decrease the limits
2323   decrease_limits();
2324   return true;
2325 }
2326 
2327 void G1CMTask::drain_local_queue(bool partially) {
2328   if (has_aborted()) {
2329     return;
2330   }
2331 
2332   // Decide what the target size is, depending whether we're going to
2333   // drain it partially (so that other tasks can steal if they run out
2334   // of things to do) or totally (at the very end).
2335   size_t target_size;
2336   if (partially) {
2337     target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2338   } else {
2339     target_size = 0;
2340   }
2341 
2342   if (_task_queue->size() > target_size) {
2343     G1TaskQueueEntry entry;
2344     bool ret = _task_queue->pop_local(entry);
2345     while (ret) {
2346       scan_task_entry(entry);
2347       if (_task_queue->size() <= target_size || has_aborted()) {
2348         ret = false;
2349       } else {
2350         ret = _task_queue->pop_local(entry);
2351       }
2352     }
2353   }
2354 }
2355 
2356 void G1CMTask::drain_global_stack(bool partially) {
2357   if (has_aborted()) {
2358     return;
2359   }
2360 
2361   // We have a policy to drain the local queue before we attempt to
2362   // drain the global stack.
2363   assert(partially || _task_queue->size() == 0, "invariant");
2364 
2365   // Decide what the target size is, depending whether we're going to
2366   // drain it partially (so that other tasks can steal if they run out
2367   // of things to do) or totally (at the very end).
2368   // Notice that when draining the global mark stack partially, due to the raciness
2369   // of the mark stack size update we might in fact drop below the target. But,
2370   // this is not a problem.
2371   // In case of total draining, we simply process until the global mark stack is
2372   // totally empty, disregarding the size counter.
2373   if (partially) {
2374     size_t const target_size = _cm->partial_mark_stack_size_target();
2375     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2376       if (get_entries_from_global_stack()) {
2377         drain_local_queue(partially);
2378       }
2379     }
2380   } else {
2381     while (!has_aborted() && get_entries_from_global_stack()) {
2382       drain_local_queue(partially);
2383     }
2384   }
2385 }
2386 
2387 // SATB Queue has several assumptions on whether to call the par or
2388 // non-par versions of the methods. This is why some of the code is
2389 // replicated. We should really get rid of the single-threaded version
2390 // of the code to simplify things.
2391 void G1CMTask::drain_satb_buffers() {
2392   if (has_aborted()) {
2393     return;
2394   }
2395 
2396   // We set this so that the regular clock knows that we're in the
2397   // middle of draining buffers and doesn't set the abort flag when it
2398   // notices that SATB buffers are available for draining. It'd be
2399   // very counterproductive if it did that. :-)
2400   _draining_satb_buffers = true;
2401 
2402   G1CMSATBBufferClosure satb_cl(this, _g1h);
2403   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2404 
2405   // This keeps claiming and applying the closure to completed buffers
2406   // until we run out of buffers or we need to abort.
2407   while (!has_aborted() &&
2408          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2409     regular_clock_call();
2410   }
2411 
2412   _draining_satb_buffers = false;
2413 
2414   assert(has_aborted() ||
2415          _cm->concurrent() ||
2416          satb_mq_set.completed_buffers_num() == 0, "invariant");
2417 
2418   // again, this was a potentially expensive operation, decrease the
2419   // limits to get the regular clock call early
2420   decrease_limits();
2421 }
2422 
2423 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2424   _mark_stats_cache.reset(region_idx);
2425 }
2426 
2427 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2428   return _mark_stats_cache.evict_all();
2429 }
2430 
2431 void G1CMTask::print_stats() {
2432   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2433   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2434                        _elapsed_time_ms, _termination_time_ms);
2435   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2436                        _step_times_ms.num(),
2437                        _step_times_ms.avg(),
2438                        _step_times_ms.sd(),
2439                        _step_times_ms.maximum(),
2440                        _step_times_ms.sum());
2441   size_t const hits = _mark_stats_cache.hits();
2442   size_t const misses = _mark_stats_cache.misses();
2443   log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2444                        hits, misses, percent_of(hits, hits + misses));
2445 }
2446 
2447 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2448   return _task_queues->steal(worker_id, task_entry);
2449 }
2450 
2451 /*****************************************************************************
2452 
2453     The do_marking_step(time_target_ms, ...) method is the building
2454     block of the parallel marking framework. It can be called in parallel
2455     with other invocations of do_marking_step() on different tasks
2456     (but only one per task, obviously) and concurrently with the
2457     mutator threads, or during remark, hence it eliminates the need
2458     for two versions of the code. When called during remark, it will
2459     pick up from where the task left off during the concurrent marking
2460     phase. Interestingly, tasks are also claimable during evacuation
2461     pauses, since do_marking_step() ensures that it aborts before
2462     it needs to yield.
2463 
2464     The data structures that it uses to do marking work are the
2465     following:
2466 
2467       (1) Marking Bitmap. If there are gray objects that appear only
2468       on the bitmap (this happens either when dealing with an overflow
2469       or when the initial marking phase has simply marked the roots
2470       and didn't push them on the stack), then tasks claim heap
2471       regions whose bitmap they then scan to find gray objects. A
2472       global finger indicates where the end of the last claimed region
2473       is. A local finger indicates how far into the region a task has
2474       scanned. The two fingers are used to determine how to gray an
2475       object (i.e. whether simply marking it is OK, as it will be
2476       visited by a task in the future, or whether it needs to be also
2477       pushed on a stack).
2478 
2479       (2) Local Queue. The local queue of the task which is accessed
2480       reasonably efficiently by the task. Other tasks can steal from
2481       it when they run out of work. Throughout the marking phase, a
2482       task attempts to keep its local queue short but not totally
2483       empty, so that entries are available for stealing by other
2484       tasks. Only when there is no more work, a task will totally
2485       drain its local queue.
2486 
2487       (3) Global Mark Stack. This handles local queue overflow. During
2488       marking only sets of entries are moved between it and the local
2489       queues, as access to it requires a mutex and more fine-grain
2490       interaction with it which might cause contention. If it
2491       overflows, then the marking phase should restart and iterate
2492       over the bitmap to identify gray objects. Throughout the marking
2493       phase, tasks attempt to keep the global mark stack at a small
2494       length but not totally empty, so that entries are available for
2495       popping by other tasks. Only when there is no more work, tasks
2496       will totally drain the global mark stack.
2497 
2498       (4) SATB Buffer Queue. This is where completed SATB buffers are
2499       made available. Buffers are regularly removed from this queue
2500       and scanned for roots, so that the queue doesn't get too
2501       long. During remark, all completed buffers are processed, as
2502       well as the filled in parts of any uncompleted buffers.
2503 
2504     The do_marking_step() method tries to abort when the time target
2505     has been reached. There are a few other cases when the
2506     do_marking_step() method also aborts:
2507 
2508       (1) When the marking phase has been aborted (after a Full GC).
2509 
2510       (2) When a global overflow (on the global stack) has been
2511       triggered. Before the task aborts, it will actually sync up with
2512       the other tasks to ensure that all the marking data structures
2513       (local queues, stacks, fingers etc.)  are re-initialized so that
2514       when do_marking_step() completes, the marking phase can
2515       immediately restart.
2516 
2517       (3) When enough completed SATB buffers are available. The
2518       do_marking_step() method only tries to drain SATB buffers right
2519       at the beginning. So, if enough buffers are available, the
2520       marking step aborts and the SATB buffers are processed at
2521       the beginning of the next invocation.
2522 
2523       (4) To yield. When we have to yield then we abort and yield
2524       right at the end of do_marking_step(). This saves us from a lot
2525       of hassle as, by yielding we might allow a Full GC. If this
2526       happens then objects will be compacted underneath our feet, the
2527       heap might shrink, etc. We save checking for this by just
2528       aborting and doing the yield right at the end.
2529 
2530     From the above it follows that the do_marking_step() method should
2531     be called in a loop (or, otherwise, regularly) until it completes.
2532 
2533     If a marking step completes without its has_aborted() flag being
2534     true, it means it has completed the current marking phase (and
2535     also all other marking tasks have done so and have all synced up).
2536 
2537     A method called regular_clock_call() is invoked "regularly" (in
2538     sub ms intervals) throughout marking. It is this clock method that
2539     checks all the abort conditions which were mentioned above and
2540     decides when the task should abort. A work-based scheme is used to
2541     trigger this clock method: when the number of object words the
2542     marking phase has scanned or the number of references the marking
2543     phase has visited reaches a given limit. Additional invocations of
2544     the clock method have been planted in a few other strategic places
2545     too. The initial reason for the clock method was to avoid calling
2546     vtime too regularly, as it is quite expensive. So, once it was in
2547     place, it was natural to piggy-back all the other conditions on it
2548     too and not constantly check them throughout the code.
2549 
2550     If do_termination is true then do_marking_step will enter its
2551     termination protocol.
2552 
2553     The value of is_serial must be true when do_marking_step is being
2554     called serially (i.e. by the VMThread) and do_marking_step should
2555     skip any synchronization in the termination and overflow code.
2556     Examples include the serial remark code and the serial reference
2557     processing closures.
2558 
2559     The value of is_serial must be false when do_marking_step is
2560     being called by any of the worker threads in a work gang.
2561     Examples include the concurrent marking code (CMMarkingTask),
2562     the MT remark code, and the MT reference processing closures.
2563 
2564  *****************************************************************************/
2565 
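     // A typical caller loop (see e.g. G1CMRemarkTask::work() and the reference
     // processing closures earlier in this file; illustrative sketch only):
     //
     //   do {
     //     task->do_marking_step(target_ms, do_termination, is_serial);
     //   } while (task->has_aborted() && !cm->has_overflown());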
2566 void G1CMTask::do_marking_step(double time_target_ms,
2567                                bool do_termination,
2568                                bool is_serial) {
2569   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2570 
2571   _start_time_ms = os::elapsedVTime() * 1000.0;
2572 
2573   // If do_stealing is true then do_marking_step will attempt to
2574   // steal work from the other G1CMTasks. It only makes sense to
2575   // enable stealing when the termination protocol is enabled
2576   // and do_marking_step() is not being called serially.
2577   bool do_stealing = do_termination && !is_serial;
2578 
2579   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2580   _time_target_ms = time_target_ms - diff_prediction_ms;
2581 
2582   // set up the variables that are used in the work-based scheme to
2583   // call the regular clock method
2584   _words_scanned = 0;
2585   _refs_reached  = 0;
2586   recalculate_limits();
2587 
2588   // clear all flags
2589   clear_has_aborted();
2590   _has_timed_out = false;
2591   _draining_satb_buffers = false;
2592 
2593   ++_calls;
2594 
2595   // Set up the bitmap and oop closures. Anything that uses them is
2596   // eventually called from this method, so it is OK to allocate these
2597   // statically.
2598   G1CMBitMapClosure bitmap_closure(this, _cm);
2599   G1CMOopClosure cm_oop_closure(_g1h, this);
2600   set_cm_oop_closure(&cm_oop_closure);
2601 
2602   if (_cm->has_overflown()) {
2603     // This can happen if the mark stack overflows during a GC pause
2604     // and this task, after a yield point, restarts. We have to abort
2605     // as we need to get into the overflow protocol which happens
2606     // right at the end of this task.
2607     set_has_aborted();
2608   }
2609 
2610   // First drain any available SATB buffers. After this, we will not
2611   // look at SATB buffers before the next invocation of this method.
2612   // If enough completed SATB buffers are queued up, the regular clock
2613   // will abort this task so that it restarts.
2614   drain_satb_buffers();
2615   // ...then partially drain the local queue and the global stack
2616   drain_local_queue(true);
2617   drain_global_stack(true);
2618 
2619   do {
2620     if (!has_aborted() && _curr_region != NULL) {
2621       // This means that we're already holding on to a region.
2622       assert(_finger != NULL, "if region is not NULL, then the finger "
2623              "should not be NULL either");
2624 
2625       // We might have restarted this task after an evacuation pause
2626       // which might have evacuated the region we're holding on to
2627       // underneath our feet. Let's read its limit again to make sure
2628       // that we do not iterate over a region of the heap that
2629       // contains garbage (update_region_limit() will also move
2630       // _finger to the start of the region if it is found empty).
2631       update_region_limit();
2632       // We will start from _finger not from the start of the region,
2633       // as we might be restarting this task after aborting half-way
2634       // through scanning this region. In this case, _finger points to
2635       // the address where we last found a marked object. If this is a
2636       // fresh region, _finger points to start().
2637       MemRegion mr = MemRegion(_finger, _region_limit);
2638 
2639       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2640              "humongous regions should go around loop once only");
2641 
2642       // Some special cases:
2643       // If the memory region is empty, we can just give up the region.
2644       // If the current region is humongous then we only need to check
2645       // the bitmap for the bit associated with the start of the object,
2646       // scan the object if it's live, and give up the region.
2647       // Otherwise, let's iterate over the bitmap of the part of the region
2648       // that is left.
2649       // If the iteration is successful, give up the region.
2650       if (mr.is_empty()) {
2651         giveup_current_region();
2652         regular_clock_call();
2653       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2654         if (_next_mark_bitmap->is_marked(mr.start())) {
2655           // The object is marked - apply the closure
2656           bitmap_closure.do_addr(mr.start());
2657         }
2658         // Even if this task aborted while scanning the humongous object
2659         // we can (and should) give up the current region.
2660         giveup_current_region();
2661         regular_clock_call();
2662       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2663         giveup_current_region();
2664         regular_clock_call();
2665       } else {
2666         assert(has_aborted(), "currently the only way to do so");
2667         // The only way to abort the bitmap iteration is to return
2668         // false from the do_bit() method. However, inside the
2669         // do_bit() method we move the _finger to point to the
2670         // object currently being looked at. So, if we bail out, we
2671         // have definitely set _finger to something non-null.
2672         assert(_finger != NULL, "invariant");
2673 
2674         // Region iteration was actually aborted. So now _finger
2675         // points to the address of the object we last scanned. If we
2676         // leave it there, when we restart this task, we will rescan
2677         // the object. It is easy to avoid this. We move the finger by
2678         // enough to point to the next possible object header.
2679         assert(_finger < _region_limit, "invariant");
2680         HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2681         // Check if bitmap iteration was aborted while scanning the last object
2682         if (new_finger >= _region_limit) {
2683           giveup_current_region();
2684         } else {
2685           move_finger_to(new_finger);
2686         }
2687       }
2688     }
2689     // At this point we have either completed iterating over the
2690     // region we were holding on to, or we have aborted.
2691 
2692     // We then partially drain the local queue and the global stack.
2693     // (Do we really need this?)
2694     drain_local_queue(true);
2695     drain_global_stack(true);
2696 
2697     // Read the note on the claim_region() method for why it might
2698     // return NULL with potentially more regions available for
2699     // claiming and why we have to check out_of_regions() to determine
2700     // whether we're done or not.
2701     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2702       // We are going to try to claim a new region. We should have
2703       // given up on the previous one.
2704       // Separated the asserts so that we know which one fires.
2705       assert(_curr_region  == NULL, "invariant");
2706       assert(_finger       == NULL, "invariant");
2707       assert(_region_limit == NULL, "invariant");
2708       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2709       if (claimed_region != NULL) {
2710         // Yes, we managed to claim one
2711         setup_for_region(claimed_region);
2712         assert(_curr_region == claimed_region, "invariant");
2713       }
2714       // It is important to call the regular clock here. It might take
2715       // a while to claim a region if, for example, we hit a large
2716       // block of empty regions. So we need to call the regular clock
2717       // method once round the loop to make sure it's called
2718       // frequently enough.
2719       regular_clock_call();
2720     }
2721 
2722     if (!has_aborted() && _curr_region == NULL) {
2723       assert(_cm->out_of_regions(),
2724              "at this point we should be out of regions");
2725     }
2726   } while ( _curr_region != NULL && !has_aborted());
2727 
2728   if (!has_aborted()) {
2729     // We cannot check whether the global stack is empty, since other
2730     // tasks might be pushing objects to it concurrently.
2731     assert(_cm->out_of_regions(),
2732            "at this point we should be out of regions");
2733     // Try to reduce the number of available SATB buffers so that
2734     // remark has less work to do.
2735     drain_satb_buffers();
2736   }
2737 
2738   // Since we've done everything else, we can now totally drain the
2739   // local queue and global stack.
2740   drain_local_queue(false);
2741   drain_global_stack(false);
2742 
2743   // Attempt at work stealing from other tasks' queues.
2744   if (do_stealing && !has_aborted()) {
2745     // We have not aborted. This means that we have finished all that
2746     // we could. Let's try to do some stealing...
2747 
2748     // We cannot check whether the global stack is empty, since other
2749     // tasks might be pushing objects to it concurrently.
2750     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2751            "only way to reach here");
2752     while (!has_aborted()) {
2753       G1TaskQueueEntry entry;
2754       if (_cm->try_stealing(_worker_id, entry)) {
2755         scan_task_entry(entry);
2756 
2757         // And since we're towards the end, let's totally drain the
2758         // local queue and global stack.
2759         drain_local_queue(false);
2760         drain_global_stack(false);
2761       } else {
2762         break;
2763       }
2764     }
2765   }
2766 
2767   // If we haven't aborted yet, try to get into the
2768   // termination protocol.
2769   if (do_termination && !has_aborted()) {
2770     // We cannot check whether the global stack is empty, since other
2771     // tasks might be concurrently pushing objects on it.
2772     // Separated the asserts so that we know which one fires.
2773     assert(_cm->out_of_regions(), "only way to reach here");
2774     assert(_task_queue->size() == 0, "only way to reach here");
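         // Record the point (in ms of this thread's virtual/CPU time) at which we
         // start waiting in the termination protocol, so that the wait is attributed
         // to _termination_time_ms rather than to marking work.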
2775     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2776 
2777     // The G1CMTask class also extends the TerminatorTerminator class,
2778     // hence its should_exit_termination() method also decides
2779     // whether to exit the termination protocol.
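         // In the serial case there is only one task, so there is no other task
         // to wait for and we can consider termination reached immediately.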
2780     bool finished = (is_serial ||
2781                      _cm->terminator()->offer_termination(this));
2782     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2783     _termination_time_ms +=
2784       termination_end_time_ms - _termination_start_time_ms;
2785 
2786     if (finished) {
2787       // We're all done.
2788 
2789       // We can now guarantee that the global stack is empty, since
2790       // all other tasks have finished. We separated the guarantees so
2791       // that, if a condition is false, we can immediately find out
2792       // which one.
2793       guarantee(_cm->out_of_regions(), "only way to reach here");
2794       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2795       guarantee(_task_queue->size() == 0, "only way to reach here");
2796       guarantee(!_cm->has_overflown(), "only way to reach here");
2797     } else {
2798       // Apparently there's more work to do. Let's abort this task; the
2799       // caller will restart it and we can hopefully find more things to do.
2800       set_has_aborted();
2801     }
2802   }
2803 
2804   // Mainly for debugging purposes: make sure that a pointer to the
2805   // closure, which was allocated on this frame's stack, doesn't
2806   // escape the frame by accident.
2807   set_cm_oop_closure(NULL);
2808   double end_time_ms = os::elapsedVTime() * 1000.0;
2809   double elapsed_time_ms = end_time_ms - _start_time_ms;
2810   // Update the step history.
2811   _step_times_ms.add(elapsed_time_ms);
2812 
2813   if (has_aborted()) {
2814     // The task was aborted for some reason.
2815     if (_has_timed_out) {
2816       double diff_ms = elapsed_time_ms - _time_target_ms;
2817       // Keep statistics of how well we did with respect to hitting
2818       // our target only if we actually timed out (if we aborted for
2819       // other reasons, then the results might get skewed).
2820       _marking_step_diffs_ms.add(diff_ms);
2821     }
2822 
2823     if (_cm->has_overflown()) {
2824       // This is the interesting one. We aborted because a global
2825       // overflow was raised. This means we have to restart the
2826       // marking phase and start iterating over regions. However, in
2827       // order to do this we have to make sure that all tasks stop
2828       // what they are doing and re-initialize in a safe manner. We
2829       // will achieve this with the use of two barrier sync points.
2830 
2831       if (!is_serial) {
2832         // We only need to enter the sync barrier if being called
2833         // from a parallel context
2834         _cm->enter_first_sync_barrier(_worker_id);
2835 
2836         // When we exit this sync barrier we know that all tasks have
2837         // stopped doing marking work. So, it's now safe to
2838         // re-initialize our data structures.
2839       }
2840 
2841       clear_region_fields();
2842       flush_mark_stats_cache();
2843 
2844       if (!is_serial) {
2845         // If we're executing the concurrent phase of marking, reset the marking
2846         // state; otherwise the marking state is reset after reference processing,
2847         // during the remark pause.
2848         // If we reset here as a result of an overflow during the remark we will
2849         // see assertion failures from any subsequent set_concurrency_and_phase()
2850         // calls.
2851         if (_cm->concurrent() && _worker_id == 0) {
2852           // Worker 0 is responsible for clearing the global data structures after
2853           // an overflow. During STW we must not clear the overflow flag (in
2854           // G1ConcurrentMark::reset_marking_for_restart()) since we rely on it being
2855           // true when we exit this method, to abort the pause and restart concurrent marking.
2856           _cm->reset_marking_for_restart();
2857 
2858           log_info(gc, marking)("Concurrent Mark reset for overflow");
2859         }
2860 
2861         // ...and enter the second barrier.
2862         _cm->enter_second_sync_barrier(_worker_id);
2863       }
2864       // At this point, if we're in the concurrent phase of
2865       // marking, everything has been re-initialized and we're
2866       // ready to restart.
2867     }
2868   }
2869 }
2870 
2871 G1CMTask::G1CMTask(uint worker_id,
2872                    G1ConcurrentMark* cm,
2873                    G1CMTaskQueue* task_queue,
2874                    G1RegionMarkStats* mark_stats,
2875                    uint max_regions) :
2876   _objArray_processor(this),
2877   _worker_id(worker_id),
2878   _g1h(G1CollectedHeap::heap()),
2879   _cm(cm),
2880   _next_mark_bitmap(NULL),
2881   _task_queue(task_queue),
2882   _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2883   _calls(0),
2884   _time_target_ms(0.0),
2885   _start_time_ms(0.0),
2886   _cm_oop_closure(NULL),
2887   _curr_region(NULL),
2888   _finger(NULL),
2889   _region_limit(NULL),
2890   _words_scanned(0),
2891   _words_scanned_limit(0),
2892   _real_words_scanned_limit(0),
2893   _refs_reached(0),
2894   _refs_reached_limit(0),
2895   _real_refs_reached_limit(0),
2896   _has_aborted(false),
2897   _has_timed_out(false),
2898   _draining_satb_buffers(false),
2899   _step_times_ms(),
2900   _elapsed_time_ms(0.0),
2901   _termination_time_ms(0.0),
2902   _termination_start_time_ms(0.0),
2903   _marking_step_diffs_ms()
2904 {
2905   guarantee(task_queue != NULL, "invariant");
2906 
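       // Seed the step-time diff statistics with a small value so that the
       // sequence never starts from an empty sample.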
2907   _marking_step_diffs_ms.add(0.5);
2908 }
2909 
2910 // These are formatting macros that are used below to ensure
2911 // consistent formatting. The *_H_* versions are used to format the
2912 // header for a particular value and they should be kept consistent
2913 // with the corresponding macro. Also note that most of the macros add
2914 // the necessary white space (as a prefix) which makes them a bit
2915 // easier to compose.
2916 
2917 // All the output lines are prefixed with this string to be able to
2918 // identify them easily in a large log file.
2919 #define G1PPRL_LINE_PREFIX            "###"
2920 
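     // The address range is printed as two PTR_FORMAT values separated by '-':
     // 2 * 18 + 1 = 37 characters on 64-bit and 2 * 10 + 1 = 21 on 32-bit,
     // which is what the header widths below account for.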
2921 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
2922 #ifdef _LP64
2923 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
2924 #else // _LP64
2925 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
2926 #endif // _LP64
2927 
2928 // For per-region info
2929 #define G1PPRL_TYPE_FORMAT            "   %-4s"
2930 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
2931 #define G1PPRL_STATE_FORMAT           "   %-5s"
2932 #define G1PPRL_STATE_H_FORMAT         "   %5s"
2933 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
2934 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
2935 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
2936 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
2937 
2938 // For summary info
2939 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
2940 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
2941 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
2942 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
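     // As an example of how these compose, G1PPRL_SUM_MB_PERC_FORMAT("used")
     // expands to "  used: %1.2f MB / %1.2f %%", the form used in the summary
     // line printed by the destructor below.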
2943 
2944 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2945   _total_used_bytes(0), _total_capacity_bytes(0),
2946   _total_prev_live_bytes(0), _total_next_live_bytes(0),
2947   _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2948 {
2949   if (!log_is_enabled(Trace, gc, liveness)) {
2950     return;
2951   }
2952 
2953   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2954   MemRegion g1_reserved = g1h->g1_reserved();
2955   double now = os::elapsedTime();
2956 
2957   // Print the header of the output.
2958   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2959   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2960                           G1PPRL_SUM_ADDR_FORMAT("reserved")
2961                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
2962                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
2963                           HeapRegion::GrainBytes);
2964   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2965   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2966                           G1PPRL_TYPE_H_FORMAT
2967                           G1PPRL_ADDR_BASE_H_FORMAT
2968                           G1PPRL_BYTE_H_FORMAT
2969                           G1PPRL_BYTE_H_FORMAT
2970                           G1PPRL_BYTE_H_FORMAT
2971                           G1PPRL_DOUBLE_H_FORMAT
2972                           G1PPRL_BYTE_H_FORMAT
2973                           G1PPRL_STATE_H_FORMAT
2974                           G1PPRL_BYTE_H_FORMAT,
2975                           "type", "address-range",
2976                           "used", "prev-live", "next-live", "gc-eff",
2977                           "remset", "state", "code-roots");
2978   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2979                           G1PPRL_TYPE_H_FORMAT
2980                           G1PPRL_ADDR_BASE_H_FORMAT
2981                           G1PPRL_BYTE_H_FORMAT
2982                           G1PPRL_BYTE_H_FORMAT
2983                           G1PPRL_BYTE_H_FORMAT
2984                           G1PPRL_DOUBLE_H_FORMAT
2985                           G1PPRL_BYTE_H_FORMAT
2986                           G1PPRL_STATE_H_FORMAT
2987                           G1PPRL_BYTE_H_FORMAT,
2988                           "", "",
2989                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
2990                           "(bytes)", "", "(bytes)");
2991 }
2992 
2993 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
2994   if (!log_is_enabled(Trace, gc, liveness)) {
2995     return false;
2996   }
2997 
2998   const char* type       = r->get_type_str();
2999   HeapWord* bottom       = r->bottom();
3000   HeapWord* end          = r->end();
3001   size_t capacity_bytes  = r->capacity();
3002   size_t used_bytes      = r->used();
3003   size_t prev_live_bytes = r->live_bytes();
3004   size_t next_live_bytes = r->next_live_bytes();
3005   double gc_eff          = r->gc_efficiency();
3006   size_t remset_bytes    = r->rem_set()->mem_size();
3007   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3008   const char* remset_type = r->rem_set()->get_short_state_str();
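       // live_bytes() and next_live_bytes() report liveness according to the
       // previous (completed) marking and the marking currently in progress,
       // respectively; they feed the "prev-live" and "next-live" columns.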
3009 
3010   _total_used_bytes      += used_bytes;
3011   _total_capacity_bytes  += capacity_bytes;
3012   _total_prev_live_bytes += prev_live_bytes;
3013   _total_next_live_bytes += next_live_bytes;
3014   _total_remset_bytes    += remset_bytes;
3015   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3016 
3017   // Print a line for this particular region.
3018   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3019                           G1PPRL_TYPE_FORMAT
3020                           G1PPRL_ADDR_BASE_FORMAT
3021                           G1PPRL_BYTE_FORMAT
3022                           G1PPRL_BYTE_FORMAT
3023                           G1PPRL_BYTE_FORMAT
3024                           G1PPRL_DOUBLE_FORMAT
3025                           G1PPRL_BYTE_FORMAT
3026                           G1PPRL_STATE_FORMAT
3027                           G1PPRL_BYTE_FORMAT,
3028                           type, p2i(bottom), p2i(end),
3029                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3030                           remset_bytes, remset_type, strong_code_roots_bytes);
3031 
3032   return false;
3033 }
3034 
3035 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3036   if (!log_is_enabled(Trace, gc, liveness)) {
3037     return;
3038   }
3039 
3040   // Add the free-list and static memory overhead to the total remembered set size.
3041   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3042   // Print the footer of the output.
3043   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3044   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3045                          " SUMMARY"
3046                          G1PPRL_SUM_MB_FORMAT("capacity")
3047                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3048                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3049                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3050                          G1PPRL_SUM_MB_FORMAT("remset")
3051                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3052                          bytes_to_mb(_total_capacity_bytes),
3053                          bytes_to_mb(_total_used_bytes),
3054                          percent_of(_total_used_bytes, _total_capacity_bytes),
3055                          bytes_to_mb(_total_prev_live_bytes),
3056                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3057                          bytes_to_mb(_total_next_live_bytes),
3058                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
3059                          bytes_to_mb(_total_remset_bytes),
3060                          bytes_to_mb(_total_strong_code_roots_bytes));
3061 }