/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}
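// Worked example (illustrative numbers only): on a 64-bit VM with a 4K
// allocation granularity and 8K chunks (EntriesPerChunk one-word entries
// plus the next pointer), the result is lcm(4096, 8192) / 8 = 1024 entries,
// i.e. capacities get rounded to whole chunks that also respect the mmap
// allocation granularity.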

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}
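// Note: a false return from par_push_chunk() means that both the free list
// and the backing store are exhausted, i.e. the global mark stack has
// overflown. Callers then raise the overflow flag, and marking is restarted
// after reset_marking_for_restart() has emptied and, if possible, expanded
// the stack.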

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

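// Note: not thread-safe. set_empty() resets all stack state and is expected
// to run only while no marking thread pushes or pops concurrently, e.g. at
// initialization, when resizing the (empty) stack, or when marking restarts
// after an overflow.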
void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _survivors(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

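// Root regions must be completely scanned before an evacuation pause may
// move their objects, so pauses that start while the concurrent scan is
// still running block here until the scan has finished (or was cancelled).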
bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}
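// For example, 8 STW GC workers yield (8 + 2) / 4 = 2 concurrent workers;
// the MAX2 keeps the result at 1 even for a single GC worker.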

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use a different number of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Got a continues humongous region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
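  // Worked example (illustrative): for a 1 GB heap and a heap_map_factor of
  // 64 (one bitmap bit per 8-byte heap word), 16 MB of bitmap have to be
  // cleared, i.e. 16 chunks of 1 MB each and thus at most 16 useful workers.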

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_initial_mark() {
  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);
}

void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle, we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended so that a Full GC or an evacuation
 * pause could occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */
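/*
 * Concretely, without leaving the STS the following interleaving could
 * deadlock (illustrative restatement of the above): worker A enters the
 * sync barrier and blocks waiting for worker B, while a safepoint request
 * makes worker B wait in its yield point for all suspendible threads,
 * including the blocked A, to yield; neither thread can make progress.
 */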

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark*     _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial*/);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
      AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                      1, /* Minimum workers */
                                                      _num_concurrent_workers,
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_worker_threads() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}

class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }
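    // Worked example (assuming 1 MB regions, i.e. GrainWords = 131072): a
    // live humongous object of 327680 words spanning three regions gets
    // 131072, 131072 and 65536 marked words attributed to the respective
    // regions, leaving exactly zero words after the loop.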

    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous starts
      // region. We need to distribute this value across the entire set of regions a
      // humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions for which roughly one thread should be spawned for this work.
  static const uint RegionsPerThread = 384;
};

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
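      // Example: a 2048-region heap yields (2048 + 383) / 384 = 6 workers by
      // capacity, which is then capped by the currently active workers.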

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  } else {
    // We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();
}

class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;
    HRRSCleanupTask* _hrrs_cleanup_task;

  public:
    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                 FreeRegionList* local_cleanup_list,
                                 HRRSCleanupTask* hrrs_cleanup_task) :
      _g1h(g1h),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0),
      _hrrs_cleanup_task(hrrs_cleanup_task) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      } else {
        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {

    HeapRegionRemSet::reset_for_cleanup_tasks();
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1ReclaimEmptyRegionsClosure cl(_g1h,
                                    &local_cleanup_list,
                                    &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}

void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
  }
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
1376 // Uses the G1CMTask associated with a worker thread (for serial reference
1377 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1378 // trace referent objects.
1379 //
1380 // Using the G1CMTask and embedded local queues avoids having the worker
1381 // threads operating on the global mark stack. This reduces the risk
1382 // of overflowing the stack - which we would rather avoid at this late
1383 // state. Also using the tasks' local queues removes the potential
1384 // of the workers interfering with each other that could occur if
1385 // operating on the global stack.
1386 
1387 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1388   G1ConcurrentMark* _cm;
1389   G1CMTask*         _task;
1390   uint              _ref_counter_limit;
1391   uint              _ref_counter;
1392   bool              _is_serial;
1393 public:
1394   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1395     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1396     _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1397     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1398   }
1399 
1400   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1401   virtual void do_oop(      oop* p) { do_oop_work(p); }
1402 
1403   template <class T> void do_oop_work(T* p) {
1404     if (_cm->has_overflown()) {
1405       return;
1406     }
1407     if (!_task->deal_with_reference(p)) {
1408       // We did not add anything to the mark bitmap (or mark stack), so there is
1409       // no point trying to drain it.
1410       return;
1411     }
1412     _ref_counter--;
1413 
1414     if (_ref_counter == 0) {
1415       // We have dealt with _ref_counter_limit references, pushing them
1416       // and objects reachable from them on to the local stack (and
1417       // possibly the global stack). Call G1CMTask::do_marking_step() to
1418       // process these entries.
1419       //
1420       // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1421       // there's nothing more to do (i.e. we're done with the entries that
1422       // were pushed as a result of the G1CMTask::deal_with_reference() calls
1423       // above) or we overflow.
1424       //
1425       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1426       // flag while there may still be some work to do. (See the comment at
1427       // the beginning of G1CMTask::do_marking_step() for those conditions -
1428       // one of which is reaching the specified time target.) It is only
1429       // when G1CMTask::do_marking_step() returns without setting the
1430       // has_aborted() flag that the marking step has completed.
1431       do {
1432         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1433         _task->do_marking_step(mark_step_duration_ms,
1434                                false      /* do_termination */,
1435                                _is_serial);
1436       } while (_task->has_aborted() && !_cm->has_overflown());
1437       _ref_counter = _ref_counter_limit;
1438     }
1439   }
1440 };
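
// The counter logic above is an instance of the generic "process a batch,
// then drain" scheme. A minimal standalone sketch of just that scheme
// (hypothetical names, for illustration only; not part of this file's API):
//
//   class BatchedDrainer {
//     size_t       _budget;    // items left before the next drain
//     const size_t _interval;  // batch size, e.g. G1RefProcDrainInterval
//   public:
//     BatchedDrainer(size_t interval) : _budget(interval), _interval(interval) { }
//     void process_one() {
//       if (--_budget == 0) {  // a full batch has been handled...
//         drain();             // ...so pay the drain cost once, amortized
//         _budget = _interval; // and start counting a new batch
//       }
//     }
//     void drain() { /* flush local queue and global stack */ }
//   };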
1441 
1442 // 'Drain' oop closure used by both serial and parallel reference processing.
1443 // Uses the G1CMTask associated with a given worker thread (for serial
1444 // reference processing the G1CMTask for worker 0 is used). Calls the
1445 // do_marking_step routine, with an unbelievably large timeout value,
1446 // to drain the marking data structures of the remaining entries
1447 // added by the 'keep alive' oop closure above.
1448 
1449 class G1CMDrainMarkingStackClosure : public VoidClosure {
1450   G1ConcurrentMark* _cm;
1451   G1CMTask*         _task;
1452   bool              _is_serial;
1453  public:
1454   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1455     _cm(cm), _task(task), _is_serial(is_serial) {
1456     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1457   }
1458 
1459   void do_void() {
1460     do {
1461       // We call G1CMTask::do_marking_step() to completely drain the local
1462       // and global marking stacks of entries pushed by the 'keep alive'
1463       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1464       //
1465       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1466       // if there's nothing more to do (i.e. we've completely drained the
1467       // entries that were pushed as a result of applying the 'keep alive'
1468       // closure to the entries on the discovered ref lists) or we overflow
1469       // the global marking stack.
1470       //
1471       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1472       // flag while there may still be some work to do. (See the comment at
1473       // the beginning of G1CMTask::do_marking_step() for those conditions -
1474       // one of which is reaching the specified time target.) It is only
1475       // when G1CMTask::do_marking_step() returns without setting the
1476       // has_aborted() flag that the marking step has completed.
1477 
1478       _task->do_marking_step(1000000000.0 /* something very large */,
1479                              true         /* do_termination */,
1480                              _is_serial);
1481     } while (_task->has_aborted() && !_cm->has_overflown());
1482   }
1483 };
1484 
1485 // Implementation of AbstractRefProcTaskExecutor for parallel
1486 // reference processing at the end of G1 concurrent marking
1487 
1488 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1489 private:
1490   G1CollectedHeap*  _g1h;
1491   G1ConcurrentMark* _cm;
1492   WorkGang*         _workers;
1493   uint              _active_workers;
1494 
1495 public:
1496   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1497                           G1ConcurrentMark* cm,
1498                           WorkGang* workers,
1499                           uint n_workers) :
1500     _g1h(g1h), _cm(cm),
1501     _workers(workers), _active_workers(n_workers) { }
1502 
1503   virtual void execute(ProcessTask& task, uint ergo_workers);
1504 };
1505 
1506 class G1CMRefProcTaskProxy : public AbstractGangTask {
1507   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1508   ProcessTask&      _proc_task;
1509   G1CollectedHeap*  _g1h;
1510   G1ConcurrentMark* _cm;
1511 
1512 public:
1513   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1514                        G1CollectedHeap* g1h,
1515                        G1ConcurrentMark* cm) :
1516     AbstractGangTask("Process reference objects in parallel"),
1517     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1518     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1519     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1520   }
1521 
1522   virtual void work(uint worker_id) {
1523     ResourceMark rm;
1524     HandleMark hm;
1525     G1CMTask* task = _cm->task(worker_id);
1526     G1CMIsAliveClosure g1_is_alive(_g1h);
1527     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1528     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1529 
1530     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1531   }
1532 };
1533 
1534 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
1535   assert(_workers != NULL, "Need parallel worker threads.");
1536   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1537   assert(_workers->active_workers() >= ergo_workers,
1538          "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
1539          ergo_workers, _workers->active_workers());
1540 
1541   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1542 
1543   // We need to reset the concurrency level before each
1544   // proxy task execution, so that the termination protocol
1545   // and overflow handling in G1CMTask::do_marking_step() knows
1546   // how many workers to wait for.
1547   _cm->set_concurrency(ergo_workers);
1548   if (ergo_workers == 1) {
1549     proc_task_proxy.work(0);
1550   } else {
1551     _workers->run_task(&proc_task_proxy, ergo_workers);
1552   }
1553 }
1554 
1555 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1556   ResourceMark rm;
1557   HandleMark   hm;
1558 
1559   // Is alive closure.
1560   G1CMIsAliveClosure g1_is_alive(_g1h);
1561 
1562   // Inner scope to exclude the cleaning of the string table
1563   // from the displayed time.
1564   {
1565     GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1566 
1567     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1568 
1569     // See the comment in G1CollectedHeap::ref_processing_init()
1570     // about how reference processing currently works in G1.
1571 
1572     // Set the soft reference policy
1573     rp->setup_policy(clear_all_soft_refs);
1574     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1575 
1576     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1577     // in serial reference processing. Note these closures are also
1578     // used for serially processing (by the current thread) the
1579     // JNI references during parallel reference processing.
1580     //
1581     // These closures do not need to synchronize with the worker
1582     // threads involved in parallel reference processing as these
1583     // instances are executed serially by the current thread (i.e.
1584     // reference processing is not multi-threaded and is thus
1585     // performed by the current thread instead of a gang worker).
1586     //
1587     // The gang tasks involved in parallel reference processing create
1588     // their own instances of these closures, which do their own
1589     // synchronization among themselves.
1590     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1591     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1592 
1593     // We need at least one active thread. If reference processing
1594     // is not multi-threaded we use the current (VMThread) thread,
1595     // otherwise we use the work gang from the G1CollectedHeap and
1596     // we utilize all the worker threads we can.
1597     bool processing_is_mt = rp->processing_is_mt();
1598     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1599     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1600 
1601     // Parallel processing task executor.
1602     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1603                                               _g1h->workers(), active_workers);
1604     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1605 
1606     // Set the concurrency level. The phase was already set prior to
1607     // executing the remark task.
1608     set_concurrency(active_workers);
1609 
1610     // Set the degree of MT processing here.  If the discovery was done MT,
1611     // the number of threads involved during discovery could differ from
1612     // the number of active workers.  This is OK as long as the discovered
1613     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1614     rp->set_active_mt_degree(active_workers);
1615 
1616     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1617 
1618     // Process the weak references.
1619     const ReferenceProcessorStats& stats =
1620         rp->process_discovered_references(&g1_is_alive,
1621                                           &g1_keep_alive,
1622                                           &g1_drain_mark_stack,
1623                                           executor,
1624                                           &pt);
1625     _gc_tracer_cm->report_gc_reference_stats(stats);
1626     pt.print_all_references();
1627 
1628     // The do_oop work routines of the keep_alive and drain_marking_stack
1629     // oop closures will set the has_overflown flag if we overflow the
1630     // global marking stack.
1631 
1632     assert(has_overflown() || _global_mark_stack.is_empty(),
1633            "Mark stack should be empty (unless it has overflown)");
1634 
1635     assert(rp->num_queues() == active_workers, "why not");
1636 
1637     rp->verify_no_references_recorded();
1638     assert(!rp->discovery_enabled(), "Post condition");
1639   }
1640 
1641   if (has_overflown()) {
1642     // We cannot trust g1_is_alive and the contents of the heap if the marking stack
1643     // overflowed while processing references. Exit the VM.
1644     fatal("Overflow during reference processing, cannot continue. Please "
1645           "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1646           "restart.", MarkStackSizeMax);
1647     return;
1648   }
1649 
1650   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1651 
1652   {
1653     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1654     WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1655   }
1656 
1657   // Unload Klasses, String, Code Cache, etc.
1658   if (ClassUnloadingWithConcurrentMark) {
1659     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1660     bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm, false /* Defer cleaning */);
1661     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1662   } else {
1663     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1664     // No need to clean the string table as it is treated as strong roots when
1665     // class unloading is disabled.
1666     _g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled());
1667   }
1668 }
1669 
1670 class G1PrecleanYieldClosure : public YieldClosure {
1671   G1ConcurrentMark* _cm;
1672 
1673 public:
1674   G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1675 
1676   virtual bool should_return() {
1677     return _cm->has_aborted();
1678   }
1679 
1680   virtual bool should_return_fine_grain() {
1681     _cm->do_yield_check();
1682     return _cm->has_aborted();
1683   }
1684 };
1685 
1686 void G1ConcurrentMark::preclean() {
1687   assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1688 
1689   SuspendibleThreadSetJoiner joiner;
1690 
1691   G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1692   G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1693 
1694   set_concurrency_and_phase(1, true /* concurrent */);
1695 
1696   G1PrecleanYieldClosure yield_cl(this);
1697 
1698   ReferenceProcessor* rp = _g1h->ref_processor_cm();
1699   // Precleaning is single threaded. Temporarily disable MT discovery.
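  // (ReferenceProcessorMTDiscoveryMutator is a scoped helper: it records the
  // current MT-discovery setting and restores it when it goes out of scope,
  // so discovery is only single threaded for the duration of this call.)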
1700   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1701   rp->preclean_discovered_references(rp->is_alive_non_header(),
1702                                      &keep_alive,
1703                                      &drain_mark_stack,
1704                                      &yield_cl,
1705                                      _gc_timer_cm);
1706 }
1707 
1708 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1709 // the prev bitmap to determine liveness.
1710 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1711   G1CollectedHeap* _g1h;
1712 public:
1713   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1714 
1715   bool do_object_b(oop obj) {
1716     HeapWord* addr = (HeapWord*)obj;
1717     return addr != NULL &&
1718            (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1719   }
1720 };
1721 
1722 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1723   // Depending on whether the marking completed, liveness needs to be
1724   // determined using either the next or the prev bitmap.
1725   if (mark_completed) {
1726     G1ObjectCountIsAliveClosure is_alive(_g1h);
1727     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1728   } else {
1729     G1CMIsAliveClosure is_alive(_g1h);
1730     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1731   }
1732 }
1733 
1734 
1735 void G1ConcurrentMark::swap_mark_bitmaps() {
1736   G1CMBitMap* temp = _prev_mark_bitmap;
1737   _prev_mark_bitmap = _next_mark_bitmap;
1738   _next_mark_bitmap = temp;
1739   _g1h->collector_state()->set_clearing_next_bitmap(true);
1740 }
1741 
1742 // Closure for marking entries in SATB buffers.
1743 class G1CMSATBBufferClosure : public SATBBufferClosure {
1744 private:
1745   G1CMTask* _task;
1746   G1CollectedHeap* _g1h;
1747 
1748   // This is very similar to G1CMTask::deal_with_reference, but with
1749   // more relaxed requirements for the argument, so this must be more
1750   // circumspect about treating the argument as an object.
1751   void do_entry(void* entry) const {
1752     _task->increment_refs_reached();
1753     oop const obj = static_cast<oop>(entry);
1754     _task->make_reference_grey(obj);
1755   }
1756 
1757 public:
1758   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1759     : _task(task), _g1h(g1h) { }
1760 
1761   virtual void do_buffer(void** buffer, size_t size) {
1762     for (size_t i = 0; i < size; ++i) {
1763       do_entry(buffer[i]);
1764     }
1765   }
1766 };
1767 
1768 class G1RemarkThreadsClosure : public ThreadClosure {
1769   G1CMSATBBufferClosure _cm_satb_cl;
1770   G1CMOopClosure _cm_cl;
1771   MarkingCodeBlobClosure _code_cl;
1772   int _thread_parity;
1773 
1774  public:
1775   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1776     _cm_satb_cl(task, g1h),
1777     _cm_cl(g1h, task),
1778     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1779     _thread_parity(Threads::thread_claim_parity()) {}
1780 
1781   void do_thread(Thread* thread) {
1782     if (thread->is_Java_thread()) {
1783       if (thread->claim_oops_do(true, _thread_parity)) {
1784         JavaThread* jt = (JavaThread*)thread;
1785 
1786         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1787         // however, oops reachable from nmethods have very complex lifecycles:
1788         // * Alive if on the stack of an executing method
1789         // * Weakly reachable otherwise
1790         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1791         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1792         jt->nmethods_do(&_code_cl);
1793 
1794         G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl);
1795       }
1796     } else if (thread->is_VM_thread()) {
1797       if (thread->claim_oops_do(true, _thread_parity)) {
1798         G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1799       }
1800     }
1801   }
1802 };
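
// The claim_oops_do()/thread_claim_parity() pair above implements a simple
// claiming protocol: within one claiming round each thread is successfully
// claimed by exactly one worker, so all remark workers can walk the full
// thread list concurrently without processing any thread twice.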
1803 
1804 class G1CMRemarkTask : public AbstractGangTask {
1805   G1ConcurrentMark* _cm;
1806 public:
1807   void work(uint worker_id) {
1808     G1CMTask* task = _cm->task(worker_id);
1809     task->record_start_time();
1810     {
1811       ResourceMark rm;
1812       HandleMark hm;
1813 
1814       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1815       Threads::threads_do(&threads_f);
1816     }
1817 
1818     do {
1819       task->do_marking_step(1000000000.0 /* something very large */,
1820                             true         /* do_termination       */,
1821                             false        /* is_serial            */);
1822     } while (task->has_aborted() && !_cm->has_overflown());
1823     // If we overflow, then we do not want to restart. We instead
1824     // want to abort remark and do concurrent marking again.
1825     task->record_end_time();
1826   }
1827 
1828   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1829     AbstractGangTask("Par Remark"), _cm(cm) {
1830     _cm->terminator()->reset_for_reuse(active_workers);
1831   }
1832 };
1833 
1834 void G1ConcurrentMark::finalize_marking() {
1835   ResourceMark rm;
1836   HandleMark   hm;
1837 
1838   _g1h->ensure_parsability(false);
1839 
1840   // This is remark, so we'll use up all active threads.
1841   uint active_workers = _g1h->workers()->active_workers();
1842   set_concurrency_and_phase(active_workers, false /* concurrent */);
1843   // Leave _parallel_marking_threads at its
1844   // value originally calculated in the G1ConcurrentMark
1845   // constructor and pass values of the active workers
1846   // through the gang in the task.
1847 
1848   {
1849     StrongRootsScope srs(active_workers);
1850 
1851     G1CMRemarkTask remarkTask(this, active_workers);
1852     // We will start all available threads, even if we decide that the
1853     // active_workers will be fewer. The extra ones will just bail out
1854     // immediately.
1855     _g1h->workers()->run_task(&remarkTask);
1856   }
1857 
1858   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1859   guarantee(has_overflown() ||
1860             satb_mq_set.completed_buffers_num() == 0,
1861             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1862             BOOL_TO_STR(has_overflown()),
1863             satb_mq_set.completed_buffers_num());
1864 
1865   print_stats();
1866 }
1867 
1868 void G1ConcurrentMark::flush_all_task_caches() {
1869   size_t hits = 0;
1870   size_t misses = 0;
1871   for (uint i = 0; i < _max_num_tasks; i++) {
1872     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1873     hits += stats.first;
1874     misses += stats.second;
1875   }
1876   size_t sum = hits + misses;
1877   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1878                        hits, misses, percent_of(hits, sum));
1879 }
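
// A worked example of the log line above (hypothetical numbers): with
// hits = 900 and misses = 100, sum = 1000 and percent_of(hits, sum)
// evaluates to 90.0, so the output reads
// "Mark stats cache hits 900 misses 100 ratio 90.000".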
1880 
1881 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1882   _prev_mark_bitmap->clear_range(mr);
1883 }
1884 
1885 HeapRegion*
1886 G1ConcurrentMark::claim_region(uint worker_id) {
1887   // "checkpoint" the finger
1888   HeapWord* finger = _finger;
1889 
1890   while (finger < _heap.end()) {
1891     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1892 
1893     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1894     // Make sure that the reads below do not float before loading curr_region.
1895     OrderAccess::loadload();
1896     // heap_region_containing above may return NULL, as we always claim
1897     // regions until the end of the heap. In this case, just jump to the next region.
1898     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1899 
1900     // Is the gap between reading the finger and doing the CAS too long?
1901     HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1902     if (res == finger && curr_region != NULL) {
1903       // we succeeded
1904       HeapWord*   bottom        = curr_region->bottom();
1905       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1906 
1907       // Notice that _finger == end cannot be guaranteed here since
1908       // someone else might have moved the finger even further.
1909       assert(_finger >= end, "the finger should have moved forward");
1910 
1911       if (limit > bottom) {
1912         return curr_region;
1913       } else {
1914         assert(limit == bottom,
1915                "the region limit should be at bottom");
1916         // we return NULL and the caller should try calling
1917         // claim_region() again.
1918         return NULL;
1919       }
1920     } else {
1921       assert(_finger > finger, "the finger should have moved forward");
1922       // read it again
1923       finger = _finger;
1924     }
1925   }
1926 
1927   return NULL;
1928 }
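
// At its core, the claiming protocol above is a CAS loop over a shared
// cursor. A minimal sketch of just that pattern (hypothetical names; the
// real code above additionally handles NULL regions and empty limits):
//
//   HeapWord* claim_next(HeapWord* volatile* cursor, size_t step, HeapWord* heap_end) {
//     HeapWord* cur = *cursor;
//     while (cur < heap_end) {
//       HeapWord* next = cur + step;
//       HeapWord* seen = Atomic::cmpxchg(next, cursor, cur);
//       if (seen == cur) {
//         return cur;   // we moved the cursor forward; [cur, next) is ours
//       }
//       cur = seen;     // somebody else moved it; retry from their value
//     }
//     return NULL;      // ran off the end of the heap
//   }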
1929 
1930 #ifndef PRODUCT
1931 class VerifyNoCSetOops {
1932   G1CollectedHeap* _g1h;
1933   const char* _phase;
1934   int _info;
1935 
1936 public:
1937   VerifyNoCSetOops(const char* phase, int info = -1) :
1938     _g1h(G1CollectedHeap::heap()),
1939     _phase(phase),
1940     _info(info)
1941   { }
1942 
1943   void operator()(G1TaskQueueEntry task_entry) const {
1944     if (task_entry.is_array_slice()) {
1945       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1946       return;
1947     }
1948     guarantee(oopDesc::is_oop(task_entry.obj()),
1949               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1950               p2i(task_entry.obj()), _phase, _info);
1951     guarantee(!_g1h->is_in_cset(task_entry.obj()),
1952               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
1953               p2i(task_entry.obj()), _phase, _info);
1954   }
1955 };
1956 
1957 void G1ConcurrentMark::verify_no_cset_oops() {
1958   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1959   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1960     return;
1961   }
1962 
1963   // Verify entries on the global mark stack
1964   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1965 
1966   // Verify entries on the task queues
1967   for (uint i = 0; i < _max_num_tasks; ++i) {
1968     G1CMTaskQueue* queue = _task_queues->queue(i);
1969     queue->iterate(VerifyNoCSetOops("Queue", i));
1970   }
1971 
1972   // Verify the global finger
1973   HeapWord* global_finger = finger();
1974   if (global_finger != NULL && global_finger < _heap.end()) {
1975     // Since we always iterate over all regions, we might get a NULL HeapRegion
1976     // here.
1977     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1978     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1979               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1980               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1981   }
1982 
1983   // Verify the task fingers
1984   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1985   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1986     G1CMTask* task = _tasks[i];
1987     HeapWord* task_finger = task->finger();
1988     if (task_finger != NULL && task_finger < _heap.end()) {
1989       // See above note on the global finger verification.
1990       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
1991       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
1992                 !task_hr->in_collection_set(),
1993                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1994                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
1995     }
1996   }
1997 }
1998 #endif // PRODUCT
1999 
2000 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
2001   _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
2002 }
2003 
2004 void G1ConcurrentMark::print_stats() {
2005   if (!log_is_enabled(Debug, gc, stats)) {
2006     return;
2007   }
2008   log_debug(gc, stats)("---------------------------------------------------------------------");
2009   for (size_t i = 0; i < _num_active_tasks; ++i) {
2010     _tasks[i]->print_stats();
2011     log_debug(gc, stats)("---------------------------------------------------------------------");
2012   }
2013 }
2014 
2015 void G1ConcurrentMark::concurrent_cycle_abort() {
2016   if (!cm_thread()->during_cycle() || _has_aborted) {
2017     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2018     return;
2019   }
2020 
2021   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2022   // concurrent bitmap clearing.
2023   {
2024     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2025     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2026   }
2027   // Note we cannot clear the previous marking bitmap here
2028   // since VerifyDuringGC verifies the objects marked during
2029   // a full GC against the previous bitmap.
2030 
2031   // Empty mark stack
2032   reset_marking_for_restart();
2033   for (uint i = 0; i < _max_num_tasks; ++i) {
2034     _tasks[i]->clear_region_fields();
2035   }
2036   _first_overflow_barrier_sync.abort();
2037   _second_overflow_barrier_sync.abort();
2038   _has_aborted = true;
2039 
2040   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2041   satb_mq_set.abandon_partial_marking();
2042   // This can be called either during or outside marking; we'll read
2043   // the expected_active value from the SATB queue set.
2044   satb_mq_set.set_active_all_threads(
2045                                  false, /* new active value */
2046                                  satb_mq_set.is_active() /* expected_active */);
2047 }
2048 
2049 static void print_ms_time_info(const char* prefix, const char* name,
2050                                NumberSeq& ns) {
2051   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2052                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2053   if (ns.num() > 0) {
2054     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2055                            prefix, ns.sd(), ns.maximum());
2056   }
2057 }
2058 
2059 void G1ConcurrentMark::print_summary_info() {
2060   Log(gc, marking) log;
2061   if (!log.is_trace()) {
2062     return;
2063   }
2064 
2065   log.trace(" Concurrent marking:");
2066   print_ms_time_info("  ", "init marks", _init_times);
2067   print_ms_time_info("  ", "remarks", _remark_times);
2068   {
2069     print_ms_time_info("     ", "final marks", _remark_mark_times);
2070     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2072   }
2073   print_ms_time_info("  ", "cleanups", _cleanup_times);
2074   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2075             _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2076   log.trace("  Total stop_world time = %8.2f s.",
2077             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2078   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2079             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2080 }
2081 
2082 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2083   _concurrent_workers->print_worker_threads_on(st);
2084 }
2085 
2086 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2087   _concurrent_workers->threads_do(tc);
2088 }
2089 
2090 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2091   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2092                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2093   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2094   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2095 }
2096 
2097 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2098   ReferenceProcessor* result = g1h->ref_processor_cm();
2099   assert(result != NULL, "CM reference processor should not be NULL");
2100   return result;
2101 }
2102 
2103 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2104                                G1CMTask* task)
2105   : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2106     _g1h(g1h), _task(task)
2107 { }
2108 
2109 void G1CMTask::setup_for_region(HeapRegion* hr) {
2110   assert(hr != NULL,
2111         "claim_region() should have filtered out NULL regions");
2112   _curr_region  = hr;
2113   _finger       = hr->bottom();
2114   update_region_limit();
2115 }
2116 
2117 void G1CMTask::update_region_limit() {
2118   HeapRegion* hr            = _curr_region;
2119   HeapWord* bottom          = hr->bottom();
2120   HeapWord* limit           = hr->next_top_at_mark_start();
2121 
2122   if (limit == bottom) {
2123     // The region was collected underneath our feet.
2124     // We set the finger to bottom to ensure that the bitmap
2125     // iteration that will follow this will not do anything.
2126     // (this is not a condition that holds when we set the region up,
2127     // as the region is not supposed to be empty in the first place)
2128     _finger = bottom;
2129   } else if (limit >= _region_limit) {
2130     assert(limit >= _finger, "peace of mind");
2131   } else {
2132     assert(limit < _region_limit, "only way to get here");
2133     // This can happen under some pretty unusual circumstances.  An
2134     // evacuation pause empties the region underneath our feet (NTAMS
2135     // at bottom). We then do some allocation in the region (NTAMS
2136     // stays at bottom), followed by the region being used as a GC
2137     // alloc region (NTAMS will move to top() and the objects
2138     // originally below it will be grayed). All objects now marked in
2139     // the region are explicitly grayed, if below the global finger,
2140   // and in fact we do not need to scan anything else. So, we simply
2141   // set _finger to limit to ensure that the bitmap iteration
2142     // doesn't do anything.
2143     _finger = limit;
2144   }
2145 
2146   _region_limit = limit;
2147 }
2148 
2149 void G1CMTask::giveup_current_region() {
2150   assert(_curr_region != NULL, "invariant");
2151   clear_region_fields();
2152 }
2153 
2154 void G1CMTask::clear_region_fields() {
2155   // Values for these three fields that indicate that we're not
2156   // holding on to a region.
2157   _curr_region   = NULL;
2158   _finger        = NULL;
2159   _region_limit  = NULL;
2160 }
2161 
2162 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2163   if (cm_oop_closure == NULL) {
2164     assert(_cm_oop_closure != NULL, "invariant");
2165   } else {
2166     assert(_cm_oop_closure == NULL, "invariant");
2167   }
2168   _cm_oop_closure = cm_oop_closure;
2169 }
2170 
2171 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2172   guarantee(next_mark_bitmap != NULL, "invariant");
2173   _next_mark_bitmap              = next_mark_bitmap;
2174   clear_region_fields();
2175 
2176   _calls                         = 0;
2177   _elapsed_time_ms               = 0.0;
2178   _termination_time_ms           = 0.0;
2179   _termination_start_time_ms     = 0.0;
2180 
2181   _mark_stats_cache.reset();
2182 }
2183 
2184 bool G1CMTask::should_exit_termination() {
2185   regular_clock_call();
2186   // This is called when we are in the termination protocol. We should
2187   // quit if, for some reason, this task wants to abort or the global
2188   // stack is not empty (this means that we can get work from it).
2189   return !_cm->mark_stack_empty() || has_aborted();
2190 }
2191 
2192 void G1CMTask::reached_limit() {
2193   assert(_words_scanned >= _words_scanned_limit ||
2194          _refs_reached >= _refs_reached_limit,
2195          "shouldn't have been called otherwise");
2196   regular_clock_call();
2197 }
2198 
2199 void G1CMTask::regular_clock_call() {
2200   if (has_aborted()) {
2201     return;
2202   }
2203 
2204   // First, we need to recalculate the words scanned and refs reached
2205   // limits for the next clock call.
2206   recalculate_limits();
2207 
2208   // During the regular clock call we do the following
2209 
2210   // (1) If an overflow has been flagged, then we abort.
2211   if (_cm->has_overflown()) {
2212     set_has_aborted();
2213     return;
2214   }
2215 
2216   // If we are not concurrent (i.e. we're doing remark) we don't need
2217   // to check anything else. The other steps are only needed during
2218   // the concurrent marking phase.
2219   if (!_cm->concurrent()) {
2220     return;
2221   }
2222 
2223   // (2) If marking has been aborted for Full GC, then we also abort.
2224   if (_cm->has_aborted()) {
2225     set_has_aborted();
2226     return;
2227   }
2228 
2229   double curr_time_ms = os::elapsedVTime() * 1000.0;
2230 
2231   // (3) We check whether we should yield. If we have to, then we abort.
2232   if (SuspendibleThreadSet::should_yield()) {
2233     // We should yield. To do this we abort the task. The caller is
2234     // responsible for yielding.
2235     set_has_aborted();
2236     return;
2237   }
2238 
2239   // (4) We check whether we've reached our time quota. If we have,
2240   // then we abort.
2241   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2242   if (elapsed_time_ms > _time_target_ms) {
2243     set_has_aborted();
2244     _has_timed_out = true;
2245     return;
2246   }
2247 
2248   // (5) Finally, we check whether there are enough completed SATB
2249   // buffers available for processing. If there are, we abort.
2250   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2251   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2252     // We do need to process SATB buffers, so we'll abort and restart
2253     // the marking task to do so.
2254     set_has_aborted();
2255     return;
2256   }
2257 }
2258 
2259 void G1CMTask::recalculate_limits() {
2260   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2261   _words_scanned_limit      = _real_words_scanned_limit;
2262 
2263   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2264   _refs_reached_limit       = _real_refs_reached_limit;
2265 }
2266 
2267 void G1CMTask::decrease_limits() {
2268   // This is called when we believe that we're going to do an infrequent
2269   // operation which will increase the per-byte scanned cost (i.e. move
2270   // entries to/from the global stack). It basically tries to decrease the
2271   // scanning limit so that the clock is called earlier.
2272 
2273   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2274   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2275 }
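
// A worked example (hypothetical period): if words_scanned_period were
// 12*1024 words, recalculate_limits() sets the limit to
// _words_scanned + 12*1024, and decrease_limits() then pulls it back by
// 3 * 12*1024 / 4 = 9216 words, so at most a quarter of the usual period
// remains before the clock fires again.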
2276 
2277 void G1CMTask::move_entries_to_global_stack() {
2278   // Local array where we'll store the entries that will be popped
2279   // from the local queue.
2280   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2281 
2282   size_t n = 0;
2283   G1TaskQueueEntry task_entry;
2284   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2285     buffer[n] = task_entry;
2286     ++n;
2287   }
2288   if (n < G1CMMarkStack::EntriesPerChunk) {
2289     buffer[n] = G1TaskQueueEntry();
2290   }
2291 
2292   if (n > 0) {
2293     if (!_cm->mark_stack_push(buffer)) {
2294       set_has_aborted();
2295     }
2296   }
2297 
2298   // This operation was quite expensive, so decrease the limits.
2299   decrease_limits();
2300 }
2301 
2302 bool G1CMTask::get_entries_from_global_stack() {
2303   // Local array where we'll store the entries that will be popped
2304   // from the global stack.
2305   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2306 
2307   if (!_cm->mark_stack_pop(buffer)) {
2308     return false;
2309   }
2310 
2311   // We did actually pop at least one entry.
2312   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2313     G1TaskQueueEntry task_entry = buffer[i];
2314     if (task_entry.is_null()) {
2315       break;
2316     }
2317     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2318     bool success = _task_queue->push(task_entry);
2319     // We only call this when the local queue is empty or under a
2320     // given target limit. So, we do not expect this push to fail.
2321     assert(success, "invariant");
2322   }
2323 
2324   // This operation was quite expensive, so decrease the limits
2325   decrease_limits();
2326   return true;
2327 }
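
// Taken together, move_entries_to_global_stack() and
// get_entries_from_global_stack() transfer work in fixed-size chunks of
// G1CMMarkStack::EntriesPerChunk entries. A partially filled chunk is
// terminated with a default-constructed (null) G1TaskQueueEntry, which is
// why the consumer loop above stops at the first is_null() entry instead of
// always reading the whole buffer.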
2328 
2329 void G1CMTask::drain_local_queue(bool partially) {
2330   if (has_aborted()) {
2331     return;
2332   }
2333 
2334   // Decide what the target size is, depending on whether we're going to
2335   // drain it partially (so that other tasks can steal if they run out
2336   // of things to do) or totally (at the very end).
2337   size_t target_size;
2338   if (partially) {
2339     target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2340   } else {
2341     target_size = 0;
2342   }
2343 
2344   if (_task_queue->size() > target_size) {
2345     G1TaskQueueEntry entry;
2346     bool ret = _task_queue->pop_local(entry);
2347     while (ret) {
2348       scan_task_entry(entry);
2349       if (_task_queue->size() <= target_size || has_aborted()) {
2350         ret = false;
2351       } else {
2352         ret = _task_queue->pop_local(entry);
2353       }
2354     }
2355   }
2356 }
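
// A quick illustration of the partial-drain target (hypothetical values):
// with max_elems() == 16*1024 and GCDrainStackTargetSize == 64 the target is
// MIN2(16*1024 / 3, 64) = 64, i.e. the queue is drained down to a small
// remainder so that other tasks can still find entries to steal.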
2357 
2358 void G1CMTask::drain_global_stack(bool partially) {
2359   if (has_aborted()) {
2360     return;
2361   }
2362 
2363   // We have a policy to drain the local queue before we attempt to
2364   // drain the global stack.
2365   assert(partially || _task_queue->size() == 0, "invariant");
2366 
2367   // Decide what the target size is, depending on whether we're going to
2368   // drain it partially (so that other tasks can steal if they run out
2369   // of things to do) or totally (at the very end).
2370   // Notice that when draining the global mark stack partially, due to the raciness
2371   // of the mark stack size update we might in fact drop below the target. But,
2372   // this is not a problem.
2373   // In case of total draining, we simply process until the global mark stack is
2374   // totally empty, disregarding the size counter.
2375   if (partially) {
2376     size_t const target_size = _cm->partial_mark_stack_size_target();
2377     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2378       if (get_entries_from_global_stack()) {
2379         drain_local_queue(partially);
2380       }
2381     }
2382   } else {
2383     while (!has_aborted() && get_entries_from_global_stack()) {
2384       drain_local_queue(partially);
2385     }
2386   }
2387 }
2388 
2389 // The SATB queue code makes several assumptions about whether to call the par or
2390 // non-par versions of the methods. This is why some of the code is
2391 // replicated. We should really get rid of the single-threaded version
2392 // of the code to simplify things.
2393 void G1CMTask::drain_satb_buffers() {
2394   if (has_aborted()) {
2395     return;
2396   }
2397 
2398   // We set this so that the regular clock knows that we're in the
2399   // middle of draining buffers and doesn't set the abort flag when it
2400   // notices that SATB buffers are available for draining. It'd be
2401   // very counterproductive if it did that. :-)
2402   _draining_satb_buffers = true;
2403 
2404   G1CMSATBBufferClosure satb_cl(this, _g1h);
2405   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2406 
2407   // This keeps claiming and applying the closure to completed buffers
2408   // until we run out of buffers or we need to abort.
2409   while (!has_aborted() &&
2410          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2411     regular_clock_call();
2412   }
2413 
2414   _draining_satb_buffers = false;
2415 
2416   assert(has_aborted() ||
2417          _cm->concurrent() ||
2418          satb_mq_set.completed_buffers_num() == 0, "invariant");
2419 
2420   // Again, this was a potentially expensive operation; decrease the
2421   // limits to get the regular clock call early.
2422   decrease_limits();
2423 }
2424 
2425 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2426   _mark_stats_cache.reset(region_idx);
2427 }
2428 
2429 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2430   return _mark_stats_cache.evict_all();
2431 }
2432 
2433 void G1CMTask::print_stats() {
2434   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2435   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2436                        _elapsed_time_ms, _termination_time_ms);
2437   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2438                        _step_times_ms.num(),
2439                        _step_times_ms.avg(),
2440                        _step_times_ms.sd(),
2441                        _step_times_ms.maximum(),
2442                        _step_times_ms.sum());
2443   size_t const hits = _mark_stats_cache.hits();
2444   size_t const misses = _mark_stats_cache.misses();
2445   log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2446                        hits, misses, percent_of(hits, hits + misses));
2447 }
2448 
2449 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2450   return _task_queues->steal(worker_id, task_entry);
2451 }
2452 
2453 /*****************************************************************************
2454 
2455     The do_marking_step(time_target_ms, ...) method is the building
2456     block of the parallel marking framework. It can be called in parallel
2457     with other invocations of do_marking_step() on different tasks
2458     (but only one per task, obviously) and concurrently with the
2459     mutator threads, or during remark, hence it eliminates the need
2460     for two versions of the code. When called during remark, it will
2461     pick up from where the task left off during the concurrent marking
2462     phase. Interestingly, tasks are also claimable during evacuation
2463     pauses, since do_marking_step() ensures that it aborts before
2464     it needs to yield.
2465 
2466     The data structures that it uses to do marking work are the
2467     following:
2468 
2469       (1) Marking Bitmap. If there are gray objects that appear only
2470       on the bitmap (this happens either when dealing with an overflow
2471       or when the initial marking phase has simply marked the roots
2472       and didn't push them on the stack), then tasks claim heap
2473       regions whose bitmap they then scan to find gray objects. A
2474       global finger indicates where the end of the last claimed region
2475       is. A local finger indicates how far into the region a task has
2476       scanned. The two fingers are used to determine how to gray an
2477       object (i.e. whether simply marking it is OK, as it will be
2478       visited by a task in the future, or whether it needs to be also
2479       pushed on a stack).
2480 
2481       (2) Local Queue. The local queue of the task which is accessed
2482       reasonably efficiently by the task. Other tasks can steal from
2483       it when they run out of work. Throughout the marking phase, a
2484       task attempts to keep its local queue short but not totally
2485       empty, so that entries are available for stealing by other
2486       tasks. Only when there is no more work does a task totally
2487       drain its local queue.
2488 
2489       (3) Global Mark Stack. This handles local queue overflow. During
2490       marking only sets of entries are moved between it and the local
2491       queues, as access to it requires a mutex, and more fine-grained
2492       interaction with it might cause contention. If it
2493       overflows, then the marking phase should restart and iterate
2494       over the bitmap to identify gray objects. Throughout the marking
2495       phase, tasks attempt to keep the global mark stack at a small
2496       length but not totally empty, so that entries are available for
2497       popping by other tasks. Only when there is no more work do tasks
2498       totally drain the global mark stack.
2499 
2500       (4) SATB Buffer Queue. This is where completed SATB buffers are
2501       made available. Buffers are regularly removed from this queue
2502       and scanned for roots, so that the queue doesn't get too
2503       long. During remark, all completed buffers are processed, as
2504       well as the filled in parts of any uncompleted buffers.
2505 
2506     The do_marking_step() method tries to abort when the time target
2507     has been reached. There are a few other cases when the
2508     do_marking_step() method also aborts:
2509 
2510       (1) When the marking phase has been aborted (after a Full GC).
2511 
2512       (2) When a global overflow (on the global stack) has been
2513       triggered. Before the task aborts, it will actually sync up with
2514       the other tasks to ensure that all the marking data structures
2515       (local queues, stacks, fingers etc.)  are re-initialized so that
2516       when do_marking_step() completes, the marking phase can
2517       immediately restart.
2518 
2519       (3) When enough completed SATB buffers are available. The
2520       do_marking_step() method only tries to drain SATB buffers right
2521       at the beginning. So, if enough buffers are available, the
2522       marking step aborts and the SATB buffers are processed at
2523       the beginning of the next invocation.
2524 
2525       (4) To yield. When we have to yield, we abort and yield
2526       right at the end of do_marking_step(). This saves us from a lot
2527       of hassle as, by yielding we might allow a Full GC. If this
2528       happens then objects will be compacted underneath our feet, the
2529       heap might shrink, etc. We save checking for this by just
2530       aborting and doing the yield right at the end.
2531 
2532     From the above it follows that the do_marking_step() method should
2533     be called in a loop (or, otherwise, regularly) until it completes.
2534 
2535     If a marking step completes without its has_aborted() flag being
2536     true, it means it has completed the current marking phase (and
2537     also all other marking tasks have done so and have all synced up).
2538 
2539     A method called regular_clock_call() is invoked "regularly" (in
2540     sub ms intervals) throughout marking. It is this clock method that
2541     checks all the abort conditions which were mentioned above and
2542     decides when the task should abort. A work-based scheme is used to
2543     trigger this clock method: when the number of object words the
2544     marking phase has scanned or the number of references the marking
2545     phase has visited reach a given limit. Additional invocations of
2546     the clock method have been planted in a few other strategic places
2547     too. The initial reason for the clock method was to avoid calling
2548     vtime too regularly, as it is quite expensive. So, once it was in
2549     place, it was natural to piggy-back all the other conditions on it
2550     too and not constantly check them throughout the code.
2551 
2552     If do_termination is true then do_marking_step will enter its
2553     termination protocol.
2554 
2555     The value of is_serial must be true when do_marking_step is being
2556     called serially (i.e. by the VMThread) and do_marking_step should
2557     skip any synchronization in the termination and overflow code.
2558     Examples include the serial remark code and the serial reference
2559     processing closures.
2560 
2561     The value of is_serial must be false when do_marking_step is
2562     being called by any of the worker threads in a work gang.
2563     Examples include the concurrent marking code (CMMarkingTask),
2564     the MT remark code, and the MT reference processing closures.
2565 
2566  *****************************************************************************/
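
// In code form, the calling convention described above is the retry loop
// already used by the remark task and the reference processing closures
// earlier in this file:
//
//   do {
//     task->do_marking_step(target_ms, do_termination, is_serial);
//   } while (task->has_aborted() && !cm->has_overflown());
//
// where target_ms, do_termination and is_serial stand for whatever the
// caller chooses (see e.g. G1CMRemarkTask::work() and
// G1CMDrainMarkingStackClosure::do_void()).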
2567 
2568 void G1CMTask::do_marking_step(double time_target_ms,
2569                                bool do_termination,
2570                                bool is_serial) {
2571   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2572 
2573   _start_time_ms = os::elapsedVTime() * 1000.0;
2574 
2575   // If do_stealing is true then do_marking_step will attempt to
2576   // steal work from the other G1CMTasks. It only makes sense to
2577   // enable stealing when the termination protocol is enabled
2578   // and do_marking_step() is not being called serially.
2579   bool do_stealing = do_termination && !is_serial;
2580 
2581   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2582   _time_target_ms = time_target_ms - diff_prediction_ms;
2583 
2584   // set up the variables that are used in the work-based scheme to
2585   // call the regular clock method
2586   _words_scanned = 0;
2587   _refs_reached  = 0;
2588   recalculate_limits();
2589 
2590   // clear all flags
2591   clear_has_aborted();
2592   _has_timed_out = false;
2593   _draining_satb_buffers = false;
2594 
2595   ++_calls;
2596 
2597   // Set up the bitmap and oop closures. Anything that uses them is
2598   // eventually called from this method, so it is OK to allocate these
2599   // statically.
2600   G1CMBitMapClosure bitmap_closure(this, _cm);
2601   G1CMOopClosure cm_oop_closure(_g1h, this);
2602   set_cm_oop_closure(&cm_oop_closure);
2603 
2604   if (_cm->has_overflown()) {
2605     // This can happen if the mark stack overflows during a GC pause
2606     // and this task, after a yield point, restarts. We have to abort
2607     // as we need to get into the overflow protocol which happens
2608     // right at the end of this task.
2609     set_has_aborted();
2610   }
2611 
2612   // First drain any available SATB buffers. After this, we will not
2613   // look at SATB buffers before the next invocation of this method.
2614   // If enough completed SATB buffers are queued up, the regular clock
2615   // will abort this task so that it restarts.
2616   drain_satb_buffers();
2617   // ...then partially drain the local queue and the global stack
2618   drain_local_queue(true);
2619   drain_global_stack(true);
2620 
2621   do {
2622     if (!has_aborted() && _curr_region != NULL) {
2623       // This means that we're already holding on to a region.
2624       assert(_finger != NULL, "if region is not NULL, then the finger "
2625              "should not be NULL either");
2626 
2627       // We might have restarted this task after an evacuation pause
2628       // which might have evacuated the region we're holding on to
2629       // underneath our feet. Let's read its limit again to make sure
2630       // that we do not iterate over a region of the heap that
2631       // contains garbage (update_region_limit() will also move
2632       // _finger to the start of the region if it is found empty).
2633       update_region_limit();
2634       // We will start from _finger not from the start of the region,
2635       // as we might be restarting this task after aborting half-way
2636       // through scanning this region. In this case, _finger points to
2637       // the address where we last found a marked object. If this is a
2638       // fresh region, _finger points to start().
2639       MemRegion mr = MemRegion(_finger, _region_limit);
2640 
2641       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2642              "humongous regions should go around loop once only");
2643 
2644       // Some special cases:
2645       // If the memory region is empty, we can just give up the region.
2646       // If the current region is humongous then we only need to check
2647       // the bitmap for the bit associated with the start of the object,
2648       // scan the object if it's live, and give up the region.
2649       // Otherwise, let's iterate over the bitmap of the part of the region
2650       // that is left.
2651       // If the iteration is successful, give up the region.
2652       if (mr.is_empty()) {
2653         giveup_current_region();
2654         regular_clock_call();
2655       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2656         if (_next_mark_bitmap->is_marked(mr.start())) {
2657           // The object is marked - apply the closure
2658           bitmap_closure.do_addr(mr.start());
2659         }
2660         // Even if this task aborted while scanning the humongous object
2661         // we can (and should) give up the current region.
2662         giveup_current_region();
2663         regular_clock_call();
2664       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2665         giveup_current_region();
2666         regular_clock_call();
2667       } else {
2668         assert(has_aborted(), "currently the only way to do so");
2669         // The only way to abort the bitmap iteration is to return
2670         // false from the do_addr() method. However, inside the
2671         // do_addr() method we move the _finger to point to the
2672         // object currently being looked at. So, if we bail out, we
2673         // have definitely set _finger to something non-null.
2674         assert(_finger != NULL, "invariant");
2675 
2676         // Region iteration was actually aborted. So now _finger
2677         // points to the address of the object we last scanned. If we
2678         // leave it there, when we restart this task, we will rescan
2679         // the object. It is easy to avoid this. We move the finger by
2680         // enough to point to the next possible object header.
2681         assert(_finger < _region_limit, "invariant");
2682         HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2683         // Check if bitmap iteration was aborted while scanning the last object
2684         if (new_finger >= _region_limit) {
2685           giveup_current_region();
2686         } else {
2687           move_finger_to(new_finger);
2688         }
2689       }
2690     }
2691     // At this point we have either completed iterating over the
2692     // region we were holding on to, or we have aborted.
2693 
2694     // We then partially drain the local queue and the global stack.
2695     // (Do we really need this?)
2696     drain_local_queue(true);
2697     drain_global_stack(true);
2698 
    // See the note on the claim_region() method for why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
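    // (In short: claim_region() can lose the race for a region to another
    // worker, so a NULL result by itself does not prove that every region
    // has already been claimed.)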
2703     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2704       // We are going to try to claim a new region. We should have
2705       // given up on the previous one.
2706       // Separated the asserts so that we know which one fires.
2707       assert(_curr_region  == NULL, "invariant");
2708       assert(_finger       == NULL, "invariant");
2709       assert(_region_limit == NULL, "invariant");
2710       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2711       if (claimed_region != NULL) {
2712         // Yes, we managed to claim one
2713         setup_for_region(claimed_region);
2714         assert(_curr_region == claimed_region, "invariant");
2715       }
2716       // It is important to call the regular clock here. It might take
2717       // a while to claim a region if, for example, we hit a large
2718       // block of empty regions. So we need to call the regular clock
2719       // method once round the loop to make sure it's called
2720       // frequently enough.
2721       regular_clock_call();
2722     }
2723 
2724     if (!has_aborted() && _curr_region == NULL) {
2725       assert(_cm->out_of_regions(),
2726              "at this point we should be out of regions");
2727     }
  } while (_curr_region != NULL && !has_aborted());
2729 
2730   if (!has_aborted()) {
2731     // We cannot check whether the global stack is empty, since other
2732     // tasks might be pushing objects to it concurrently.
2733     assert(_cm->out_of_regions(),
2734            "at this point we should be out of regions");
2735     // Try to reduce the number of available SATB buffers so that
2736     // remark has less work to do.
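    // (SATB buffers hold the pre-write field values logged by mutators
    // under the snapshot-at-the-beginning write barrier; whatever is not
    // drained here has to be processed during the remark pause instead.)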
2737     drain_satb_buffers();
2738   }
2739 
2740   // Since we've done everything else, we can now totally drain the
2741   // local queue and global stack.
2742   drain_local_queue(false);
2743   drain_global_stack(false);
2744 
  // Attempt to steal work from other tasks' queues.
2746   if (do_stealing && !has_aborted()) {
2747     // We have not aborted. This means that we have finished all that
2748     // we could. Let's try to do some stealing...
2749 
2750     // We cannot check whether the global stack is empty, since other
2751     // tasks might be pushing objects to it concurrently.
2752     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2753            "only way to reach here");
2754     while (!has_aborted()) {
2755       G1TaskQueueEntry entry;
2756       if (_cm->try_stealing(_worker_id, entry)) {
2757         scan_task_entry(entry);
2758 
2759         // And since we're towards the end, let's totally drain the
2760         // local queue and global stack.
2761         drain_local_queue(false);
2762         drain_global_stack(false);
2763       } else {
2764         break;
2765       }
2766     }
2767   }
2768 
  // If we still haven't aborted, try to enter the
  // termination protocol.
2771   if (do_termination && !has_aborted()) {
2772     // We cannot check whether the global stack is empty, since other
2773     // tasks might be concurrently pushing objects on it.
2774     // Separated the asserts so that we know which one fires.
2775     assert(_cm->out_of_regions(), "only way to reach here");
2776     assert(_task_queue->size() == 0, "only way to reach here");
2777     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2778 
    // The G1CMTask class also extends the TerminatorTerminator class,
    // so its should_exit_termination() method also decides
    // whether to exit the termination protocol.
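    // Roughly (an illustrative outline, not the exact terminator code):
    // offer_termination() blocks until either every worker has offered
    // termination, in which case it returns true, or our
    // should_exit_termination() asks to leave the protocol early, in
    // which case it returns false and we abort and retry.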
2782     bool finished = (is_serial ||
2783                      _cm->terminator()->offer_termination(this));
2784     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2785     _termination_time_ms +=
2786       termination_end_time_ms - _termination_start_time_ms;
2787 
2788     if (finished) {
2789       // We're all done.
2790 
2791       // We can now guarantee that the global stack is empty, since
2792       // all other tasks have finished. We separated the guarantees so
2793       // that, if a condition is false, we can immediately find out
2794       // which one.
2795       guarantee(_cm->out_of_regions(), "only way to reach here");
2796       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2797       guarantee(_task_queue->size() == 0, "only way to reach here");
2798       guarantee(!_cm->has_overflown(), "only way to reach here");
2799     } else {
      // Apparently there's more work to do. Let's abort this task. The
      // marking phase will restart it and we can hopefully find more
      // things to do.
2802       set_has_aborted();
2803     }
2804   }
2805 
  // Mainly for debugging purposes, to make sure that a pointer to the
  // closure which was stack-allocated in this frame doesn't
  // escape it by accident.
2809   set_cm_oop_closure(NULL);
2810   double end_time_ms = os::elapsedVTime() * 1000.0;
2811   double elapsed_time_ms = end_time_ms - _start_time_ms;
2812   // Update the step history.
2813   _step_times_ms.add(elapsed_time_ms);
2814 
2815   if (has_aborted()) {
2816     // The task was aborted for some reason.
2817     if (_has_timed_out) {
2818       double diff_ms = elapsed_time_ms - _time_target_ms;
2819       // Keep statistics of how well we did with respect to hitting
2820       // our target only if we actually timed out (if we aborted for
2821       // other reasons, then the results might get skewed).
2822       _marking_step_diffs_ms.add(diff_ms);
2823     }
2824 
2825     if (_cm->has_overflown()) {
2826       // This is the interesting one. We aborted because a global
2827       // overflow was raised. This means we have to restart the
2828       // marking phase and start iterating over regions. However, in
2829       // order to do this we have to make sure that all tasks stop
2830       // what they are doing and re-initialize in a safe manner. We
2831       // will achieve this with the use of two barrier sync points.
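      // Schematically (an illustrative outline of the code below):
      //
      //   all tasks:  enter_first_sync_barrier()  -- wait until every
      //               task has stopped doing marking work
      //   each task:  clear its region fields and flush its mark stats
      //               cache
      //   worker 0:   reset the global marking state (concurrent phase
      //               only)
      //   all tasks:  enter_second_sync_barrier() -- wait until the
      //               reset is complete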
2832 
2833       if (!is_serial) {
2834         // We only need to enter the sync barrier if being called
2835         // from a parallel context
2836         _cm->enter_first_sync_barrier(_worker_id);
2837 
2838         // When we exit this sync barrier we know that all tasks have
2839         // stopped doing marking work. So, it's now safe to
2840         // re-initialize our data structures.
2841       }
2842 
2843       clear_region_fields();
2844       flush_mark_stats_cache();
2845 
2846       if (!is_serial) {
2847         // If we're executing the concurrent phase of marking, reset the marking
2848         // state; otherwise the marking state is reset after reference processing,
2849         // during the remark pause.
2850         // If we reset here as a result of an overflow during the remark we will
2851         // see assertion failures from any subsequent set_concurrency_and_phase()
2852         // calls.
2853         if (_cm->concurrent() && _worker_id == 0) {
          // Worker 0 is responsible for clearing the global data structures
          // because of an overflow. During STW we should not clear the overflow
          // flag (in G1ConcurrentMark::reset_marking_state()) since we rely on
          // it being true when we exit this method to abort the pause and
          // restart concurrent marking.
2858           _cm->reset_marking_for_restart();
2859 
2860           log_info(gc, marking)("Concurrent Mark reset for overflow");
2861         }
2862 
2863         // ...and enter the second barrier.
2864         _cm->enter_second_sync_barrier(_worker_id);
2865       }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
2869     }
2870   }
2871 }
2872 
2873 G1CMTask::G1CMTask(uint worker_id,
2874                    G1ConcurrentMark* cm,
2875                    G1CMTaskQueue* task_queue,
2876                    G1RegionMarkStats* mark_stats,
2877                    uint max_regions) :
2878   _objArray_processor(this),
2879   _worker_id(worker_id),
2880   _g1h(G1CollectedHeap::heap()),
2881   _cm(cm),
2882   _next_mark_bitmap(NULL),
2883   _task_queue(task_queue),
2884   _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2885   _calls(0),
2886   _time_target_ms(0.0),
2887   _start_time_ms(0.0),
2888   _cm_oop_closure(NULL),
2889   _curr_region(NULL),
2890   _finger(NULL),
2891   _region_limit(NULL),
2892   _words_scanned(0),
2893   _words_scanned_limit(0),
2894   _real_words_scanned_limit(0),
2895   _refs_reached(0),
2896   _refs_reached_limit(0),
2897   _real_refs_reached_limit(0),
2898   _has_aborted(false),
2899   _has_timed_out(false),
2900   _draining_satb_buffers(false),
2901   _step_times_ms(),
2902   _elapsed_time_ms(0.0),
2903   _termination_time_ms(0.0),
2904   _termination_start_time_ms(0.0),
2905   _marking_step_diffs_ms()
2906 {
2907   guarantee(task_queue != NULL, "invariant");
2908 
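  // Seed the step-diff statistics so that the very first prediction has a
  // sample to work from; the 0.5 ms value is presumably a conservative
  // initial estimate.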
2909   _marking_step_diffs_ms.add(0.5);
2910 }
2911 
2912 // These are formatting macros that are used below to ensure
2913 // consistent formatting. The *_H_* versions are used to format the
2914 // header for a particular value and they should be kept consistent
2915 // with the corresponding macro. Also note that most of the macros add
2916 // the necessary white space (as a prefix) which makes them a bit
2917 // easier to compose.
2918 
2919 // All the output lines are prefixed with this string to be able to
2920 // identify them easily in a large log file.
2921 #define G1PPRL_LINE_PREFIX            "###"
2922 
2923 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
2924 #ifdef _LP64
2925 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
2926 #else // _LP64
2927 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
2928 #endif // _LP64
2929 
2930 // For per-region info
2931 #define G1PPRL_TYPE_FORMAT            "   %-4s"
2932 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
2933 #define G1PPRL_STATE_FORMAT           "   %-5s"
2934 #define G1PPRL_STATE_H_FORMAT         "   %5s"
2935 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
2936 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
2937 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
2938 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
2939 
2940 // For summary info
2941 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
2942 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
2943 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
2944 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
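// As a purely illustrative example (all values hypothetical), a per-region
// line composed from the macros above would look something like:
//
// ###    OLD 0x00000000c0000000-0x00000000c0100000    1048576     917504     524288          120.5       4096  CMPLT       1024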
2945 
2946 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2947   _total_used_bytes(0), _total_capacity_bytes(0),
2948   _total_prev_live_bytes(0), _total_next_live_bytes(0),
2949   _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2950 {
2951   if (!log_is_enabled(Trace, gc, liveness)) {
2952     return;
2953   }
2954 
2955   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2956   MemRegion g1_reserved = g1h->g1_reserved();
2957   double now = os::elapsedTime();
2958 
2959   // Print the header of the output.
2960   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2961   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2962                           G1PPRL_SUM_ADDR_FORMAT("reserved")
2963                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
2964                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
2965                           HeapRegion::GrainBytes);
2966   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2967   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2968                           G1PPRL_TYPE_H_FORMAT
2969                           G1PPRL_ADDR_BASE_H_FORMAT
2970                           G1PPRL_BYTE_H_FORMAT
2971                           G1PPRL_BYTE_H_FORMAT
2972                           G1PPRL_BYTE_H_FORMAT
2973                           G1PPRL_DOUBLE_H_FORMAT
2974                           G1PPRL_BYTE_H_FORMAT
2975                           G1PPRL_STATE_H_FORMAT
2976                           G1PPRL_BYTE_H_FORMAT,
2977                           "type", "address-range",
2978                           "used", "prev-live", "next-live", "gc-eff",
2979                           "remset", "state", "code-roots");
2980   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2981                           G1PPRL_TYPE_H_FORMAT
2982                           G1PPRL_ADDR_BASE_H_FORMAT
2983                           G1PPRL_BYTE_H_FORMAT
2984                           G1PPRL_BYTE_H_FORMAT
2985                           G1PPRL_BYTE_H_FORMAT
2986                           G1PPRL_DOUBLE_H_FORMAT
2987                           G1PPRL_BYTE_H_FORMAT
2988                           G1PPRL_STATE_H_FORMAT
2989                           G1PPRL_BYTE_H_FORMAT,
2990                           "", "",
2991                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
2992                           "(bytes)", "", "(bytes)");
2993 }
2994 
2995 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
2996   if (!log_is_enabled(Trace, gc, liveness)) {
2997     return false;
2998   }
2999 
3000   const char* type       = r->get_type_str();
3001   HeapWord* bottom       = r->bottom();
3002   HeapWord* end          = r->end();
3003   size_t capacity_bytes  = r->capacity();
3004   size_t used_bytes      = r->used();
3005   size_t prev_live_bytes = r->live_bytes();
3006   size_t next_live_bytes = r->next_live_bytes();
3007   double gc_eff          = r->gc_efficiency();
3008   size_t remset_bytes    = r->rem_set()->mem_size();
3009   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3010   const char* remset_type = r->rem_set()->get_short_state_str();
3011 
3012   _total_used_bytes      += used_bytes;
3013   _total_capacity_bytes  += capacity_bytes;
3014   _total_prev_live_bytes += prev_live_bytes;
3015   _total_next_live_bytes += next_live_bytes;
3016   _total_remset_bytes    += remset_bytes;
3017   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3018 
3019   // Print a line for this particular region.
3020   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3021                           G1PPRL_TYPE_FORMAT
3022                           G1PPRL_ADDR_BASE_FORMAT
3023                           G1PPRL_BYTE_FORMAT
3024                           G1PPRL_BYTE_FORMAT
3025                           G1PPRL_BYTE_FORMAT
3026                           G1PPRL_DOUBLE_FORMAT
3027                           G1PPRL_BYTE_FORMAT
3028                           G1PPRL_STATE_FORMAT
3029                           G1PPRL_BYTE_FORMAT,
3030                           type, p2i(bottom), p2i(end),
3031                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3032                           remset_bytes, remset_type, strong_code_roots_bytes);
3033 
3034   return false;
3035 }
3036 
3037 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3038   if (!log_is_enabled(Trace, gc, liveness)) {
3039     return;
3040   }
3041 
  // Add static memory usage to the remembered set sizes.
3043   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3044   // Print the footer of the output.
3045   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3046   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3047                          " SUMMARY"
3048                          G1PPRL_SUM_MB_FORMAT("capacity")
3049                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3050                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3051                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3052                          G1PPRL_SUM_MB_FORMAT("remset")
3053                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3054                          bytes_to_mb(_total_capacity_bytes),
3055                          bytes_to_mb(_total_used_bytes),
3056                          percent_of(_total_used_bytes, _total_capacity_bytes),
3057                          bytes_to_mb(_total_prev_live_bytes),
3058                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3059                          bytes_to_mb(_total_next_live_bytes),
3060                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
3061                          bytes_to_mb(_total_remset_bytes),
3062                          bytes_to_mb(_total_strong_code_roots_bytes));
3063 }