1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorState.hpp"
  31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1HeapVerifier.hpp"
  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1Policy.hpp"
  36 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/g1ThreadLocalData.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionRemSet.hpp"
  41 #include "gc/g1/heapRegionSet.inline.hpp"
  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/gcVMOperations.hpp"
  47 #include "gc/shared/genOopClosures.inline.hpp"
  48 #include "gc/shared/referencePolicy.hpp"
  49 #include "gc/shared/strongRootsScope.hpp"
  50 #include "gc/shared/suspendibleThreadSet.hpp"
  51 #include "gc/shared/taskqueue.inline.hpp"
  52 #include "gc/shared/weakProcessor.inline.hpp"
  53 #include "gc/shared/workerPolicy.hpp"
  54 #include "include/jvm.h"
  55 #include "logging/log.hpp"
  56 #include "memory/allocation.hpp"
  57 #include "memory/resourceArea.hpp"
  58 #include "oops/access.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "runtime/atomic.hpp"
  61 #include "runtime/handles.inline.hpp"
  62 #include "runtime/java.hpp"
  63 #include "runtime/prefetch.inline.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "utilities/align.hpp"
  66 #include "utilities/growableArray.hpp"
  67 
  68 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  69   assert(addr < _cm->finger(), "invariant");
  70   assert(addr >= _task->finger(), "invariant");
  71 
  72   // We move that task's local finger along.
  73   _task->move_finger_to(addr);
  74 
  75   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  76   // we only partially drain the local queue and global stack
  77   _task->drain_local_queue(true);
  78   _task->drain_global_stack(true);
  79 
  80   // if the has_aborted flag has been raised, we need to bail out of
  81   // the iteration
  82   return !_task->has_aborted();
  83 }
  84 
  85 G1CMMarkStack::G1CMMarkStack() :
  86   _max_chunk_capacity(0),
  87   _base(NULL),
  88   _chunk_capacity(0) {
  89   set_empty();
  90 }
  91 
  92 bool G1CMMarkStack::resize(size_t new_capacity) {
  93   assert(is_empty(), "Only resize when stack is empty.");
  94   assert(new_capacity <= _max_chunk_capacity,
  95          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
  96 
  97   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
  98 
  99   if (new_base == NULL) {
 100     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
 101     return false;
 102   }
 103   // Release old mapping.
 104   if (_base != NULL) {
 105     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 106   }
 107 
 108   _base = new_base;
 109   _chunk_capacity = new_capacity;
 110   set_empty();
 111 
 112   return true;
 113 }
 114 
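// Capacities of the overflow mark stack are aligned so that whole TaskQueueEntryChunks
// map onto whole OS allocation units; the alignment is returned in units of
// G1TaskQueueEntry elements.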
 115 size_t G1CMMarkStack::capacity_alignment() {
 116   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 117 }
 118 
 119 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 120   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 121 
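  // Number of G1TaskQueueEntry-sized slots that a single TaskQueueEntryChunk occupies;
  // used below to convert capacities given in entries into capacities in chunks.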
 122   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 123 
 124   _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 125   size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 126 
 127   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 128             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 129             _max_chunk_capacity,
 130             initial_chunk_capacity);
 131 
 132   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 133                 initial_chunk_capacity, _max_chunk_capacity);
 134 
 135   return resize(initial_chunk_capacity);
 136 }
 137 
 138 void G1CMMarkStack::expand() {
 139   if (_chunk_capacity == _max_chunk_capacity) {
 140     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 141     return;
 142   }
 143   size_t old_capacity = _chunk_capacity;
 144   // Double capacity if possible
 145   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 146 
 147   if (resize(new_capacity)) {
 148     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 149                   old_capacity, new_capacity);
 150   } else {
 151     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 152                     old_capacity, new_capacity);
 153   }
 154 }
 155 
 156 G1CMMarkStack::~G1CMMarkStack() {
 157   if (_base != NULL) {
 158     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 159   }
 160 }
 161 
 162 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
 163   elem->next = *list;
 164   *list = elem;
 165 }
 166 
 167 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
 168   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 169   add_chunk_to_list(&_chunk_list, elem);
 170   _chunks_in_chunk_list++;
 171 }
 172 
 173 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
 174   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 175   add_chunk_to_list(&_free_list, elem);
 176 }
 177 
 178 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
 179   TaskQueueEntryChunk* result = *list;
 180   if (result != NULL) {
 181     *list = (*list)->next;
 182   }
 183   return result;
 184 }
 185 
 186 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 187   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 188   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
 189   if (result != NULL) {
 190     _chunks_in_chunk_list--;
 191   }
 192   return result;
 193 }
 194 
 195 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 196   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 197   return remove_chunk_from_list(&_free_list);
 198 }
 199 
 200 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
 201   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 202   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 203   // wraparound of _hwm.
 204   if (_hwm >= _chunk_capacity) {
 205     return NULL;
 206   }
 207 
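  // Claim a slot. Several threads can race past the check above, so the claimed
  // index has to be re-checked against the capacity.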
 208   size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
 209   if (cur_idx >= _chunk_capacity) {
 210     return NULL;
 211   }
 212 
 213   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
 214   result->next = NULL;
 215   return result;
 216 }
 217 
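// Pushes a full chunk of EntriesPerChunk entries from ptr_arr onto the global mark
// stack: a chunk is taken from the free list if available, otherwise one is
// bump-allocated from the backing array. Returns false if neither is possible.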
 218 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
 219   // Get a new chunk.
 220   TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
 221 
 222   if (new_chunk == NULL) {
 223     // Did not get a chunk from the free list. Allocate from backing memory.
 224     new_chunk = allocate_new_chunk();
 225 
 226     if (new_chunk == NULL) {
 227       return false;
 228     }
 229   }
 230 
 231   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 232 
 233   add_chunk_to_chunk_list(new_chunk);
 234 
 235   return true;
 236 }
 237 
 238 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
 239   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 240 
 241   if (cur == NULL) {
 242     return false;
 243   }
 244 
 245   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 246 
 247   add_chunk_to_free_list(cur);
 248   return true;
 249 }
 250 
 251 void G1CMMarkStack::set_empty() {
 252   _chunks_in_chunk_list = 0;
 253   _hwm = 0;
 254   _chunk_list = NULL;
 255   _free_list = NULL;
 256 }
 257 
 258 G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
 259   _root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
 260   _max_regions(max_regions),
 261   _num_root_regions(0),
 262   _claimed_root_regions(0),
 263   _scan_in_progress(false),
 264   _should_abort(false) { }
 265 
 266 G1CMRootRegions::~G1CMRootRegions() {
 267   FREE_C_HEAP_ARRAY(HeapRegion*, _max_regions);
 268 }
 269 
 270 void G1CMRootRegions::reset() {
 271   _num_root_regions = 0;
 272 }
 273 
 274 void G1CMRootRegions::add(HeapRegion* hr) {
 275   assert_at_safepoint();
 276   size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
  assert(idx < _max_regions, "Trying to add more root regions than there is space for, maximum is " SIZE_FORMAT, _max_regions);
 278   _root_regions[idx] = hr;
 279 }
 280 
 281 void G1CMRootRegions::prepare_for_scan() {
 282   assert(!scan_in_progress(), "pre-condition");
 283 
 284   _scan_in_progress = _num_root_regions > 0;
 285 
 286   _claimed_root_regions = 0;
 287   _should_abort = false;
 288 }
 289 
 290 HeapRegion* G1CMRootRegions::claim_next() {
 291   if (_should_abort) {
 292     // If someone has set the should_abort flag, we return NULL to
 293     // force the caller to bail out of their loop.
 294     return NULL;
 295   }
 296 
 297   if (_claimed_root_regions >= _num_root_regions) {
 298     return NULL;
 299   }
 300 
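  // Claim an index. The unsynchronized check above can race with other claimers,
  // so the claimed index has to be re-checked against _num_root_regions.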
 301   size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
 302   if (claimed_index < _num_root_regions) {
 303     return _root_regions[claimed_index];
 304   }
 305   return NULL;
 306 }
 307 
 308 uint G1CMRootRegions::num_root_regions() const {
 309   return (uint)_num_root_regions;
 310 }
 311 
 312 void G1CMRootRegions::notify_scan_done() {
 313   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 314   _scan_in_progress = false;
 315   RootRegionScan_lock->notify_all();
 316 }
 317 
 318 void G1CMRootRegions::cancel_scan() {
 319   notify_scan_done();
 320 }
 321 
 322 void G1CMRootRegions::scan_finished() {
 323   assert(scan_in_progress(), "pre-condition");
 324 
 325   if (!_should_abort) {
 326     assert(_claimed_root_regions >= num_root_regions(),
 327            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
 328            _claimed_root_regions, num_root_regions());
 329   }
 330 
 331   notify_scan_done();
 332 }
 333 
 334 bool G1CMRootRegions::wait_until_scan_finished() {
 335   if (!scan_in_progress()) {
 336     return false;
 337   }
 338 
 339   {
 340     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 341     while (scan_in_progress()) {
 342       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 343     }
 344   }
 345   return true;
 346 }
 347 
 348 // Returns the maximum number of workers to be used in a concurrent
 349 // phase based on the number of GC workers being used in a STW
 350 // phase.
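// For example, the default scaling turns 8 STW GC workers into (8 + 2) / 4 = 2
// concurrent workers; the result is never less than 1.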
 351 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
 352   return MAX2((num_gc_workers + 2) / 4, 1U);
 353 }
 354 
 355 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
 356                                    G1RegionToSpaceMapper* prev_bitmap_storage,
 357                                    G1RegionToSpaceMapper* next_bitmap_storage) :
 358   // _cm_thread set inside the constructor
 359   _g1h(g1h),
 360   _completed_initialization(false),
 361 
 362   _mark_bitmap_1(),
 363   _mark_bitmap_2(),
 364   _prev_mark_bitmap(&_mark_bitmap_1),
 365   _next_mark_bitmap(&_mark_bitmap_2),
 366 
 367   _heap(_g1h->reserved_region()),
 368 
 369   _root_regions(_g1h->max_regions()),
 370 
 371   _global_mark_stack(),
 372 
 373   // _finger set in set_non_marking_state
 374 
 375   _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
 376   _max_num_tasks(ParallelGCThreads),
 377   // _num_active_tasks set in set_non_marking_state()
 378   // _tasks set inside the constructor
 379 
 380   _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
 381   _terminator((int) _max_num_tasks, _task_queues),
 382 
 383   _first_overflow_barrier_sync(),
 384   _second_overflow_barrier_sync(),
 385 
 386   _has_overflown(false),
 387   _concurrent(false),
 388   _has_aborted(false),
 389   _restart_for_overflow(false),
 390   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 391   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 392 
 393   // _verbose_level set below
 394 
 395   _init_times(),
 396   _remark_times(),
 397   _remark_mark_times(),
 398   _remark_weak_ref_times(),
 399   _cleanup_times(),
 400   _total_cleanup_time(0.0),
 401 
 402   _accum_task_vtime(NULL),
 403 
 404   _concurrent_workers(NULL),
 405   _num_concurrent_workers(0),
 406   _max_concurrent_workers(0),
 407 
 408   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 409   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 410 {
 411   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 412   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 413 
 414   // Create & start ConcurrentMark thread.
 415   _cm_thread = new G1ConcurrentMarkThread(this);
 416   if (_cm_thread->osthread() == NULL) {
 417     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 418   }
 419 
 420   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 421 
 422   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 423     // Calculate the number of concurrent worker threads by scaling
 424     // the number of parallel GC threads.
 425     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 426     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 427   }
 428 
 429   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 430   if (ConcGCThreads > ParallelGCThreads) {
 431     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 432                     ConcGCThreads, ParallelGCThreads);
 433     return;
 434   }
 435 
 436   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 437   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 438 
 439   _num_concurrent_workers = ConcGCThreads;
 440   _max_concurrent_workers = _num_concurrent_workers;
 441 
 442   _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
 443   _concurrent_workers->initialize_workers();
 444 
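  // Ergonomically size the global mark stack when the user has not set it: at least
  // the default MarkStackSize and one task queue's worth (TASKQUEUE_SIZE) of entries
  // per concurrent worker, capped at MarkStackSizeMax.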
 445   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 446     size_t mark_stack_size =
 447       MIN2(MarkStackSizeMax,
 448           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
 449     // Verify that the calculated value for MarkStackSize is in range.
 450     // It would be nice to use the private utility routine from Arguments.
 451     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 452       log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 453                       "must be between 1 and " SIZE_FORMAT,
 454                       mark_stack_size, MarkStackSizeMax);
 455       return;
 456     }
 457     FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
 458   } else {
 459     // Verify MarkStackSize is in range.
 460     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 461       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 462         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 463           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 464                           "must be between 1 and " SIZE_FORMAT,
 465                           MarkStackSize, MarkStackSizeMax);
 466           return;
 467         }
 468       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 469         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 470           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 471                           " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 472                           MarkStackSize, MarkStackSizeMax);
 473           return;
 474         }
 475       }
 476     }
 477   }
 478 
 479   if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
 480     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
 481   }
 482 
 483   _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
 484   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
 485 
 486   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 487   _num_active_tasks = _max_num_tasks;
 488 
 489   for (uint i = 0; i < _max_num_tasks; ++i) {
 490     G1CMTaskQueue* task_queue = new G1CMTaskQueue();
 491     task_queue->initialize();
 492     _task_queues->register_queue(i, task_queue);
 493 
 494     _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
 495 
 496     _accum_task_vtime[i] = 0.0;
 497   }
 498 
 499   reset_at_marking_complete();
 500   _completed_initialization = true;
 501 }
 502 
 503 void G1ConcurrentMark::reset() {
 504   _has_aborted = false;
 505 
 506   reset_marking_for_restart();
 507 
  // Reset all tasks, since different phases will use different numbers of active
  // threads and it's easiest to have all of them ready.
 510   for (uint i = 0; i < _max_num_tasks; ++i) {
 511     _tasks[i]->reset(_next_mark_bitmap);
 512   }
 513 
 514   uint max_regions = _g1h->max_regions();
 515   for (uint i = 0; i < max_regions; i++) {
 516     _top_at_rebuild_starts[i] = NULL;
 517     _region_mark_stats[i].clear();
 518   }
 519 }
 520 
 521 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
 522   for (uint j = 0; j < _max_num_tasks; ++j) {
 523     _tasks[j]->clear_mark_stats_cache(region_idx);
 524   }
 525   _top_at_rebuild_starts[region_idx] = NULL;
 526   _region_mark_stats[region_idx].clear();
 527 }
 528 
 529 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
 530   uint const region_idx = r->hrm_index();
 531   if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Expected a starts humongous region but got a continues humongous one");
 533     uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
 534     for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
 535       clear_statistics_in_region(j);
 536     }
 537   } else {
 538     clear_statistics_in_region(region_idx);
 539   }
 540 }
 541 
 542 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
 543   if (bitmap->is_marked(addr)) {
 544     bitmap->clear(addr);
 545   }
 546 }
 547 
 548 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
 549   assert_at_safepoint_on_vm_thread();
 550 
 551   // Need to clear all mark bits of the humongous object.
 552   clear_mark_if_set(_prev_mark_bitmap, r->bottom());
 553   clear_mark_if_set(_next_mark_bitmap, r->bottom());
 554 
 555   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
 556     return;
 557   }
 558 
 559   // Clear any statistics about the region gathered so far.
 560   clear_statistics(r);
 561 }
 562 
 563 void G1ConcurrentMark::reset_marking_for_restart() {
 564   _global_mark_stack.set_empty();
 565 
 566   // Expand the marking stack, if we have to and if we can.
 567   if (has_overflown()) {
 568     _global_mark_stack.expand();
 569 
 570     uint max_regions = _g1h->max_regions();
 571     for (uint i = 0; i < max_regions; i++) {
 572       _region_mark_stats[i].clear_during_overflow();
 573     }
 574   }
 575 
 576   clear_has_overflown();
 577   _finger = _heap.start();
 578 
 579   for (uint i = 0; i < _max_num_tasks; ++i) {
 580     G1CMTaskQueue* queue = _task_queues->queue(i);
 581     queue->set_empty();
 582   }
 583 }
 584 
 585 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
 586   assert(active_tasks <= _max_num_tasks, "we should not have more");
 587 
 588   _num_active_tasks = active_tasks;
 589   // Need to update the three data structures below according to the
 590   // number of active threads for this phase.
 591   _terminator = TaskTerminator((int) active_tasks, _task_queues);
 592   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 593   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 594 }
 595 
 596 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 597   set_concurrency(active_tasks);
 598 
 599   _concurrent = concurrent;
 600 
 601   if (!concurrent) {
 602     // At this point we should be in a STW phase, and completed marking.
 603     assert_at_safepoint_on_vm_thread();
 604     assert(out_of_regions(),
 605            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 606            p2i(_finger), p2i(_heap.end()));
 607   }
 608 }
 609 
 610 void G1ConcurrentMark::reset_at_marking_complete() {
 611   // We set the global marking state to some default values when we're
 612   // not doing marking.
 613   reset_marking_for_restart();
 614   _num_active_tasks = 0;
 615 }
 616 
 617 G1ConcurrentMark::~G1ConcurrentMark() {
 618   FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
 619   FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
 620   // The G1ConcurrentMark instance is never freed.
 621   ShouldNotReachHere();
 622 }
 623 
 624 class G1ClearBitMapTask : public AbstractGangTask {
 625 public:
 626   static size_t chunk_size() { return M; }
 627 
 628 private:
 629   // Heap region closure used for clearing the given mark bitmap.
 630   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 631   private:
 632     G1CMBitMap* _bitmap;
 633     G1ConcurrentMark* _cm;
 634   public:
 635     G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
 636     }
 637 
 638     virtual bool do_heap_region(HeapRegion* r) {
 639       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 640 
 641       HeapWord* cur = r->bottom();
 642       HeapWord* const end = r->end();
 643 
 644       while (cur < end) {
 645         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 646         _bitmap->clear_range(mr);
 647 
 648         cur += chunk_size_in_words;
 649 
 650         // Abort iteration if after yielding the marking has been aborted.
 651         if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
 652           return true;
 653         }
        // Repeat the asserts from before the start of the closure. We do them as
        // asserts here to minimize their overhead in product builds, and rely on
        // the guarantees at the beginning / end of the bitmap clearing to get
        // some checking even in product builds.
 658         assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
 659         assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 660       }
 661       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 662 
 663       return false;
 664     }
 665   };
 666 
 667   G1ClearBitmapHRClosure _cl;
 668   HeapRegionClaimer _hr_claimer;
 669   bool _suspendible; // If the task is suspendible, workers must join the STS.
 670 
 671 public:
 672   G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 673     AbstractGangTask("G1 Clear Bitmap"),
 674     _cl(bitmap, suspendible ? cm : NULL),
 675     _hr_claimer(n_workers),
 676     _suspendible(suspendible)
 677   { }
 678 
 679   void work(uint worker_id) {
 680     SuspendibleThreadSetJoiner sts_join(_suspendible);
 681     G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
 682   }
 683 
 684   bool is_complete() {
 685     return _cl.is_complete();
 686   }
 687 };
 688 
 689 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 690   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 691 
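  // The work is split into chunk_size() (1M) sized pieces of the bitmap, using at
  // most one worker per piece; heap_map_factor() is the number of heap bytes
  // covered by one byte of the bitmap.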
 692   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 693   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 694 
 695   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 696 
 697   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 698 
 699   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 700   workers->run_task(&cl, num_workers);
 701   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 702 }
 703 
 704 void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread still appears to be in
  // the current cycle.
 707   guarantee(cm_thread()->during_cycle(), "invariant");
 708 
 709   // We are finishing up the current cycle by clearing the next
 710   // marking bitmap and getting it ready for the next cycle. During
 711   // this time no other cycle can start. So, let's make sure that this
 712   // is the case.
 713   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 714 
 715   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 716 
 717   // Repeat the asserts from above.
 718   guarantee(cm_thread()->during_cycle(), "invariant");
 719   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 720 }
 721 
 722 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 723   assert_at_safepoint_on_vm_thread();
 724   clear_bitmap(_prev_mark_bitmap, workers, false);
 725 }
 726 
 727 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 728 public:
 729   bool do_heap_region(HeapRegion* r) {
 730     r->note_start_of_marking();
 731     return false;
 732   }
 733 };
 734 
 735 void G1ConcurrentMark::pre_initial_mark() {
 736   // Initialize marking structures. This has to be done in a STW phase.
 737   reset();
 738 
 739   // For each region note start of marking.
 740   NoteStartOfMarkHRClosure startcl;
 741   _g1h->heap_region_iterate(&startcl);
 742 
 743   _root_regions.reset();
 744 }
 745 
 746 
 747 void G1ConcurrentMark::post_initial_mark() {
 748   // Start Concurrent Marking weak-reference discovery.
 749   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 750   // enable ("weak") refs discovery
 751   rp->enable_discovery();
 752   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 753 
 754   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
 757   satb_mq_set.set_active_all_threads(true, /* new active value */
 758                                      false /* expected_active */);
 759 
 760   _root_regions.prepare_for_scan();
 761 
 762   // update_g1_committed() will be called at the end of an evac pause
 763   // when marking is on. So, it's also called at the end of the
 764   // initial-mark pause to update the heap end, if the heap expands
 765   // during it. No need to call it here.
 766 }
 767 
 768 /*
 769  * Notice that in the next two methods, we actually leave the STS
 770  * during the barrier sync and join it immediately afterwards. If we
 771  * do not do this, the following deadlock can occur: one thread could
 772  * be in the barrier sync code, waiting for the other thread to also
 773  * sync up, whereas another one could be trying to yield, while also
 774  * waiting for the other threads to sync up too.
 775  *
 776  * Note, however, that this code is also used during remark and in
 777  * this case we should not attempt to leave / enter the STS, otherwise
 778  * we'll either hit an assert (debug / fastdebug) or deadlock
 779  * (product). So we should only leave / enter the STS if we are
 780  * operating concurrently.
 781  *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 787  */
 788 
 789 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 790   bool barrier_aborted;
 791   {
 792     SuspendibleThreadSetLeaver sts_leave(concurrent());
 793     barrier_aborted = !_first_overflow_barrier_sync.enter();
 794   }
 795 
 796   // at this point everyone should have synced up and not be doing any
 797   // more work
 798 
 799   if (barrier_aborted) {
 800     // If the barrier aborted we ignore the overflow condition and
 801     // just abort the whole marking phase as quickly as possible.
 802     return;
 803   }
 804 }
 805 
 806 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 807   SuspendibleThreadSetLeaver sts_leave(concurrent());
 808   _second_overflow_barrier_sync.enter();
 809 
 810   // at this point everything should be re-initialized and ready to go
 811 }
 812 
 813 class G1CMConcurrentMarkingTask : public AbstractGangTask {
 814   G1ConcurrentMark*     _cm;
 815 
 816 public:
 817   void work(uint worker_id) {
 818     assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
 819     ResourceMark rm;
 820 
 821     double start_vtime = os::elapsedVTime();
 822 
 823     {
 824       SuspendibleThreadSetJoiner sts_join;
 825 
 826       assert(worker_id < _cm->active_tasks(), "invariant");
 827 
 828       G1CMTask* task = _cm->task(worker_id);
 829       task->record_start_time();
 830       if (!_cm->has_aborted()) {
 831         do {
 832           task->do_marking_step(G1ConcMarkStepDurationMillis,
 833                                 true  /* do_termination */,
 834                                 false /* is_serial*/);
 835 
 836           _cm->do_yield_check();
 837         } while (!_cm->has_aborted() && task->has_aborted());
 838       }
 839       task->record_end_time();
 840       guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
 841     }
 842 
 843     double end_vtime = os::elapsedVTime();
 844     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 845   }
 846 
 847   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
 848       AbstractGangTask("Concurrent Mark"), _cm(cm) { }
 849 
 850   ~G1CMConcurrentMarkingTask() { }
 851 };
 852 
 853 uint G1ConcurrentMark::calc_active_marking_workers() {
 854   uint result = 0;
 855   if (!UseDynamicNumberOfGCThreads ||
 856       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 857        !ForceDynamicNumberOfGCThreads)) {
 858     result = _max_concurrent_workers;
 859   } else {
 860     result =
 861       WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
 862                                                 1, /* Minimum workers */
 863                                                 _num_concurrent_workers,
 864                                                 Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_worker_threads() because
    // that scaling has already gone into "_max_concurrent_workers".
 867   }
 868   assert(result > 0 && result <= _max_concurrent_workers,
 869          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 870          _max_concurrent_workers, result);
 871   return result;
 872 }
 873 
 874 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
 875   assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
 876          "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
 877   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 878 
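  // Apply the closure to every object between next_top_at_mark_start() and top(),
  // prefetching ahead to reduce memory stalls.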
 879   const uintx interval = PrefetchScanIntervalInBytes;
 880   HeapWord* curr = hr->next_top_at_mark_start();
 881   const HeapWord* end = hr->top();
 882   while (curr < end) {
 883     Prefetch::read(curr, interval);
 884     oop obj = oop(curr);
 885     int size = obj->oop_iterate_size(&cl);
 886     assert(size == obj->size(), "sanity");
 887     curr += size;
 888   }
 889 }
 890 
 891 class G1CMRootRegionScanTask : public AbstractGangTask {
 892   G1ConcurrentMark* _cm;
 893 public:
 894   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 895     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
 896 
 897   void work(uint worker_id) {
 898     assert(Thread::current()->is_ConcurrentGC_thread(),
 899            "this should only be done by a conc GC thread");
 900 
 901     G1CMRootRegions* root_regions = _cm->root_regions();
 902     HeapRegion* hr = root_regions->claim_next();
 903     while (hr != NULL) {
 904       _cm->scan_root_region(hr, worker_id);
 905       hr = root_regions->claim_next();
 906     }
 907   }
 908 };
 909 
 910 void G1ConcurrentMark::scan_root_regions() {
 911   // scan_in_progress() will have been set to true only if there was
 912   // at least one root region to scan. So, if it's false, we
 913   // should not attempt to do any further work.
 914   if (root_regions()->scan_in_progress()) {
 915     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 916 
 917     _num_concurrent_workers = MIN2(calc_active_marking_workers(),
 918                                    // We distribute work on a per-region basis, so starting
 919                                    // more threads than that is useless.
 920                                    root_regions()->num_root_regions());
 921     assert(_num_concurrent_workers <= _max_concurrent_workers,
 922            "Maximum number of marking threads exceeded");
 923 
 924     G1CMRootRegionScanTask task(this);
 925     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
 926                         task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
 927     _concurrent_workers->run_task(&task, _num_concurrent_workers);
 928 
 929     // It's possible that has_aborted() is true here without actually
 930     // aborting the survivor scan earlier. This is OK as it's
 931     // mainly used for sanity checking.
 932     root_regions()->scan_finished();
 933   }
 934 }
 935 
 936 void G1ConcurrentMark::concurrent_cycle_start() {
 937   _gc_timer_cm->register_gc_start();
 938 
 939   _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
 940 
 941   _g1h->trace_heap_before_gc(_gc_tracer_cm);
 942 }
 943 
 944 void G1ConcurrentMark::concurrent_cycle_end() {
 945   _g1h->collector_state()->set_clearing_next_bitmap(false);
 946 
 947   _g1h->trace_heap_after_gc(_gc_tracer_cm);
 948 
 949   if (has_aborted()) {
 950     log_info(gc, marking)("Concurrent Mark Abort");
 951     _gc_tracer_cm->report_concurrent_mode_failure();
 952   }
 953 
 954   _gc_timer_cm->register_gc_end();
 955 
 956   _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 957 }
 958 
 959 void G1ConcurrentMark::mark_from_roots() {
 960   _restart_for_overflow = false;
 961 
 962   _num_concurrent_workers = calc_active_marking_workers();
 963 
 964   uint active_workers = MAX2(1U, _num_concurrent_workers);
 965 
 966   // Setting active workers is not guaranteed since fewer
 967   // worker threads may currently exist and more may not be
 968   // available.
 969   active_workers = _concurrent_workers->update_active_workers(active_workers);
 970   log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
 971 
 972   // Parallel task terminator is set in "set_concurrency_and_phase()"
 973   set_concurrency_and_phase(active_workers, true /* concurrent */);
 974 
 975   G1CMConcurrentMarkingTask marking_task(this);
 976   _concurrent_workers->run_task(&marking_task);
 977   print_stats();
 978 }
 979 
 980 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
 981   G1HeapVerifier* verifier = _g1h->verifier();
 982 
 983   verifier->verify_region_sets_optional();
 984 
 985   if (VerifyDuringGC) {
 986     GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);
 987 
 988     size_t const BufLen = 512;
 989     char buffer[BufLen];
 990 
 991     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
 992     verifier->verify(type, vo, buffer);
 993   }
 994 
 995   verifier->check_bitmaps(caller);
 996 }
 997 
 998 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
 999   G1CollectedHeap* _g1h;
1000   G1ConcurrentMark* _cm;
1001   HeapRegionClaimer _hrclaimer;
1002   uint volatile _total_selected_for_rebuild;
1003 
1004   G1PrintRegionLivenessInfoClosure _cl;
1005 
1006   class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1007     G1CollectedHeap* _g1h;
1008     G1ConcurrentMark* _cm;
1009 
1010     G1PrintRegionLivenessInfoClosure* _cl;
1011 
1012     uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1013 
1014     void update_remset_before_rebuild(HeapRegion* hr) {
1015       G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1016 
1017       bool selected_for_rebuild;
1018       if (hr->is_humongous()) {
1019         bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
1020         selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1021       } else {
1022         size_t const live_bytes = _cm->liveness(hr->hrm_index());
1023         selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1024       }
1025       if (selected_for_rebuild) {
1026         _num_regions_selected_for_rebuild++;
1027       }
1028       _cm->update_top_at_rebuild_start(hr);
1029     }
1030 
1031     // Distribute the given words across the humongous object starting with hr and
1032     // note end of marking.
1033     void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1034       uint const region_idx = hr->hrm_index();
1035       size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
1036       uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
1037 
1038       // "Distributing" zero words means that we only note end of marking for these
1039       // regions.
1040       assert(marked_words == 0 || obj_size_in_words == marked_words,
1041              "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
1042              obj_size_in_words, marked_words);
1043 
1044       for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1045         HeapRegion* const r = _g1h->region_at(i);
1046         size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1047 
1048         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
1049                                words_to_add, i, r->get_type_str());
1050         add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
1051         marked_words -= words_to_add;
1052       }
1053       assert(marked_words == 0,
1054              SIZE_FORMAT " words left after distributing space across %u regions",
1055              marked_words, num_regions_in_humongous);
1056     }
1057 
1058     void update_marked_bytes(HeapRegion* hr) {
1059       uint const region_idx = hr->hrm_index();
1060       size_t const marked_words = _cm->liveness(region_idx);
1061       // The marking attributes the object's size completely to the humongous starts
1062       // region. We need to distribute this value across the entire set of regions a
1063       // humongous object spans.
1064       if (hr->is_humongous()) {
1065         assert(hr->is_starts_humongous() || marked_words == 0,
1066                "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1067                marked_words, region_idx, hr->get_type_str());
1068         if (hr->is_starts_humongous()) {
1069           distribute_marked_bytes(hr, marked_words);
1070         }
1071       } else {
1072         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1073         add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
1074       }
1075     }
1076 
1077     void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
1078       hr->add_to_marked_bytes(marked_bytes);
1079       _cl->do_heap_region(hr);
1080       hr->note_end_of_marking();
1081     }
1082 
1083   public:
1084     G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
1085       _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
1086 
1087     virtual bool do_heap_region(HeapRegion* r) {
1088       update_remset_before_rebuild(r);
1089       update_marked_bytes(r);
1090 
1091       return false;
1092     }
1093 
1094     uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1095   };
1096 
1097 public:
1098   G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
1099     AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
1100     _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1101 
1102   virtual void work(uint worker_id) {
1103     G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1104     _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1105     Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
1106   }
1107 
1108   uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1109 
1110   // Number of regions for which roughly one thread should be spawned for this work.
1111   static const uint RegionsPerThread = 384;
1112 };
1113 
1114 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1115   G1CollectedHeap* _g1h;
1116 public:
1117   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1118 
1119   virtual bool do_heap_region(HeapRegion* r) {
1120     _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1121     return false;
1122   }
1123 };
1124 
1125 void G1ConcurrentMark::remark() {
1126   assert_at_safepoint_on_vm_thread();
1127 
  // If a full collection has happened, we should not continue. However, we might
  // have ended up here because the Remark VM operation had already been scheduled.
1130   if (has_aborted()) {
1131     return;
1132   }
1133 
1134   G1Policy* g1p = _g1h->g1_policy();
1135   g1p->record_concurrent_mark_remark_start();
1136 
1137   double start = os::elapsedTime();
1138 
1139   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1140 
1141   {
1142     GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1143     finalize_marking();
1144   }
1145 
1146   double mark_work_end = os::elapsedTime();
1147 
1148   bool const mark_finished = !has_overflown();
1149   if (mark_finished) {
1150     weak_refs_work(false /* clear_all_soft_refs */);
1151 
1152     SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
1156     satb_mq_set.set_active_all_threads(false, /* new active value */
1157                                        true /* expected_active */);
1158 
1159     {
1160       GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1161       flush_all_task_caches();
1162     }
1163 
1164     // Install newly created mark bitmap as "prev".
1165     swap_mark_bitmaps();
1166     {
1167       GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1168 
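      // Use roughly one worker per RegionsPerThread regions, rounded up, but never
      // more than the currently active number of workers.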
1169       uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1170                                        G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1171       uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1172 
1173       G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1175       _g1h->workers()->run_task(&cl, num_workers);
1176 
1177       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1178                                       _g1h->num_regions(), cl.total_selected_for_rebuild());
1179     }
1180     {
1181       GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1182       reclaim_empty_regions();
1183     }
1184 
1185     // Clean out dead classes
1186     if (ClassUnloadingWithConcurrentMark) {
1187       GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1188       ClassLoaderDataGraph::purge();
1189     }
1190 
1191     _g1h->resize_heap_if_necessary();
1192 
1193     compute_new_sizes();
1194 
1195     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1196 
1197     assert(!restart_for_overflow(), "sanity");
1198     // Completely reset the marking state since marking completed
1199     reset_at_marking_complete();
1200   } else {
1201     // We overflowed.  Restart concurrent marking.
1202     _restart_for_overflow = true;
1203 
1204     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1205 
1206     // Clear the marking state because we will be restarting
1207     // marking due to overflowing the global mark stack.
1208     reset_marking_for_restart();
1209   }
1210 
1211   {
1212     GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1213     report_object_count(mark_finished);
1214   }
1215 
1216   // Statistics
1217   double now = os::elapsedTime();
1218   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1219   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1220   _remark_times.add((now - start) * 1000.0);
1221 
1222   g1p->record_concurrent_mark_remark_end();
1223 }
1224 
1225 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1226   // Per-region work during the Cleanup pause.
1227   class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1228     G1CollectedHeap* _g1h;
1229     size_t _freed_bytes;
1230     FreeRegionList* _local_cleanup_list;
1231     uint _old_regions_removed;
1232     uint _humongous_regions_removed;
1233 
1234   public:
1235     G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
1236                                  FreeRegionList* local_cleanup_list) :
1237       _g1h(g1h),
1238       _freed_bytes(0),
1239       _local_cleanup_list(local_cleanup_list),
1240       _old_regions_removed(0),
1241       _humongous_regions_removed(0) { }
1242 
1243     size_t freed_bytes() { return _freed_bytes; }
    uint old_regions_removed() const { return _old_regions_removed; }
    uint humongous_regions_removed() const { return _humongous_regions_removed; }
1246 
1247     bool do_heap_region(HeapRegion *hr) {
1248       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1249         _freed_bytes += hr->used();
1250         hr->set_containing_set(NULL);
1251         if (hr->is_humongous()) {
1252           _humongous_regions_removed++;
1253           _g1h->free_humongous_region(hr, _local_cleanup_list);
1254         } else {
1255           _old_regions_removed++;
1256           _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1257         }
1258         hr->clear_cardtable();
1259         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1260         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1261       }
1262 
1263       return false;
1264     }
1265   };
1266 
1267   G1CollectedHeap* _g1h;
1268   FreeRegionList* _cleanup_list;
1269   HeapRegionClaimer _hrclaimer;
1270 
1271 public:
1272   G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1273     AbstractGangTask("G1 Cleanup"),
1274     _g1h(g1h),
1275     _cleanup_list(cleanup_list),
1276     _hrclaimer(n_workers) {
1277   }
1278 
1279   void work(uint worker_id) {
1280     FreeRegionList local_cleanup_list("Local Cleanup List");
1281     G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
1282     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1283     assert(cl.is_complete(), "Shouldn't have aborted!");
1284 
1285     // Now update the old/humongous region sets
1286     _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1287     {
1288       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1289       _g1h->decrement_summary_bytes(cl.freed_bytes());
1290 
1291       _cleanup_list->add_ordered(&local_cleanup_list);
1292       assert(local_cleanup_list.is_empty(), "post-condition");
1293     }
1294   }
1295 };
1296 
1297 void G1ConcurrentMark::reclaim_empty_regions() {
1298   WorkGang* workers = _g1h->workers();
1299   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1300 
1301   G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1302   workers->run_task(&cl);
1303 
1304   if (!empty_regions_list.is_empty()) {
1305     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1306     // Now print the empty regions list.
1307     G1HRPrinter* hrp = _g1h->hr_printer();
1308     if (hrp->is_active()) {
1309       FreeRegionListIterator iter(&empty_regions_list);
1310       while (iter.more_available()) {
1311         HeapRegion* hr = iter.get_next();
1312         hrp->cleanup(hr);
1313       }
1314     }
1315     // And actually make them available.
1316     _g1h->prepend_to_freelist(&empty_regions_list);
1317   }
1318 }
1319 
1320 void G1ConcurrentMark::compute_new_sizes() {
1321   MetaspaceGC::compute_new_size();
1322 
1323   // Cleanup will have freed any regions completely full of garbage.
1324   // Update the soft reference policy with the new heap occupancy.
1325   Universe::update_heap_info_at_gc();
1326 
1327   // We reclaimed old regions so we should calculate the sizes to make
1328   // sure we update the old gen/space data.
1329   _g1h->g1mm()->update_sizes();
1330 }
1331 
1332 void G1ConcurrentMark::cleanup() {
1333   assert_at_safepoint_on_vm_thread();
1334 
1335   // If a full collection has happened, we shouldn't do this.
1336   if (has_aborted()) {
1337     return;
1338   }
1339 
1340   G1Policy* g1p = _g1h->g1_policy();
1341   g1p->record_concurrent_mark_cleanup_start();
1342 
1343   double start = os::elapsedTime();
1344 
1345   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1346 
1347   {
1348     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1349     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1350     _g1h->heap_region_iterate(&cl);
1351   }
1352 
1353   if (log_is_enabled(Trace, gc, liveness)) {
1354     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1355     _g1h->heap_region_iterate(&cl);
1356   }
1357 
1358   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1359 
  // We need to make this count as a "collection" so that any collection pause
  // racing with it goes around and waits for Cleanup to finish.
1362   _g1h->increment_total_collections();
1363 
1364   // Local statistics
1365   double recent_cleanup_time = (os::elapsedTime() - start);
1366   _total_cleanup_time += recent_cleanup_time;
1367   _cleanup_times.add(recent_cleanup_time);
1368 
1369   {
1370     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1371     _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1372   }
1373 }
1374 
// 'Keep Alive' oop closure used by both serial and parallel reference processing.
1376 // Uses the G1CMTask associated with a worker thread (for serial reference
1377 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1378 // trace referent objects.
1379 //
1380 // Using the G1CMTask and embedded local queues avoids having the worker
1381 // threads operating on the global mark stack. This reduces the risk
1382 // of overflowing the stack - which we would rather avoid at this late
1383 // state. Also using the tasks' local queues removes the potential
1384 // of the workers interfering with each other that could occur if
1385 // operating on the global stack.
1386 
1387 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1388   G1ConcurrentMark* _cm;
1389   G1CMTask*         _task;
1390   uint              _ref_counter_limit;
1391   uint              _ref_counter;
1392   bool              _is_serial;
1393 public:
1394   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1395     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1396     _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1397     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1398   }
1399 
1400   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1401   virtual void do_oop(      oop* p) { do_oop_work(p); }
1402 
1403   template <class T> void do_oop_work(T* p) {
1404     if (_cm->has_overflown()) {
1405       return;
1406     }
1407     if (!_task->deal_with_reference(p)) {
1408       // We did not add anything to the mark bitmap (or mark stack), so there is
1409       // no point trying to drain it.
1410       return;
1411     }
1412     _ref_counter--;
1413 
1414     if (_ref_counter == 0) {
1415       // We have dealt with _ref_counter_limit references, pushing them
1416       // and objects reachable from them on to the local stack (and
1417       // possibly the global stack). Call G1CMTask::do_marking_step() to
1418       // process these entries.
1419       //
1420       // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1421       // there's nothing more to do (i.e. we're done with the entries that
1422       // were pushed as a result of the G1CMTask::deal_with_reference() calls
1423       // above) or we overflow.
1424       //
1425       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1426       // flag while there may still be some work to do. (See the comment at
1427       // the beginning of G1CMTask::do_marking_step() for those conditions -
1428       // one of which is reaching the specified time target.) It is only
1429       // when G1CMTask::do_marking_step() returns without setting the
1430       // has_aborted() flag that the marking step has completed.
1431       do {
1432         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1433         _task->do_marking_step(mark_step_duration_ms,
1434                                false      /* do_termination */,
1435                                _is_serial);
1436       } while (_task->has_aborted() && !_cm->has_overflown());
1437       _ref_counter = _ref_counter_limit;
1438     }
1439   }
1440 };
1441 
1442 // 'Drain' oop closure used by both serial and parallel reference processing.
1443 // Uses the G1CMTask associated with a given worker thread (for serial
1444 // reference processing the G1CMTask for worker 0 is used). Calls the
1445 // do_marking_step routine, with an unbelievably large timeout value,
1446 // to drain the marking data structures of the remaining entries
1447 // added by the 'keep alive' oop closure above.
1448 
1449 class G1CMDrainMarkingStackClosure : public VoidClosure {
1450   G1ConcurrentMark* _cm;
1451   G1CMTask*         _task;
1452   bool              _is_serial;
1453  public:
1454   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1455     _cm(cm), _task(task), _is_serial(is_serial) {
1456     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1457   }
1458 
1459   void do_void() {
1460     do {
1461       // We call G1CMTask::do_marking_step() to completely drain the local
1462       // and global marking stacks of entries pushed by the 'keep alive'
1463       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1464       //
1465       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1466       // if there's nothing more to do (i.e. we've completely drained the
1467       // entries that were pushed as a result of applying the 'keep alive'
1468       // closure to the entries on the discovered ref lists) or we overflow
1469       // the global marking stack.
1470       //
1471       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1472       // flag while there may still be some work to do. (See the comment at
1473       // the beginning of G1CMTask::do_marking_step() for those conditions -
1474       // one of which is reaching the specified time target.) It is only
1475       // when G1CMTask::do_marking_step() returns without setting the
1476       // has_aborted() flag that the marking step has completed.
1477 
1478       _task->do_marking_step(1000000000.0 /* something very large */,
1479                              true         /* do_termination */,
1480                              _is_serial);
1481     } while (_task->has_aborted() && !_cm->has_overflown());
1482   }
1483 };
1484 
1485 // Implementation of AbstractRefProcTaskExecutor for parallel
1486 // reference processing at the end of G1 concurrent marking
1487 
1488 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1489 private:
1490   G1CollectedHeap*  _g1h;
1491   G1ConcurrentMark* _cm;
1492   WorkGang*         _workers;
1493   uint              _active_workers;
1494 
1495 public:
1496   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1497                           G1ConcurrentMark* cm,
1498                           WorkGang* workers,
1499                           uint n_workers) :
1500     _g1h(g1h), _cm(cm),
1501     _workers(workers), _active_workers(n_workers) { }
1502 
1503   virtual void execute(ProcessTask& task, uint ergo_workers);
1504 };
1505 
1506 class G1CMRefProcTaskProxy : public AbstractGangTask {
1507   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1508   ProcessTask&      _proc_task;
1509   G1CollectedHeap*  _g1h;
1510   G1ConcurrentMark* _cm;
1511 
1512 public:
1513   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1514                        G1CollectedHeap* g1h,
1515                        G1ConcurrentMark* cm) :
1516     AbstractGangTask("Process reference objects in parallel"),
1517     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1518     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1519     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1520   }
1521 
1522   virtual void work(uint worker_id) {
1523     ResourceMark rm;
1524     HandleMark hm;
1525     G1CMTask* task = _cm->task(worker_id);
1526     G1CMIsAliveClosure g1_is_alive(_g1h);
1527     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1528     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1529 
1530     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1531   }
1532 };
1533 
1534 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
1535   assert(_workers != NULL, "Need parallel worker threads.");
1536   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1537   assert(_workers->active_workers() >= ergo_workers,
1538          "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
1539          ergo_workers, _workers->active_workers());
1540 
1541   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1542 
1543   // We need to reset the concurrency level before each
1544   // proxy task execution, so that the termination protocol
1545   // and overflow handling in G1CMTask::do_marking_step() knows
1546   // how many workers to wait for.
1547   _cm->set_concurrency(ergo_workers);
1548   _workers->run_task(&proc_task_proxy, ergo_workers);
1549 }
1550 
1551 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1552   ResourceMark rm;
1553   HandleMark   hm;
1554 
1555   // Is alive closure.
1556   G1CMIsAliveClosure g1_is_alive(_g1h);
1557 
1558   // Inner scope to exclude the cleaning of the string table
1559   // from the displayed time.
1560   {
1561     GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1562 
1563     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1564 
1565     // See the comment in G1CollectedHeap::ref_processing_init()
1566     // about how reference processing currently works in G1.
1567 
1568     // Set the soft reference policy
1569     rp->setup_policy(clear_all_soft_refs);
1570     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1571 
1572     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1573     // in serial reference processing. Note these closures are also
1574     // used for serially processing (by the current thread) the
1575     // JNI references during parallel reference processing.
1576     //
1577     // These closures do not need to synchronize with the worker
1578     // threads involved in parallel reference processing as these
1579     // instances are executed serially by the current thread (e.g.
1580     // reference processing is not multi-threaded and is thus
1581     // performed by the current thread instead of a gang worker).
1582     //
1583     // The gang tasks involved in parallel reference processing create
1584     // their own instances of these closures, which do their own
1585     // synchronization among themselves.
1586     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1587     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1588 
1589     // We need at least one active thread. If reference processing
1590     // is not multi-threaded we use the current (VMThread) thread,
1591     // otherwise we use the work gang from the G1CollectedHeap and
1592     // we utilize all the worker threads we can.
1593     bool processing_is_mt = rp->processing_is_mt();
1594     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1595     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1596 
1597     // Parallel processing task executor.
1598     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1599                                               _g1h->workers(), active_workers);
1600     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1601 
1602     // Set the concurrency level. The phase was already set prior to
1603     // executing the remark task.
1604     set_concurrency(active_workers);
1605 
1606     // Set the degree of MT processing here.  If the discovery was done MT,
1607     // the number of threads involved during discovery could differ from
1608     // the number of active workers.  This is OK as long as the discovered
1609     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1610     rp->set_active_mt_degree(active_workers);
1611 
1612     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1613 
1614     // Process the weak references.
1615     const ReferenceProcessorStats& stats =
1616         rp->process_discovered_references(&g1_is_alive,
1617                                           &g1_keep_alive,
1618                                           &g1_drain_mark_stack,
1619                                           executor,
1620                                           &pt);
1621     _gc_tracer_cm->report_gc_reference_stats(stats);
1622     pt.print_all_references();
1623 
1624     // The do_oop work routines of the keep_alive and drain_marking_stack
1625     // oop closures will set the has_overflown flag if we overflow the
1626     // global marking stack.
1627 
1628     assert(has_overflown() || _global_mark_stack.is_empty(),
1629            "Mark stack should be empty (unless it has overflown)");
1630 
1631     assert(rp->num_queues() == active_workers, "why not");
1632 
1633     rp->verify_no_references_recorded();
1634     assert(!rp->discovery_enabled(), "Post condition");
1635   }
1636 
1637   if (has_overflown()) {
1638     // We can not trust g1_is_alive and the contents of the heap if the marking stack
1639     // overflowed while processing references. Exit the VM.
1640     fatal("Overflow during reference processing, can not continue. Please "
1641           "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1642           "restart.", MarkStackSizeMax);
1643     return;
1644   }
1645 
1646   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1647 
1648   {
1649     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1650     WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1651   }
1652 
1653   // Unload Klasses, String, Code Cache, etc.
1654   if (ClassUnloadingWithConcurrentMark) {
1655     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1656     bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
1657     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1658   } else if (StringDedup::is_enabled()) {
1659     GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm);
1660     _g1h->string_dedup_cleaning(&g1_is_alive, NULL);
1661   }
1662 }
1663 
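     // Yield closure used during precleaning of discovered references. Precleaning
     // runs inside the suspendible thread set, so it must periodically check for a
     // pending safepoint (via do_yield_check()) and stop early if marking has aborted.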
1664 class G1PrecleanYieldClosure : public YieldClosure {
1665   G1ConcurrentMark* _cm;
1666 
1667 public:
1668   G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1669 
1670   virtual bool should_return() {
1671     return _cm->has_aborted();
1672   }
1673 
1674   virtual bool should_return_fine_grain() {
1675     _cm->do_yield_check();
1676     return _cm->has_aborted();
1677   }
1678 };
1679 
1680 void G1ConcurrentMark::preclean() {
1681   assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1682 
1683   SuspendibleThreadSetJoiner joiner;
1684 
1685   G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1686   G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1687 
1688   set_concurrency_and_phase(1, true);
1689 
1690   G1PrecleanYieldClosure yield_cl(this);
1691 
1692   ReferenceProcessor* rp = _g1h->ref_processor_cm();
1693   // Precleaning is single threaded. Temporarily disable MT discovery.
1694   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1695   rp->preclean_discovered_references(rp->is_alive_non_header(),
1696                                      &keep_alive,
1697                                      &drain_mark_stack,
1698                                      &yield_cl,
1699                                      _gc_timer_cm);
1700 }
1701 
1702 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1703 // the prev bitmap for determining liveness.
1704 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1705   G1CollectedHeap* _g1h;
1706 public:
1707   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1708 
1709   bool do_object_b(oop obj) {
1710     HeapWord* addr = (HeapWord*)obj;
1711     return addr != NULL &&
1712            (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1713   }
1714 };
1715 
1716 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1717   // Depending on whether marking has completed, liveness needs to be determined
1718   // using either the next or the prev bitmap.
1719   if (mark_completed) {
1720     G1ObjectCountIsAliveClosure is_alive(_g1h);
1721     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1722   } else {
1723     G1CMIsAliveClosure is_alive(_g1h);
1724     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1725   }
1726 }
1727 
1728 
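     // Swap the prev and next mark bitmaps at the end of a marking cycle: the bitmap
     // just built by marking becomes the prev bitmap used for liveness queries, and
     // the old prev bitmap is flagged to be cleared for use as the next bitmap.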
1729 void G1ConcurrentMark::swap_mark_bitmaps() {
1730   G1CMBitMap* temp = _prev_mark_bitmap;
1731   _prev_mark_bitmap = _next_mark_bitmap;
1732   _next_mark_bitmap = temp;
1733   _g1h->collector_state()->set_clearing_next_bitmap(true);
1734 }
1735 
1736 // Closure for marking entries in SATB buffers.
1737 class G1CMSATBBufferClosure : public SATBBufferClosure {
1738 private:
1739   G1CMTask* _task;
1740   G1CollectedHeap* _g1h;
1741 
1742   // This is very similar to G1CMTask::deal_with_reference, but with
1743   // more relaxed requirements for the argument, so this must be more
1744   // circumspect about treating the argument as an object.
1745   void do_entry(void* entry) const {
1746     _task->increment_refs_reached();
1747     oop const obj = static_cast<oop>(entry);
1748     _task->make_reference_grey(obj);
1749   }
1750 
1751 public:
1752   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1753     : _task(task), _g1h(g1h) { }
1754 
1755   virtual void do_buffer(void** buffer, size_t size) {
1756     for (size_t i = 0; i < size; ++i) {
1757       do_entry(buffer[i]);
1758     }
1759   }
1760 };
1761 
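     // Thread closure used during remark: for each Java thread it walks the nmethods
     // on the thread's stack and drains the thread's SATB buffer; for the VM thread it
     // drains the shared SATB queue.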
1762 class G1RemarkThreadsClosure : public ThreadClosure {
1763   G1CMSATBBufferClosure _cm_satb_cl;
1764   G1CMOopClosure _cm_cl;
1765   MarkingCodeBlobClosure _code_cl;
1766   int _thread_parity;
1767 
1768  public:
1769   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1770     _cm_satb_cl(task, g1h),
1771     _cm_cl(g1h, task),
1772     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1773     _thread_parity(Threads::thread_claim_parity()) {}
1774 
1775   void do_thread(Thread* thread) {
1776     if (thread->is_Java_thread()) {
1777       if (thread->claim_oops_do(true, _thread_parity)) {
1778         JavaThread* jt = (JavaThread*)thread;
1779 
1780         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1781         // however, oops reachable from nmethods have very complex lifecycles:
1782         // * Alive if on the stack of an executing method
1783         // * Weakly reachable otherwise
1784         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1785         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1786         jt->nmethods_do(&_code_cl);
1787 
1788         G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl);
1789       }
1790     } else if (thread->is_VM_thread()) {
1791       if (thread->claim_oops_do(true, _thread_parity)) {
1792         G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1793       }
1794     }
1795   }
1796 };
1797 
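     // Parallel remark task: each worker first processes the thread roots above and
     // then calls do_marking_step() repeatedly until marking is complete or the
     // global mark stack overflows.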
1798 class G1CMRemarkTask : public AbstractGangTask {
1799   G1ConcurrentMark* _cm;
1800 public:
1801   void work(uint worker_id) {
1802     G1CMTask* task = _cm->task(worker_id);
1803     task->record_start_time();
1804     {
1805       ResourceMark rm;
1806       HandleMark hm;
1807 
1808       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1809       Threads::threads_do(&threads_f);
1810     }
1811 
1812     do {
1813       task->do_marking_step(1000000000.0 /* something very large */,
1814                             true         /* do_termination       */,
1815                             false        /* is_serial            */);
1816     } while (task->has_aborted() && !_cm->has_overflown());
1817     // If we overflow, then we do not want to restart. We instead
1818     // want to abort remark and do concurrent marking again.
1819     task->record_end_time();
1820   }
1821 
1822   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1823     AbstractGangTask("Par Remark"), _cm(cm) {
1824     _cm->terminator()->reset_for_reuse(active_workers);
1825   }
1826 };
1827 
1828 void G1ConcurrentMark::finalize_marking() {
1829   ResourceMark rm;
1830   HandleMark   hm;
1831 
1832   _g1h->ensure_parsability(false);
1833 
1834   // this is remark, so we'll use up all active threads
1835   uint active_workers = _g1h->workers()->active_workers();
1836   set_concurrency_and_phase(active_workers, false /* concurrent */);
1837   // Leave _parallel_marking_threads at its
1838   // value originally calculated in the G1ConcurrentMark
1839   // constructor and pass values of the active workers
1840   // through the gang in the task.
1841 
1842   {
1843     StrongRootsScope srs(active_workers);
1844 
1845     G1CMRemarkTask remarkTask(this, active_workers);
1846     // We will start all available threads, even if we decide that the
1847     // active_workers will be fewer. The extra ones will just bail out
1848     // immediately.
1849     _g1h->workers()->run_task(&remarkTask);
1850   }
1851 
1852   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1853   guarantee(has_overflown() ||
1854             satb_mq_set.completed_buffers_num() == 0,
1855             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1856             BOOL_TO_STR(has_overflown()),
1857             satb_mq_set.completed_buffers_num());
1858 
1859   print_stats();
1860 }
1861 
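     // Flush every task's region mark statistics cache into the global statistics
     // and log the aggregated cache hit/miss counts.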
1862 void G1ConcurrentMark::flush_all_task_caches() {
1863   size_t hits = 0;
1864   size_t misses = 0;
1865   for (uint i = 0; i < _max_num_tasks; i++) {
1866     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1867     hits += stats.first;
1868     misses += stats.second;
1869   }
1870   size_t sum = hits + misses;
1871   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1872                        hits, misses, percent_of(hits, sum));
1873 }
1874 
1875 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1876   _prev_mark_bitmap->clear_range(mr);
1877 }
1878 
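     // Claim the next region to scan by advancing the global finger with a CAS.
     // Returns NULL when there are no regions left below the end of the heap, or when
     // the claimed region turns out to have nothing to scan (callers should retry).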
1879 HeapRegion*
1880 G1ConcurrentMark::claim_region(uint worker_id) {
1881   // "checkpoint" the finger
1882   HeapWord* finger = _finger;
1883 
1884   while (finger < _heap.end()) {
1885     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1886 
1887     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1888     // Make sure that the reads below do not float before loading curr_region.
1889     OrderAccess::loadload();
1890     // Above heap_region_containing may return NULL as we always scan and claim
1891     // until the end of the heap. In this case, just jump to the next region.
1892     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1893 
1894     // Is the gap between reading the finger and doing the CAS too long?
1895     HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1896     if (res == finger && curr_region != NULL) {
1897       // we succeeded
1898       HeapWord*   bottom        = curr_region->bottom();
1899       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1900 
1901       // notice that _finger == end cannot be guaranteed here since
1902       // someone else might have moved the finger even further
1903       assert(_finger >= end, "the finger should have moved forward");
1904 
1905       if (limit > bottom) {
1906         return curr_region;
1907       } else {
1908         assert(limit == bottom,
1909                "the region limit should be at bottom");
1910         // we return NULL and the caller should try calling
1911         // claim_region() again.
1912         return NULL;
1913       }
1914     } else {
1915       assert(_finger > finger, "the finger should have moved forward");
1916       // read it again
1917       finger = _finger;
1918     }
1919   }
1920 
1921   return NULL;
1922 }
1923 
1924 #ifndef PRODUCT
1925 class VerifyNoCSetOops {
1926   G1CollectedHeap* _g1h;
1927   const char* _phase;
1928   int _info;
1929 
1930 public:
1931   VerifyNoCSetOops(const char* phase, int info = -1) :
1932     _g1h(G1CollectedHeap::heap()),
1933     _phase(phase),
1934     _info(info)
1935   { }
1936 
1937   void operator()(G1TaskQueueEntry task_entry) const {
1938     if (task_entry.is_array_slice()) {
1939       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1940       return;
1941     }
1942     guarantee(oopDesc::is_oop(task_entry.obj()),
1943               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1944               p2i(task_entry.obj()), _phase, _info);
1945     guarantee(!_g1h->is_in_cset(task_entry.obj()),
1946               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
1947               p2i(task_entry.obj()), _phase, _info);
1948   }
1949 };
1950 
1951 void G1ConcurrentMark::verify_no_cset_oops() {
1952   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1953   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1954     return;
1955   }
1956 
1957   // Verify entries on the global mark stack
1958   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1959 
1960   // Verify entries on the task queues
1961   for (uint i = 0; i < _max_num_tasks; ++i) {
1962     G1CMTaskQueue* queue = _task_queues->queue(i);
1963     queue->iterate(VerifyNoCSetOops("Queue", i));
1964   }
1965 
1966   // Verify the global finger
1967   HeapWord* global_finger = finger();
1968   if (global_finger != NULL && global_finger < _heap.end()) {
1969     // Since we always iterate over all regions, we might get a NULL HeapRegion
1970     // here.
1971     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1972     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1973               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1974               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1975   }
1976 
1977   // Verify the task fingers
1978   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1979   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1980     G1CMTask* task = _tasks[i];
1981     HeapWord* task_finger = task->finger();
1982     if (task_finger != NULL && task_finger < _heap.end()) {
1983       // See above note on the global finger verification.
1984       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
1985       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
1986                 !task_hr->in_collection_set(),
1987                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1988                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
1989     }
1990   }
1991 }
1992 #endif // PRODUCT
1993 
1994 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
1995   _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
1996 }
1997 
1998 void G1ConcurrentMark::print_stats() {
1999   if (!log_is_enabled(Debug, gc, stats)) {
2000     return;
2001   }
2002   log_debug(gc, stats)("---------------------------------------------------------------------");
2003   for (size_t i = 0; i < _num_active_tasks; ++i) {
2004     _tasks[i]->print_stats();
2005     log_debug(gc, stats)("---------------------------------------------------------------------");
2006   }
2007 }
2008 
2009 void G1ConcurrentMark::concurrent_cycle_abort() {
2010   if (!cm_thread()->during_cycle() || _has_aborted) {
2011     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2012     return;
2013   }
2014 
2015   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2016   // concurrent bitmap clearing.
2017   {
2018     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2019     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2020   }
2021   // Note we cannot clear the previous marking bitmap here
2022   // since VerifyDuringGC verifies the objects marked during
2023   // a full GC against the previous bitmap.
2024 
2025   // Empty mark stack
2026   reset_marking_for_restart();
2027   for (uint i = 0; i < _max_num_tasks; ++i) {
2028     _tasks[i]->clear_region_fields();
2029   }
2030   _first_overflow_barrier_sync.abort();
2031   _second_overflow_barrier_sync.abort();
2032   _has_aborted = true;
2033 
2034   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2035   satb_mq_set.abandon_partial_marking();
2036   // This can be called either during or outside marking; we'll read
2037   // the expected_active value from the SATB queue set.
2038   satb_mq_set.set_active_all_threads(
2039                                  false, /* new active value */
2040                                  satb_mq_set.is_active() /* expected_active */);
2041 }
2042 
2043 static void print_ms_time_info(const char* prefix, const char* name,
2044                                NumberSeq& ns) {
2045   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2046                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2047   if (ns.num() > 0) {
2048     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2049                            prefix, ns.sd(), ns.maximum());
2050   }
2051 }
2052 
2053 void G1ConcurrentMark::print_summary_info() {
2054   Log(gc, marking) log;
2055   if (!log.is_trace()) {
2056     return;
2057   }
2058 
2059   log.trace(" Concurrent marking:");
2060   print_ms_time_info("  ", "init marks", _init_times);
2061   print_ms_time_info("  ", "remarks", _remark_times);
2062   {
2063     print_ms_time_info("     ", "final marks", _remark_mark_times);
2064     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2065 
2066   }
2067   print_ms_time_info("  ", "cleanups", _cleanup_times);
2068   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2069             _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2070   log.trace("  Total stop_world time = %8.2f s.",
2071             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2072   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2073             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2074 }
2075 
2076 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2077   _concurrent_workers->print_worker_threads_on(st);
2078 }
2079 
2080 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2081   _concurrent_workers->threads_do(tc);
2082 }
2083 
2084 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2085   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2086                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2087   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2088   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2089 }
2090 
2091 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2092   ReferenceProcessor* result = g1h->ref_processor_cm();
2093   assert(result != NULL, "CM reference processor should not be NULL");
2094   return result;
2095 }
2096 
2097 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2098                                G1CMTask* task)
2099   : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2100     _g1h(g1h), _task(task)
2101 { }
2102 
2103 void G1CMTask::setup_for_region(HeapRegion* hr) {
2104   assert(hr != NULL,
2105         "claim_region() should have filtered out NULL regions");
2106   _curr_region  = hr;
2107   _finger       = hr->bottom();
2108   update_region_limit();
2109 }
2110 
2111 void G1CMTask::update_region_limit() {
2112   HeapRegion* hr            = _curr_region;
2113   HeapWord* bottom          = hr->bottom();
2114   HeapWord* limit           = hr->next_top_at_mark_start();
2115 
2116   if (limit == bottom) {
2117     // The region was collected underneath our feet.
2118     // We set the finger to bottom to ensure that the bitmap
2119     // iteration that will follow this will not do anything.
2120     // (this is not a condition that holds when we set the region up,
2121     // as the region is not supposed to be empty in the first place)
2122     _finger = bottom;
2123   } else if (limit >= _region_limit) {
2124     assert(limit >= _finger, "peace of mind");
2125   } else {
2126     assert(limit < _region_limit, "only way to get here");
2127     // This can happen under some pretty unusual circumstances.  An
2128     // evacuation pause empties the region underneath our feet (NTAMS
2129     // at bottom). We then do some allocation in the region (NTAMS
2130     // stays at bottom), followed by the region being used as a GC
2131     // alloc region (NTAMS will move to top() and the objects
2132     // originally below it will be grayed). All objects now marked in
2133     // the region are explicitly grayed, if below the global finger,
2134     // and we do not need in fact to scan anything else. So, we simply
2135     // set _finger to be limit to ensure that the bitmap iteration
2136     // doesn't do anything.
2137     _finger = limit;
2138   }
2139 
2140   _region_limit = limit;
2141 }
2142 
2143 void G1CMTask::giveup_current_region() {
2144   assert(_curr_region != NULL, "invariant");
2145   clear_region_fields();
2146 }
2147 
2148 void G1CMTask::clear_region_fields() {
2149   // Values for these three fields that indicate that we're not
2150   // holding on to a region.
2151   _curr_region   = NULL;
2152   _finger        = NULL;
2153   _region_limit  = NULL;
2154 }
2155 
2156 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2157   if (cm_oop_closure == NULL) {
2158     assert(_cm_oop_closure != NULL, "invariant");
2159   } else {
2160     assert(_cm_oop_closure == NULL, "invariant");
2161   }
2162   _cm_oop_closure = cm_oop_closure;
2163 }
2164 
2165 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2166   guarantee(next_mark_bitmap != NULL, "invariant");
2167   _next_mark_bitmap              = next_mark_bitmap;
2168   clear_region_fields();
2169 
2170   _calls                         = 0;
2171   _elapsed_time_ms               = 0.0;
2172   _termination_time_ms           = 0.0;
2173   _termination_start_time_ms     = 0.0;
2174 
2175   _mark_stats_cache.reset();
2176 }
2177 
2178 bool G1CMTask::should_exit_termination() {
2179   if (!regular_clock_call()) {
2180     return true;
2181   }
2182 
2183   // This is called when we are in the termination protocol. We should
2184   // quit if, for some reason, this task wants to abort or the global
2185   // stack is not empty (this means that we can get work from it).
2186   return !_cm->mark_stack_empty() || has_aborted();
2187 }
2188 
2189 void G1CMTask::reached_limit() {
2190   assert(_words_scanned >= _words_scanned_limit ||
2191          _refs_reached >= _refs_reached_limit ,
2192          "shouldn't have been called otherwise");
2193   abort_marking_if_regular_check_fail();
2194 }
2195 
2196 bool G1CMTask::regular_clock_call() {
2197   if (has_aborted()) {
2198     return false;
2199   }
2200 
2201   // First, we need to recalculate the words scanned and refs reached
2202   // limits for the next clock call.
2203   recalculate_limits();
2204 
2205   // During the regular clock call we do the following
2206 
2207   // (1) If an overflow has been flagged, then we abort.
2208   if (_cm->has_overflown()) {
2209     return false;
2210   }
2211 
2212   // If we are not concurrent (i.e. we're doing remark) we don't need
2213   // to check anything else. The other steps are only needed during
2214   // the concurrent marking phase.
2215   if (!_cm->concurrent()) {
2216     return true;
2217   }
2218 
2219   // (2) If marking has been aborted for Full GC, then we also abort.
2220   if (_cm->has_aborted()) {
2221     return false;
2222   }
2223 
2224   double curr_time_ms = os::elapsedVTime() * 1000.0;
2225 
2226   // (3) We check whether we should yield. If we have to, then we abort.
2227   if (SuspendibleThreadSet::should_yield()) {
2228     // We should yield. To do this we abort the task. The caller is
2229     // responsible for yielding.
2230     return false;
2231   }
2232 
2233   // (4) We check whether we've reached our time quota. If we have,
2234   // then we abort.
2235   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2236   if (elapsed_time_ms > _time_target_ms) {
2237     _has_timed_out = true;
2238     return false;
2239   }
2240 
2241   // (5) Finally, we check whether there are enough completed SATB
2242   // buffers available for processing. If there are, we abort.
2243   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2244   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2245     // we do need to process SATB buffers, we'll abort and restart
2246     // the marking task to do so
2247     return false;
2248   }
2249   return true;
2250 }
2251 
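     // Reset the work-based clock limits so that the next call to regular_clock_call()
     // happens after another full period of words scanned or references reached.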
2252 void G1CMTask::recalculate_limits() {
2253   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2254   _words_scanned_limit      = _real_words_scanned_limit;
2255 
2256   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2257   _refs_reached_limit       = _real_refs_reached_limit;
2258 }
2259 
2260 void G1CMTask::decrease_limits() {
2261   // This is called when we believe that we're going to do an infrequent
2262   // operation which will increase the per byte scanned cost (i.e. move
2263   // entries to/from the global stack). It basically tries to decrease the
2264   // scanning limit so that the clock is called earlier.
2265 
2266   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2267   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2268 }
2269 
2270 void G1CMTask::move_entries_to_global_stack() {
2271   // Local array where we'll store the entries that will be popped
2272   // from the local queue.
2273   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2274 
2275   size_t n = 0;
2276   G1TaskQueueEntry task_entry;
2277   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2278     buffer[n] = task_entry;
2279     ++n;
2280   }
2281   if (n < G1CMMarkStack::EntriesPerChunk) {
2282     buffer[n] = G1TaskQueueEntry();
2283   }
2284 
2285   if (n > 0) {
2286     if (!_cm->mark_stack_push(buffer)) {
2287       set_has_aborted();
2288     }
2289   }
2290 
2291   // This operation was quite expensive, so decrease the limits.
2292   decrease_limits();
2293 }
2294 
2295 bool G1CMTask::get_entries_from_global_stack() {
2296   // Local array where we'll store the entries that will be popped
2297   // from the global stack.
2298   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2299 
2300   if (!_cm->mark_stack_pop(buffer)) {
2301     return false;
2302   }
2303 
2304   // We did actually pop at least one entry.
2305   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2306     G1TaskQueueEntry task_entry = buffer[i];
2307     if (task_entry.is_null()) {
2308       break;
2309     }
2310     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2311     bool success = _task_queue->push(task_entry);
2312     // We only call this when the local queue is empty or under a
2313     // given target limit. So, we do not expect this push to fail.
2314     assert(success, "invariant");
2315   }
2316 
2317   // This operation was quite expensive, so decrease the limits
2318   decrease_limits();
2319   return true;
2320 }
2321 
2322 void G1CMTask::drain_local_queue(bool partially) {
2323   if (has_aborted()) {
2324     return;
2325   }
2326 
2327   // Decide what the target size is, depending whether we're going to
2328   // drain it partially (so that other tasks can steal if they run out
2329   // of things to do) or totally (at the very end).
2330   size_t target_size;
2331   if (partially) {
2332     target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2333   } else {
2334     target_size = 0;
2335   }
2336 
2337   if (_task_queue->size() > target_size) {
2338     G1TaskQueueEntry entry;
2339     bool ret = _task_queue->pop_local(entry);
2340     while (ret) {
2341       scan_task_entry(entry);
2342       if (_task_queue->size() <= target_size || has_aborted()) {
2343         ret = false;
2344       } else {
2345         ret = _task_queue->pop_local(entry);
2346       }
2347     }
2348   }
2349 }
2350 
2351 void G1CMTask::drain_global_stack(bool partially) {
2352   if (has_aborted()) {
2353     return;
2354   }
2355 
2356   // We have a policy to drain the local queue before we attempt to
2357   // drain the global stack.
2358   assert(partially || _task_queue->size() == 0, "invariant");
2359 
2360   // Decide what the target size is, depending whether we're going to
2361   // drain it partially (so that other tasks can steal if they run out
2362   // of things to do) or totally (at the very end).
2363   // Notice that when draining the global mark stack partially, due to the raciness
2364   // of the mark stack size update we might in fact drop below the target. But,
2365   // this is not a problem.
2366   // In case of total draining, we simply process until the global mark stack is
2367   // totally empty, disregarding the size counter.
2368   if (partially) {
2369     size_t const target_size = _cm->partial_mark_stack_size_target();
2370     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2371       if (get_entries_from_global_stack()) {
2372         drain_local_queue(partially);
2373       }
2374     }
2375   } else {
2376     while (!has_aborted() && get_entries_from_global_stack()) {
2377       drain_local_queue(partially);
2378     }
2379   }
2380 }
2381 
2382 // SATB Queue has several assumptions on whether to call the par or
2383 // non-par versions of the methods. This is why some of the code is
2384 // replicated. We should really get rid of the single-threaded version
2385 // of the code to simplify things.
2386 void G1CMTask::drain_satb_buffers() {
2387   if (has_aborted()) {
2388     return;
2389   }
2390 
2391   // We set this so that the regular clock knows that we're in the
2392   // middle of draining buffers and doesn't set the abort flag when it
2393   // notices that SATB buffers are available for draining. It'd be
2394   // very counterproductive if it did that. :-)
2395   _draining_satb_buffers = true;
2396 
2397   G1CMSATBBufferClosure satb_cl(this, _g1h);
2398   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2399 
2400   // This keeps claiming and applying the closure to completed buffers
2401   // until we run out of buffers or we need to abort.
2402   while (!has_aborted() &&
2403          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2404     abort_marking_if_regular_check_fail();
2405   }
2406 
2407   _draining_satb_buffers = false;
2408 
2409   assert(has_aborted() ||
2410          _cm->concurrent() ||
2411          satb_mq_set.completed_buffers_num() == 0, "invariant");
2412 
2413   // again, this was a potentially expensive operation, decrease the
2414   // limits to get the regular clock call early
2415   decrease_limits();
2416 }
2417 
2418 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2419   _mark_stats_cache.reset(region_idx);
2420 }
2421 
2422 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2423   return _mark_stats_cache.evict_all();
2424 }
2425 
2426 void G1CMTask::print_stats() {
2427   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2428   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2429                        _elapsed_time_ms, _termination_time_ms);
2430   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2431                        _step_times_ms.num(),
2432                        _step_times_ms.avg(),
2433                        _step_times_ms.sd(),
2434                        _step_times_ms.maximum(),
2435                        _step_times_ms.sum());
2436   size_t const hits = _mark_stats_cache.hits();
2437   size_t const misses = _mark_stats_cache.misses();
2438   log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2439                        hits, misses, percent_of(hits, hits + misses));
2440 }
2441 
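     // Try to steal a queue entry from another task's local queue.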
2442 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2443   return _task_queues->steal(worker_id, task_entry);
2444 }
2445 
2446 /*****************************************************************************
2447 
2448     The do_marking_step(time_target_ms, ...) method is the building
2449     block of the parallel marking framework. It can be called in parallel
2450     with other invocations of do_marking_step() on different tasks
2451     (but only one per task, obviously) and concurrently with the
2452     mutator threads, or during remark, hence it eliminates the need
2453     for two versions of the code. When called during remark, it will
2454     pick up from where the task left off during the concurrent marking
2455     phase. Interestingly, tasks are also claimable during evacuation
2456     pauses, since do_marking_step() ensures that it aborts before
2457     it needs to yield.
2458 
2459     The data structures that it uses to do marking work are the
2460     following:
2461 
2462       (1) Marking Bitmap. If there are gray objects that appear only
2463       on the bitmap (this happens either when dealing with an overflow
2464       or when the initial marking phase has simply marked the roots
2465       and didn't push them on the stack), then tasks claim heap
2466       regions whose bitmap they then scan to find gray objects. A
2467       global finger indicates where the end of the last claimed region
2468       is. A local finger indicates how far into the region a task has
2469       scanned. The two fingers are used to determine how to gray an
2470       object (i.e. whether simply marking it is OK, as it will be
2471       visited by a task in the future, or whether it needs to be also
2472       pushed on a stack).
2473 
2474       (2) Local Queue. The local queue of the task which is accessed
2475       reasonably efficiently by the task. Other tasks can steal from
2476       it when they run out of work. Throughout the marking phase, a
2477       task attempts to keep its local queue short but not totally
2478       empty, so that entries are available for stealing by other
2479       tasks. Only when there is no more work will a task totally
2480       drain its local queue.
2481 
2482       (3) Global Mark Stack. This handles local queue overflow. During
2483       marking only sets of entries are moved between it and the local
2484       queues, as access to it requires a mutex and more fine-grain
2485       interaction with it which might cause contention. If it
2486       overflows, then the marking phase should restart and iterate
2487       over the bitmap to identify gray objects. Throughout the marking
2488       phase, tasks attempt to keep the global mark stack at a small
2489       length but not totally empty, so that entries are available for
2490       popping by other tasks. Only when there is no more work will
2491       tasks totally drain the global mark stack.
2492 
2493       (4) SATB Buffer Queue. This is where completed SATB buffers are
2494       made available. Buffers are regularly removed from this queue
2495       and scanned for roots, so that the queue doesn't get too
2496       long. During remark, all completed buffers are processed, as
2497       well as the filled in parts of any uncompleted buffers.
2498 
2499     The do_marking_step() method tries to abort when the time target
2500     has been reached. There are a few other cases when the
2501     do_marking_step() method also aborts:
2502 
2503       (1) When the marking phase has been aborted (after a Full GC).
2504 
2505       (2) When a global overflow (on the global stack) has been
2506       triggered. Before the task aborts, it will actually sync up with
2507       the other tasks to ensure that all the marking data structures
2508       (local queues, stacks, fingers etc.)  are re-initialized so that
2509       when do_marking_step() completes, the marking phase can
2510       immediately restart.
2511 
2512       (3) When enough completed SATB buffers are available. The
2513       do_marking_step() method only tries to drain SATB buffers right
2514       at the beginning. So, if enough buffers are available, the
2515       marking step aborts and the SATB buffers are processed at
2516       the beginning of the next invocation.
2517 
2518       (4) To yield. When we have to yield, we abort and yield
2519       right at the end of do_marking_step(). This saves us from a lot
2520       of hassle as, by yielding, we might allow a Full GC. If this
2521       happens then objects will be compacted underneath our feet, the
2522       heap might shrink, etc. We save checking for this by just
2523       aborting and doing the yield right at the end.
2524 
2525     From the above it follows that the do_marking_step() method should
2526     be called in a loop (or, otherwise, regularly) until it completes.
2527 
2528     If a marking step completes without its has_aborted() flag being
2529     true, it means it has completed the current marking phase (and
2530     also all other marking tasks have done so and have all synced up).
2531 
2532     A method called regular_clock_call() is invoked "regularly" (in
2533     sub-millisecond intervals) throughout marking. It is this clock method that
2534     checks all the abort conditions which were mentioned above and
2535     decides when the task should abort. A work-based scheme is used to
2536     trigger this clock method: when the number of object words the
2537     marking phase has scanned or the number of references the marking
2538     phase has visited reach a given limit. Additional invocations to
2539     the method clock have been planted in a few other strategic places
2540     too. The initial reason for the clock method was to avoid calling
2541     vtime too regularly, as it is quite expensive. So, once it was in
2542     place, it was natural to piggy-back all the other conditions on it
2543     too and not constantly check them throughout the code.
2544 
2545     If do_termination is true then do_marking_step will enter its
2546     termination protocol.
2547 
2548     The value of is_serial must be true when do_marking_step is being
2549     called serially (i.e. by the VMThread) and do_marking_step should
2550     skip any synchronization in the termination and overflow code.
2551     Examples include the serial remark code and the serial reference
2552     processing closures.
2553 
2554     The value of is_serial must be false when do_marking_step is
2555     being called by any of the worker threads in a work gang.
2556     Examples include the concurrent marking code (CMMarkingTask),
2557     the MT remark code, and the MT reference processing closures.
2558 
2559  *****************************************************************************/
2560 
2561 void G1CMTask::do_marking_step(double time_target_ms,
2562                                bool do_termination,
2563                                bool is_serial) {
2564   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2565 
2566   _start_time_ms = os::elapsedVTime() * 1000.0;
2567 
2568   // If do_stealing is true then do_marking_step will attempt to
2569   // steal work from the other G1CMTasks. It only makes sense to
2570   // enable stealing when the termination protocol is enabled
2571   // and do_marking_step() is not being called serially.
2572   bool do_stealing = do_termination && !is_serial;
2573 
2574   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2575   _time_target_ms = time_target_ms - diff_prediction_ms;
2576 
2577   // set up the variables that are used in the work-based scheme to
2578   // call the regular clock method
2579   _words_scanned = 0;
2580   _refs_reached  = 0;
2581   recalculate_limits();
2582 
2583   // clear all flags
2584   clear_has_aborted();
2585   _has_timed_out = false;
2586   _draining_satb_buffers = false;
2587 
2588   ++_calls;
2589 
2590   // Set up the bitmap and oop closures. Anything that uses them is
2591   // eventually called from this method, so it is OK to allocate these
2592   // statically.
2593   G1CMBitMapClosure bitmap_closure(this, _cm);
2594   G1CMOopClosure cm_oop_closure(_g1h, this);
2595   set_cm_oop_closure(&cm_oop_closure);
2596 
2597   if (_cm->has_overflown()) {
2598     // This can happen if the mark stack overflows during a GC pause
2599     // and this task, after a yield point, restarts. We have to abort
2600     // as we need to get into the overflow protocol which happens
2601     // right at the end of this task.
2602     set_has_aborted();
2603   }
2604 
2605   // First drain any available SATB buffers. After this, we will not
2606   // look at SATB buffers before the next invocation of this method.
2607   // If enough completed SATB buffers are queued up, the regular clock
2608   // will abort this task so that it restarts.
2609   drain_satb_buffers();
2610   // ...then partially drain the local queue and the global stack
2611   drain_local_queue(true);
2612   drain_global_stack(true);
2613 
2614   do {
2615     if (!has_aborted() && _curr_region != NULL) {
2616       // This means that we're already holding on to a region.
2617       assert(_finger != NULL, "if region is not NULL, then the finger "
2618              "should not be NULL either");
2619 
2620       // We might have restarted this task after an evacuation pause
2621       // which might have evacuated the region we're holding on to
2622       // underneath our feet. Let's read its limit again to make sure
2623       // that we do not iterate over a region of the heap that
2624       // contains garbage (update_region_limit() will also move
2625       // _finger to the start of the region if it is found empty).
2626       update_region_limit();
2627       // We will start from _finger not from the start of the region,
2628       // as we might be restarting this task after aborting half-way
2629       // through scanning this region. In this case, _finger points to
2630       // the address where we last found a marked object. If this is a
2631       // fresh region, _finger points to start().
2632       MemRegion mr = MemRegion(_finger, _region_limit);
2633 
2634       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2635              "humongous regions should go around loop once only");
2636 
2637       // Some special cases:
2638       // If the memory region is empty, we can just give up the region.
2639       // If the current region is humongous then we only need to check
2640       // the bitmap for the bit associated with the start of the object,
2641       // scan the object if it's live, and give up the region.
2642       // Otherwise, let's iterate over the bitmap of the part of the region
2643       // that is left.
2644       // If the iteration is successful, give up the region.
2645       if (mr.is_empty()) {
2646         giveup_current_region();
2647         abort_marking_if_regular_check_fail();
2648       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2649         if (_next_mark_bitmap->is_marked(mr.start())) {
2650           // The object is marked - apply the closure
2651           bitmap_closure.do_addr(mr.start());
2652         }
2653         // Even if this task aborted while scanning the humongous object
2654         // we can (and should) give up the current region.
2655         giveup_current_region();
2656         abort_marking_if_regular_check_fail();
2657       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2658         giveup_current_region();
2659         abort_marking_if_regular_check_fail();
2660       } else {
2661         assert(has_aborted(), "currently the only way to do so");
2662         // The only way to abort the bitmap iteration is to return
2663         // false from the do_bit() method. However, inside the
2664         // do_bit() method we move the _finger to point to the
2665         // object currently being looked at. So, if we bail out, we
2666         // have definitely set _finger to something non-null.
2667         assert(_finger != NULL, "invariant");
2668 
2669         // Region iteration was actually aborted. So now _finger
2670         // points to the address of the object we last scanned. If we
2671         // leave it there, when we restart this task, we will rescan
2672         // the object. It is easy to avoid this. We move the finger by
2673         // enough to point to the next possible object header.
2674         assert(_finger < _region_limit, "invariant");
2675         HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2676         // Check if bitmap iteration was aborted while scanning the last object
2677         if (new_finger >= _region_limit) {
2678           giveup_current_region();
2679         } else {
2680           move_finger_to(new_finger);
2681         }
2682       }
2683     }
2684     // At this point we have either completed iterating over the
2685     // region we were holding on to, or we have aborted.
2686 
2687     // We then partially drain the local queue and the global stack.
2688     // (Do we really need this?)
2689     drain_local_queue(true);
2690     drain_global_stack(true);
2691 
2692     // Read the note on the claim_region() method on why it might
2693     // return NULL with potentially more regions available for
2694     // claiming and why we have to check out_of_regions() to determine
2695     // whether we're done or not.
2696     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2697       // We are going to try to claim a new region. We should have
2698       // given up on the previous one.
2699       // Separated the asserts so that we know which one fires.
2700       assert(_curr_region  == NULL, "invariant");
2701       assert(_finger       == NULL, "invariant");
2702       assert(_region_limit == NULL, "invariant");
2703       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2704       if (claimed_region != NULL) {
2705         // Yes, we managed to claim one
2706         setup_for_region(claimed_region);
2707         assert(_curr_region == claimed_region, "invariant");
2708       }
2709       // It is important to call the regular clock here. It might take
2710       // a while to claim a region if, for example, we hit a large
2711       // block of empty regions. So we need to call the regular clock
2712       // method once round the loop to make sure it's called
2713       // frequently enough.
2714       abort_marking_if_regular_check_fail();
2715     }
2716 
2717     if (!has_aborted() && _curr_region == NULL) {
2718       assert(_cm->out_of_regions(),
2719              "at this point we should be out of regions");
2720     }
2721   } while (_curr_region != NULL && !has_aborted());
2722 
2723   if (!has_aborted()) {
2724     // We cannot check whether the global stack is empty, since other
2725     // tasks might be pushing objects to it concurrently.
2726     assert(_cm->out_of_regions(),
2727            "at this point we should be out of regions");
2728     // Try to reduce the number of available SATB buffers so that
2729     // remark has less work to do.
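         // (SATB buffers hold the references recorded by the pre-write barrier since
         // marking started; draining some of them now spreads that work out.)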
2730     drain_satb_buffers();
2731   }
2732 
2733   // Since we've done everything else, we can now totally drain the
2734   // local queue and global stack.
2735   drain_local_queue(false);
2736   drain_global_stack(false);
2737 
2738   // Attempt to steal work from other tasks' queues.
2739   if (do_stealing && !has_aborted()) {
2740     // We have not aborted. This means that we have finished all that
2741     // we could. Let's try to do some stealing...
2742 
2743     // We cannot check whether the global stack is empty, since other
2744     // tasks might be pushing objects to it concurrently.
2745     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2746            "only way to reach here");
2747     while (!has_aborted()) {
2748       G1TaskQueueEntry entry;
2749       if (_cm->try_stealing(_worker_id, entry)) {
2750         scan_task_entry(entry);
2751 
2752         // And since we're towards the end, let's totally drain the
2753         // local queue and global stack.
2754         drain_local_queue(false);
2755         drain_global_stack(false);
2756       } else {
2757         break;
2758       }
2759     }
2760   }
2761 
2762   // We still haven't aborted. Now, let's try to get into the
2763   // termination protocol.
2764   if (do_termination && !has_aborted()) {
2765     // We cannot check whether the global stack is empty, since other
2766     // tasks might be concurrently pushing objects on it.
2767     // Separated the asserts so that we know which one fires.
2768     assert(_cm->out_of_regions(), "only way to reach here");
2769     assert(_task_queue->size() == 0, "only way to reach here");
2770     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2771 
2772     // The G1CMTask class also extends the TerminatorTerminator class,
2773     // so its should_exit_termination() method also gets a say in
2774     // whether to exit the termination protocol.
2775     bool finished = (is_serial ||
2776                      _cm->terminator()->offer_termination(this));
2777     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2778     _termination_time_ms +=
2779       termination_end_time_ms - _termination_start_time_ms;
2780 
2781     if (finished) {
2782       // We're all done.
2783 
2784       // We can now guarantee that the global stack is empty, since
2785       // all other tasks have finished. We separated the guarantees so
2786       // that, if a condition is false, we can immediately find out
2787       // which one.
2788       guarantee(_cm->out_of_regions(), "only way to reach here");
2789       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2790       guarantee(_task_queue->size() == 0, "only way to reach here");
2791       guarantee(!_cm->has_overflown(), "only way to reach here");
2792       guarantee(!has_aborted(), "should never happen if termination has completed");
2793     } else {
2794       // Apparently there's more work to do. Let's abort this task; the
2795       // caller will restart it and we can hopefully find more things to do.
2796       set_has_aborted();
2797     }
2798   }
2799 
2800   // Mainly for debugging purposes: make sure that a pointer to the
2801   // closure that was stack-allocated in this frame doesn't
2802   // escape it by accident.
2803   set_cm_oop_closure(NULL);
2804   double end_time_ms = os::elapsedVTime() * 1000.0;
2805   double elapsed_time_ms = end_time_ms - _start_time_ms;
2806   // Update the step history.
2807   _step_times_ms.add(elapsed_time_ms);
2808 
2809   if (has_aborted()) {
2810     // The task was aborted for some reason.
2811     if (_has_timed_out) {
2812       double diff_ms = elapsed_time_ms - _time_target_ms;
2813       // Keep statistics of how well we did with respect to hitting
2814       // our target only if we actually timed out (if we aborted for
2815       // other reasons, then the results might get skewed).
2816       _marking_step_diffs_ms.add(diff_ms);
2817     }
2818 
2819     if (_cm->has_overflown()) {
2820       // This is the interesting one. We aborted because the global
2821       // overflow flag was raised. This means we have to restart the
2822       // marking phase and start iterating over regions. However, in
2823       // order to do this we have to make sure that all tasks stop
2824       // what they are doing and re-initialize in a safe manner. We
2825       // will achieve this with the use of two barrier sync points.
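           //
           // In outline, each parallel task that observed the overflow:
           //   1. enters the first sync barrier, so that all tasks have stopped marking;
           //   2. clears its region fields and flushes its mark stats cache;
           //   3. (worker 0 only, and only during the concurrent phase) resets the
           //      global marking state via reset_marking_for_restart();
           //   4. enters the second sync barrier, after which it is safe to restart.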
2826 
2827       if (!is_serial) {
2828         // We only need to enter the sync barrier if being called
2829         // from a parallel context
2830         _cm->enter_first_sync_barrier(_worker_id);
2831 
2832         // When we exit this sync barrier we know that all tasks have
2833         // stopped doing marking work. So, it's now safe to
2834         // re-initialize our data structures.
2835       }
2836 
2837       clear_region_fields();
2838       flush_mark_stats_cache();
2839 
2840       if (!is_serial) {
2841         // If we're executing the concurrent phase of marking, reset the marking
2842         // state; otherwise the marking state is reset after reference processing,
2843         // during the remark pause.
2844         // If we reset here as a result of an overflow during the remark we will
2845         // see assertion failures from any subsequent set_concurrency_and_phase()
2846         // calls.
2847         if (_cm->concurrent() && _worker_id == 0) {
2848           // Worker 0 is responsible for clearing the global data structures because
2849           // of an overflow. During STW we should not clear the overflow flag (in
2850           // G1ConcurrentMark::reset_marking_for_restart()) since we rely on it being true
2851           // when we exit this method to abort the pause and restart concurrent marking.
2852           _cm->reset_marking_for_restart();
2853 
2854           log_info(gc, marking)("Concurrent Mark reset for overflow");
2855         }
2856 
2857         // ...and enter the second barrier.
2858         _cm->enter_second_sync_barrier(_worker_id);
2859       }
2860       // At this point, if we're in the concurrent phase of
2861       // marking, everything has been re-initialized and we're
2862       // ready to restart.
2863     }
2864   }
2865 }
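
     // Note: during the concurrent phase this step is typically driven by the marking
     // worker threads with a bounded time slice, roughly along the lines of
     //
     //   task->do_marking_step(G1ConcMarkStepDurationMillis,
     //                         true  /* do_termination */,
     //                         false /* is_serial */);
     //
     // (illustrative only -- see the callers for the exact arguments used).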
2866 
2867 G1CMTask::G1CMTask(uint worker_id,
2868                    G1ConcurrentMark* cm,
2869                    G1CMTaskQueue* task_queue,
2870                    G1RegionMarkStats* mark_stats,
2871                    uint max_regions) :
2872   _objArray_processor(this),
2873   _worker_id(worker_id),
2874   _g1h(G1CollectedHeap::heap()),
2875   _cm(cm),
2876   _next_mark_bitmap(NULL),
2877   _task_queue(task_queue),
2878   _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2879   _calls(0),
2880   _time_target_ms(0.0),
2881   _start_time_ms(0.0),
2882   _cm_oop_closure(NULL),
2883   _curr_region(NULL),
2884   _finger(NULL),
2885   _region_limit(NULL),
2886   _words_scanned(0),
2887   _words_scanned_limit(0),
2888   _real_words_scanned_limit(0),
2889   _refs_reached(0),
2890   _refs_reached_limit(0),
2891   _real_refs_reached_limit(0),
2892   _has_aborted(false),
2893   _has_timed_out(false),
2894   _draining_satb_buffers(false),
2895   _step_times_ms(),
2896   _elapsed_time_ms(0.0),
2897   _termination_time_ms(0.0),
2898   _termination_start_time_ms(0.0),
2899   _marking_step_diffs_ms()
2900 {
2901   guarantee(task_queue != NULL, "invariant");
2902 
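       // Seed the step-diff statistics so that the very first prediction of how much
       // a marking step tends to overshoot its time target has a sample to work with.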
2903   _marking_step_diffs_ms.add(0.5);
2904 }
2905 
2906 // These are formatting macros that are used below to ensure
2907 // consistent formatting. The *_H_* versions are used to format the
2908 // header for a particular value and they should be kept consistent
2909 // with the corresponding macro. Also note that most of the macros add
2910 // the necessary white space (as a prefix) which makes them a bit
2911 // easier to compose.
2912 
2913 // All the output lines are prefixed with this string to be able to
2914 // identify them easily in a large log file.
2915 #define G1PPRL_LINE_PREFIX            "###"
2916 
2917 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
2918 #ifdef _LP64
2919 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
2920 #else // _LP64
2921 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
2922 #endif // _LP64
2923 
2924 // For per-region info
2925 #define G1PPRL_TYPE_FORMAT            "   %-4s"
2926 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
2927 #define G1PPRL_STATE_FORMAT           "   %-5s"
2928 #define G1PPRL_STATE_H_FORMAT         "   %5s"
2929 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
2930 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
2931 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
2932 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
2933 
2934 // For summary info
2935 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
2936 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
2937 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
2938 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
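
     // For example, a full per-region line is produced by string-concatenating the
     // value macros into a single format string (the matching *_H_FORMAT macros are
     // used the same way for the header lines), e.g.:
     //
     //   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
     //                           G1PPRL_TYPE_FORMAT
     //                           G1PPRL_ADDR_BASE_FORMAT
     //                           G1PPRL_BYTE_FORMAT,
     //                           type, p2i(bottom), p2i(end), used_bytes);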
2939 
2940 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2941   _total_used_bytes(0), _total_capacity_bytes(0),
2942   _total_prev_live_bytes(0), _total_next_live_bytes(0),
2943   _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2944 {
2945   if (!log_is_enabled(Trace, gc, liveness)) {
2946     return;
2947   }
2948 
2949   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2950   MemRegion g1_reserved = g1h->g1_reserved();
2951   double now = os::elapsedTime();
2952 
2953   // Print the header of the output.
2954   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2955   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2956                           G1PPRL_SUM_ADDR_FORMAT("reserved")
2957                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
2958                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
2959                           HeapRegion::GrainBytes);
2960   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2961   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2962                           G1PPRL_TYPE_H_FORMAT
2963                           G1PPRL_ADDR_BASE_H_FORMAT
2964                           G1PPRL_BYTE_H_FORMAT
2965                           G1PPRL_BYTE_H_FORMAT
2966                           G1PPRL_BYTE_H_FORMAT
2967                           G1PPRL_DOUBLE_H_FORMAT
2968                           G1PPRL_BYTE_H_FORMAT
2969                           G1PPRL_STATE_H_FORMAT
2970                           G1PPRL_BYTE_H_FORMAT,
2971                           "type", "address-range",
2972                           "used", "prev-live", "next-live", "gc-eff",
2973                           "remset", "state", "code-roots");
2974   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2975                           G1PPRL_TYPE_H_FORMAT
2976                           G1PPRL_ADDR_BASE_H_FORMAT
2977                           G1PPRL_BYTE_H_FORMAT
2978                           G1PPRL_BYTE_H_FORMAT
2979                           G1PPRL_BYTE_H_FORMAT
2980                           G1PPRL_DOUBLE_H_FORMAT
2981                           G1PPRL_BYTE_H_FORMAT
2982                           G1PPRL_STATE_H_FORMAT
2983                           G1PPRL_BYTE_H_FORMAT,
2984                           "", "",
2985                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
2986                           "(bytes)", "", "(bytes)");
2987 }
2988 
2989 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
2990   if (!log_is_enabled(Trace, gc, liveness)) {
2991     return false;
2992   }
2993 
2994   const char* type       = r->get_type_str();
2995   HeapWord* bottom       = r->bottom();
2996   HeapWord* end          = r->end();
2997   size_t capacity_bytes  = r->capacity();
2998   size_t used_bytes      = r->used();
2999   size_t prev_live_bytes = r->live_bytes();
3000   size_t next_live_bytes = r->next_live_bytes();
3001   double gc_eff          = r->gc_efficiency();
3002   size_t remset_bytes    = r->rem_set()->mem_size();
3003   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3004   const char* remset_type = r->rem_set()->get_short_state_str();
3005 
3006   _total_used_bytes      += used_bytes;
3007   _total_capacity_bytes  += capacity_bytes;
3008   _total_prev_live_bytes += prev_live_bytes;
3009   _total_next_live_bytes += next_live_bytes;
3010   _total_remset_bytes    += remset_bytes;
3011   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3012 
3013   // Print a line for this particular region.
3014   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3015                           G1PPRL_TYPE_FORMAT
3016                           G1PPRL_ADDR_BASE_FORMAT
3017                           G1PPRL_BYTE_FORMAT
3018                           G1PPRL_BYTE_FORMAT
3019                           G1PPRL_BYTE_FORMAT
3020                           G1PPRL_DOUBLE_FORMAT
3021                           G1PPRL_BYTE_FORMAT
3022                           G1PPRL_STATE_FORMAT
3023                           G1PPRL_BYTE_FORMAT,
3024                           type, p2i(bottom), p2i(end),
3025                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3026                           remset_bytes, remset_type, strong_code_roots_bytes);
3027 
3028   return false;
3029 }
3030 
3031 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3032   if (!log_is_enabled(Trace, gc, liveness)) {
3033     return;
3034   }
3035 
3036   // Add static memory usage to the remembered set sizes.
3037   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3038   // Print the footer of the output.
3039   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3040   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3041                          " SUMMARY"
3042                          G1PPRL_SUM_MB_FORMAT("capacity")
3043                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3044                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3045                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3046                          G1PPRL_SUM_MB_FORMAT("remset")
3047                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3048                          bytes_to_mb(_total_capacity_bytes),
3049                          bytes_to_mb(_total_used_bytes),
3050                          percent_of(_total_used_bytes, _total_capacity_bytes),
3051                          bytes_to_mb(_total_prev_live_bytes),
3052                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3053                          bytes_to_mb(_total_next_live_bytes),
3054                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
3055                          bytes_to_mb(_total_remset_bytes),
3056                          bytes_to_mb(_total_strong_code_roots_bytes));
3057 }