1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorState.hpp"
  31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1DirtyCardQueue.hpp"
  34 #include "gc/g1/g1HeapVerifier.hpp"
  35 #include "gc/g1/g1OopClosures.inline.hpp"
  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/g1ThreadLocalData.hpp"
  40 #include "gc/g1/g1Trace.hpp"
  41 #include "gc/g1/heapRegion.inline.hpp"
  42 #include "gc/g1/heapRegionRemSet.hpp"
  43 #include "gc/g1/heapRegionSet.inline.hpp"
  44 #include "gc/shared/gcId.hpp"
  45 #include "gc/shared/gcTimer.hpp"
  46 #include "gc/shared/gcTraceTime.inline.hpp"
  47 #include "gc/shared/gcVMOperations.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/suspendibleThreadSet.hpp"
  52 #include "gc/shared/taskqueue.inline.hpp"
  53 #include "gc/shared/weakProcessor.inline.hpp"
  54 #include "gc/shared/workerPolicy.hpp"
  55 #include "include/jvm.h"
  56 #include "logging/log.hpp"
  57 #include "memory/allocation.hpp"
  58 #include "memory/iterator.hpp"
  59 #include "memory/resourceArea.hpp"
  60 #include "memory/universe.hpp"
  61 #include "oops/access.inline.hpp"
  62 #include "oops/oop.inline.hpp"
  63 #include "runtime/atomic.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/java.hpp"
  66 #include "runtime/orderAccess.hpp"
  67 #include "runtime/prefetch.inline.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/growableArray.hpp"
  71 
  72 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  73   assert(addr < _cm->finger(), "invariant");
  74   assert(addr >= _task->finger(), "invariant");
  75 
  76   // We move that task's local finger along.
  77   _task->move_finger_to(addr);
  78 
  79   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  80   // we only partially drain the local queue and global stack
  81   _task->drain_local_queue(true);
  82   _task->drain_global_stack(true);
  83 
  84   // if the has_aborted flag has been raised, we need to bail out of
  85   // the iteration
  86   return !_task->has_aborted();
  87 }
  88 
  89 G1CMMarkStack::G1CMMarkStack() :
  90   _max_chunk_capacity(0),
  91   _base(NULL),
  92   _chunk_capacity(0) {
  93   set_empty();
  94 }
  95 
  96 bool G1CMMarkStack::resize(size_t new_capacity) {
  97   assert(is_empty(), "Only resize when stack is empty.");
  98   assert(new_capacity <= _max_chunk_capacity,
  99          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
 100 
 101   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
 102 
 103   if (new_base == NULL) {
 104     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
 105     return false;
 106   }
 107   // Release old mapping.
 108   if (_base != NULL) {
 109     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 110   }
 111 
 112   _base = new_base;
 113   _chunk_capacity = new_capacity;
 114   set_empty();
 115 
 116   return true;
 117 }
 118 
 119 size_t G1CMMarkStack::capacity_alignment() {
 120   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 121 }
 122 
 123 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 124   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 125 
 126   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 127 
 128   _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 129   size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 130 
 131   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 132             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 133             _max_chunk_capacity,
 134             initial_chunk_capacity);
 135 
 136   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 137                 initial_chunk_capacity, _max_chunk_capacity);
 138 
 139   return resize(initial_chunk_capacity);
 140 }
 141 
 142 void G1CMMarkStack::expand() {
 143   if (_chunk_capacity == _max_chunk_capacity) {
 144     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 145     return;
 146   }
 147   size_t old_capacity = _chunk_capacity;
 148   // Double capacity if possible
 149   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 150 
 151   if (resize(new_capacity)) {
 152     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 153                   old_capacity, new_capacity);
 154   } else {
 155     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 156                     old_capacity, new_capacity);
 157   }
 158 }
 159 
 160 G1CMMarkStack::~G1CMMarkStack() {
 161   if (_base != NULL) {
 162     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 163   }
 164 }
 165 
 166 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
 167   elem->next = *list;
 168   *list = elem;
 169 }
 170 
 171 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
 172   MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 173   add_chunk_to_list(&_chunk_list, elem);
 174   _chunks_in_chunk_list++;
 175 }
 176 
 177 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
 178   MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 179   add_chunk_to_list(&_free_list, elem);
 180 }
 181 
 182 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
 183   TaskQueueEntryChunk* result = *list;
 184   if (result != NULL) {
 185     *list = (*list)->next;
 186   }
 187   return result;
 188 }
 189 
 190 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 191   MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 192   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
 193   if (result != NULL) {
 194     _chunks_in_chunk_list--;
 195   }
 196   return result;
 197 }
 198 
 199 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 200   MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 201   return remove_chunk_from_list(&_free_list);
 202 }
 203 
 204 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
 205   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 206   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 207   // wraparound of _hwm.
 208   if (_hwm >= _chunk_capacity) {
 209     return NULL;
 210   }
 211 
 212   size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
 213   if (cur_idx >= _chunk_capacity) {
 214     return NULL;
 215   }
 216 
 217   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
 218   result->next = NULL;
 219   return result;
 220 }
 221 
 222 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
 223   // Get a new chunk.
 224   TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
 225 
 226   if (new_chunk == NULL) {
 227     // Did not get a chunk from the free list. Allocate from backing memory.
 228     new_chunk = allocate_new_chunk();
 229 
 230     if (new_chunk == NULL) {
 231       return false;
 232     }
 233   }
 234 
 235   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 236 
 237   add_chunk_to_chunk_list(new_chunk);
 238 
 239   return true;
 240 }
 241 
 242 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
 243   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 244 
 245   if (cur == NULL) {
 246     return false;
 247   }
 248 
 249   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 250 
 251   add_chunk_to_free_list(cur);
 252   return true;
 253 }
 254 
 255 void G1CMMarkStack::set_empty() {
 256   _chunks_in_chunk_list = 0;
 257   _hwm = 0;
 258   _chunk_list = NULL;
 259   _free_list = NULL;
 260 }
 261 
 262 G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
 263     _root_regions(NULL),
 264     _max_regions(max_regions),
 265     _num_root_regions(0),
 266     _claimed_root_regions(0),
 267     _scan_in_progress(false),
 268     _should_abort(false) {
 269   _root_regions = new MemRegion[_max_regions];
 270   if (_root_regions == NULL) {
 271     vm_exit_during_initialization("Could not allocate root MemRegion set.");
 272   }
 273 }
 274 
 275 G1CMRootMemRegions::~G1CMRootMemRegions() {
 276   delete[] _root_regions;
 277 }
 278 
 279 void G1CMRootMemRegions::reset() {
 280   _num_root_regions = 0;
 281 }
 282 
 283 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
 284   assert_at_safepoint();
 285   size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
 286   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
 287   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
 288          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
 289   _root_regions[idx].set_start(start);
 290   _root_regions[idx].set_end(end);
 291 }
 292 
 293 void G1CMRootMemRegions::prepare_for_scan() {
 294   assert(!scan_in_progress(), "pre-condition");
 295 
 296   _scan_in_progress = _num_root_regions > 0;
 297 
 298   _claimed_root_regions = 0;
 299   _should_abort = false;
 300 }
 301 
 302 const MemRegion* G1CMRootMemRegions::claim_next() {
 303   if (_should_abort) {
 304     // If someone has set the should_abort flag, we return NULL to
 305     // force the caller to bail out of their loop.
 306     return NULL;
 307   }
 308 
 309   if (_claimed_root_regions >= _num_root_regions) {
 310     return NULL;
 311   }
 312 
 313   size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
 314   if (claimed_index < _num_root_regions) {
 315     return &_root_regions[claimed_index];
 316   }
 317   return NULL;
 318 }
 319 
 320 uint G1CMRootMemRegions::num_root_regions() const {
 321   return (uint)_num_root_regions;
 322 }
 323 
 324 void G1CMRootMemRegions::notify_scan_done() {
 325   MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 326   _scan_in_progress = false;
 327   RootRegionScan_lock->notify_all();
 328 }
 329 
 330 void G1CMRootMemRegions::cancel_scan() {
 331   notify_scan_done();
 332 }
 333 
 334 void G1CMRootMemRegions::scan_finished() {
 335   assert(scan_in_progress(), "pre-condition");
 336 
 337   if (!_should_abort) {
 338     assert(_claimed_root_regions >= num_root_regions(),
 339            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
 340            _claimed_root_regions, num_root_regions());
 341   }
 342 
 343   notify_scan_done();
 344 }
 345 
 346 bool G1CMRootMemRegions::wait_until_scan_finished() {
 347   if (!scan_in_progress()) {
 348     return false;
 349   }
 350 
 351   {
 352     MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 353     while (scan_in_progress()) {
 354       ml.wait();
 355     }
 356   }
 357   return true;
 358 }
 359 
 360 // Returns the maximum number of workers to be used in a concurrent
 361 // phase based on the number of GC workers being used in a STW
 362 // phase.
 363 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
 364   return MAX2((num_gc_workers + 2) / 4, 1U);
 365 }
 366 
 367 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
 368                                    G1RegionToSpaceMapper* prev_bitmap_storage,
 369                                    G1RegionToSpaceMapper* next_bitmap_storage) :
 370   // _cm_thread set inside the constructor
 371   _g1h(g1h),
 372   _completed_initialization(false),
 373 
 374   _mark_bitmap_1(),
 375   _mark_bitmap_2(),
 376   _prev_mark_bitmap(&_mark_bitmap_1),
 377   _next_mark_bitmap(&_mark_bitmap_2),
 378 
 379   _heap(_g1h->reserved_region()),
 380 
 381   _root_regions(_g1h->max_regions()),
 382 
 383   _global_mark_stack(),
 384 
 385   // _finger set in set_non_marking_state
 386 
 387   _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
 388   _max_num_tasks(ParallelGCThreads),
 389   // _num_active_tasks set in set_non_marking_state()
 390   // _tasks set inside the constructor
 391 
 392   _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
 393   _terminator((int) _max_num_tasks, _task_queues),
 394 
 395   _first_overflow_barrier_sync(),
 396   _second_overflow_barrier_sync(),
 397 
 398   _has_overflown(false),
 399   _concurrent(false),
 400   _has_aborted(false),
 401   _restart_for_overflow(false),
 402   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 403   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 404 
 405   // _verbose_level set below
 406 
 407   _init_times(),
 408   _remark_times(),
 409   _remark_mark_times(),
 410   _remark_weak_ref_times(),
 411   _cleanup_times(),
 412   _total_cleanup_time(0.0),
 413 
 414   _accum_task_vtime(NULL),
 415 
 416   _concurrent_workers(NULL),
 417   _num_concurrent_workers(0),
 418   _max_concurrent_workers(0),
 419 
 420   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 421   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 422 {
 423   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 424   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 425 
 426   // Create & start ConcurrentMark thread.
 427   _cm_thread = new G1ConcurrentMarkThread(this);
 428   if (_cm_thread->osthread() == NULL) {
 429     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 430   }
 431 
 432   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 433 
 434   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 435     // Calculate the number of concurrent worker threads by scaling
 436     // the number of parallel GC threads.
 437     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 438     FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
 439   }
 440 
 441   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 442   if (ConcGCThreads > ParallelGCThreads) {
 443     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 444                     ConcGCThreads, ParallelGCThreads);
 445     return;
 446   }
 447 
 448   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 449   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 450 
 451   _num_concurrent_workers = ConcGCThreads;
 452   _max_concurrent_workers = _num_concurrent_workers;
 453 
 454   _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
 455   _concurrent_workers->initialize_workers();
 456 
 457   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 458     size_t mark_stack_size =
 459       MIN2(MarkStackSizeMax,
 460           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
 461     // Verify that the calculated value for MarkStackSize is in range.
 462     // It would be nice to use the private utility routine from Arguments.
 463     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 464       log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 465                       "must be between 1 and " SIZE_FORMAT,
 466                       mark_stack_size, MarkStackSizeMax);
 467       return;
 468     }
 469     FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
 470   } else {
 471     // Verify MarkStackSize is in range.
 472     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 473       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 474         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 475           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 476                           "must be between 1 and " SIZE_FORMAT,
 477                           MarkStackSize, MarkStackSizeMax);
 478           return;
 479         }
 480       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 481         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 482           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 483                           " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 484                           MarkStackSize, MarkStackSizeMax);
 485           return;
 486         }
 487       }
 488     }
 489   }
 490 
 491   if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
 492     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
 493   }
 494 
 495   _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
 496   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
 497 
 498   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 499   _num_active_tasks = _max_num_tasks;
 500 
 501   for (uint i = 0; i < _max_num_tasks; ++i) {
 502     G1CMTaskQueue* task_queue = new G1CMTaskQueue();
 503     task_queue->initialize();
 504     _task_queues->register_queue(i, task_queue);
 505 
 506     _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
 507 
 508     _accum_task_vtime[i] = 0.0;
 509   }
 510 
 511   reset_at_marking_complete();
 512   _completed_initialization = true;
 513 }
 514 
 515 void G1ConcurrentMark::reset() {
 516   _has_aborted = false;
 517 
 518   reset_marking_for_restart();
 519 
 520   // Reset all tasks, since different phases will use different number of active
 521   // threads. So, it's easiest to have all of them ready.
 522   for (uint i = 0; i < _max_num_tasks; ++i) {
 523     _tasks[i]->reset(_next_mark_bitmap);
 524   }
 525 
 526   uint max_regions = _g1h->max_regions();
 527   for (uint i = 0; i < max_regions; i++) {
 528     _top_at_rebuild_starts[i] = NULL;
 529     _region_mark_stats[i].clear();
 530   }
 531 }
 532 
 533 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
 534   for (uint j = 0; j < _max_num_tasks; ++j) {
 535     _tasks[j]->clear_mark_stats_cache(region_idx);
 536   }
 537   _top_at_rebuild_starts[region_idx] = NULL;
 538   _region_mark_stats[region_idx].clear();
 539 }
 540 
 541 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
 542   uint const region_idx = r->hrm_index();
 543   if (r->is_humongous()) {
 544     assert(r->is_starts_humongous(), "Got humongous continues region here");
 545     uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
 546     for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
 547       clear_statistics_in_region(j);
 548     }
 549   } else {
 550     clear_statistics_in_region(region_idx);
 551   }
 552 }
 553 
 554 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
 555   if (bitmap->is_marked(addr)) {
 556     bitmap->clear(addr);
 557   }
 558 }
 559 
 560 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
 561   assert_at_safepoint_on_vm_thread();
 562 
 563   // Need to clear all mark bits of the humongous object.
 564   clear_mark_if_set(_prev_mark_bitmap, r->bottom());
 565   clear_mark_if_set(_next_mark_bitmap, r->bottom());
 566 
 567   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
 568     return;
 569   }
 570 
 571   // Clear any statistics about the region gathered so far.
 572   clear_statistics(r);
 573 }
 574 
 575 void G1ConcurrentMark::reset_marking_for_restart() {
 576   _global_mark_stack.set_empty();
 577 
 578   // Expand the marking stack, if we have to and if we can.
 579   if (has_overflown()) {
 580     _global_mark_stack.expand();
 581 
 582     uint max_regions = _g1h->max_regions();
 583     for (uint i = 0; i < max_regions; i++) {
 584       _region_mark_stats[i].clear_during_overflow();
 585     }
 586   }
 587 
 588   clear_has_overflown();
 589   _finger = _heap.start();
 590 
 591   for (uint i = 0; i < _max_num_tasks; ++i) {
 592     G1CMTaskQueue* queue = _task_queues->queue(i);
 593     queue->set_empty();
 594   }
 595 }
 596 
 597 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
 598   assert(active_tasks <= _max_num_tasks, "we should not have more");
 599 
 600   _num_active_tasks = active_tasks;
 601   // Need to update the three data structures below according to the
 602   // number of active threads for this phase.
 603   _terminator.terminator()->reset_for_reuse((int) active_tasks);
 604   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 605   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 606 }
 607 
 608 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 609   set_concurrency(active_tasks);
 610 
 611   _concurrent = concurrent;
 612 
 613   if (!concurrent) {
 614     // At this point we should be in a STW phase, and completed marking.
 615     assert_at_safepoint_on_vm_thread();
 616     assert(out_of_regions(),
 617            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 618            p2i(_finger), p2i(_heap.end()));
 619   }
 620 }
 621 
 622 void G1ConcurrentMark::reset_at_marking_complete() {
 623   // We set the global marking state to some default values when we're
 624   // not doing marking.
 625   reset_marking_for_restart();
 626   _num_active_tasks = 0;
 627 }
 628 
 629 G1ConcurrentMark::~G1ConcurrentMark() {
 630   FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
 631   FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
 632   // The G1ConcurrentMark instance is never freed.
 633   ShouldNotReachHere();
 634 }
 635 
 636 class G1ClearBitMapTask : public AbstractGangTask {
 637 public:
 638   static size_t chunk_size() { return M; }
 639 
 640 private:
 641   // Heap region closure used for clearing the given mark bitmap.
 642   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 643   private:
 644     G1CMBitMap* _bitmap;
 645     G1ConcurrentMark* _cm;
 646   public:
 647     G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
 648     }
 649 
 650     virtual bool do_heap_region(HeapRegion* r) {
 651       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 652 
 653       HeapWord* cur = r->bottom();
 654       HeapWord* const end = r->end();
 655 
 656       while (cur < end) {
 657         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 658         _bitmap->clear_range(mr);
 659 
 660         cur += chunk_size_in_words;
 661 
 662         // Abort iteration if after yielding the marking has been aborted.
 663         if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
 664           return true;
 665         }
 666         // Repeat the asserts from before the start of the closure. We will do them
 667         // as asserts here to minimize their overhead on the product. However, we
 668         // will have them as guarantees at the beginning / end of the bitmap
 669         // clearing to get some checking in the product.
 670         assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
 671         assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 672       }
 673       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 674 
 675       return false;
 676     }
 677   };
 678 
 679   G1ClearBitmapHRClosure _cl;
 680   HeapRegionClaimer _hr_claimer;
 681   bool _suspendible; // If the task is suspendible, workers must join the STS.
 682 
 683 public:
 684   G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 685     AbstractGangTask("G1 Clear Bitmap"),
 686     _cl(bitmap, suspendible ? cm : NULL),
 687     _hr_claimer(n_workers),
 688     _suspendible(suspendible)
 689   { }
 690 
 691   void work(uint worker_id) {
 692     SuspendibleThreadSetJoiner sts_join(_suspendible);
 693     G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
 694   }
 695 
 696   bool is_complete() {
 697     return _cl.is_complete();
 698   }
 699 };
 700 
 701 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 702   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 703 
 704   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 705   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 706 
 707   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 708 
 709   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 710 
 711   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 712   workers->run_task(&cl, num_workers);
 713   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 714 }
 715 
 716 void G1ConcurrentMark::cleanup_for_next_mark() {
 717   // Make sure that the concurrent mark thread looks to still be in
 718   // the current cycle.
 719   guarantee(cm_thread()->during_cycle(), "invariant");
 720 
 721   // We are finishing up the current cycle by clearing the next
 722   // marking bitmap and getting it ready for the next cycle. During
 723   // this time no other cycle can start. So, let's make sure that this
 724   // is the case.
 725   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 726 
 727   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 728 
 729   // Repeat the asserts from above.
 730   guarantee(cm_thread()->during_cycle(), "invariant");
 731   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 732 }
 733 
 734 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 735   assert_at_safepoint_on_vm_thread();
 736   clear_bitmap(_prev_mark_bitmap, workers, false);
 737 }
 738 
 739 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 740 public:
 741   bool do_heap_region(HeapRegion* r) {
 742     r->note_start_of_marking();
 743     return false;
 744   }
 745 };
 746 
 747 void G1ConcurrentMark::pre_initial_mark() {
 748   assert_at_safepoint_on_vm_thread();
 749 
 750   // Reset marking state.
 751   reset();
 752 
 753   // For each region note start of marking.
 754   NoteStartOfMarkHRClosure startcl;
 755   _g1h->heap_region_iterate(&startcl);
 756 
 757   _root_regions.reset();
 758 }
 759 
 760 
 761 void G1ConcurrentMark::post_initial_mark() {
 762   // Start Concurrent Marking weak-reference discovery.
 763   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 764   // enable ("weak") refs discovery
 765   rp->enable_discovery();
 766   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 767 
 768   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
 769   // This is the start of  the marking cycle, we're expected all
 770   // threads to have SATB queues with active set to false.
 771   satb_mq_set.set_active_all_threads(true, /* new active value */
 772                                      false /* expected_active */);
 773 
 774   _root_regions.prepare_for_scan();
 775 
 776   // update_g1_committed() will be called at the end of an evac pause
 777   // when marking is on. So, it's also called at the end of the
 778   // initial-mark pause to update the heap end, if the heap expands
 779   // during it. No need to call it here.
 780 }
 781 
 782 /*
 783  * Notice that in the next two methods, we actually leave the STS
 784  * during the barrier sync and join it immediately afterwards. If we
 785  * do not do this, the following deadlock can occur: one thread could
 786  * be in the barrier sync code, waiting for the other thread to also
 787  * sync up, whereas another one could be trying to yield, while also
 788  * waiting for the other threads to sync up too.
 789  *
 790  * Note, however, that this code is also used during remark and in
 791  * this case we should not attempt to leave / enter the STS, otherwise
 792  * we'll either hit an assert (debug / fastdebug) or deadlock
 793  * (product). So we should only leave / enter the STS if we are
 794  * operating concurrently.
 795  *
 796  * Because the thread that does the sync barrier has left the STS, it
 797  * is possible to be suspended for a Full GC or an evacuation pause
 798  * could occur. This is actually safe, since the entering the sync
 799  * barrier is one of the last things do_marking_step() does, and it
 800  * doesn't manipulate any data structures afterwards.
 801  */
 802 
 803 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 804   bool barrier_aborted;
 805   {
 806     SuspendibleThreadSetLeaver sts_leave(concurrent());
 807     barrier_aborted = !_first_overflow_barrier_sync.enter();
 808   }
 809 
 810   // at this point everyone should have synced up and not be doing any
 811   // more work
 812 
 813   if (barrier_aborted) {
 814     // If the barrier aborted we ignore the overflow condition and
 815     // just abort the whole marking phase as quickly as possible.
 816     return;
 817   }
 818 }
 819 
 820 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 821   SuspendibleThreadSetLeaver sts_leave(concurrent());
 822   _second_overflow_barrier_sync.enter();
 823 
 824   // at this point everything should be re-initialized and ready to go
 825 }
 826 
 827 class G1CMConcurrentMarkingTask : public AbstractGangTask {
 828   G1ConcurrentMark*     _cm;
 829 
 830 public:
 831   void work(uint worker_id) {
 832     assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
 833     ResourceMark rm;
 834 
 835     double start_vtime = os::elapsedVTime();
 836 
 837     {
 838       SuspendibleThreadSetJoiner sts_join;
 839 
 840       assert(worker_id < _cm->active_tasks(), "invariant");
 841 
 842       G1CMTask* task = _cm->task(worker_id);
 843       task->record_start_time();
 844       if (!_cm->has_aborted()) {
 845         do {
 846           task->do_marking_step(G1ConcMarkStepDurationMillis,
 847                                 true  /* do_termination */,
 848                                 false /* is_serial*/);
 849 
 850           _cm->do_yield_check();
 851         } while (!_cm->has_aborted() && task->has_aborted());
 852       }
 853       task->record_end_time();
 854       guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
 855     }
 856 
 857     double end_vtime = os::elapsedVTime();
 858     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 859   }
 860 
 861   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
 862       AbstractGangTask("Concurrent Mark"), _cm(cm) { }
 863 
 864   ~G1CMConcurrentMarkingTask() { }
 865 };
 866 
 867 uint G1ConcurrentMark::calc_active_marking_workers() {
 868   uint result = 0;
 869   if (!UseDynamicNumberOfGCThreads ||
 870       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 871        !ForceDynamicNumberOfGCThreads)) {
 872     result = _max_concurrent_workers;
 873   } else {
 874     result =
 875       WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
 876                                                 1, /* Minimum workers */
 877                                                 _num_concurrent_workers,
 878                                                 Threads::number_of_non_daemon_threads());
 879     // Don't scale the result down by scale_concurrent_workers() because
 880     // that scaling has already gone into "_max_concurrent_workers".
 881   }
 882   assert(result > 0 && result <= _max_concurrent_workers,
 883          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 884          _max_concurrent_workers, result);
 885   return result;
 886 }
 887 
 888 void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
 889 #ifdef ASSERT
 890   HeapWord* last = region->last();
 891   HeapRegion* hr = _g1h->heap_region_containing(last);
 892   assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
 893          "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
 894   assert(hr->next_top_at_mark_start() == region->start(),
 895          "MemRegion start should be equal to nTAMS");
 896 #endif
 897 
 898   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 899 
 900   const uintx interval = PrefetchScanIntervalInBytes;
 901   HeapWord* curr = region->start();
 902   const HeapWord* end = region->end();
 903   while (curr < end) {
 904     Prefetch::read(curr, interval);
 905     oop obj = oop(curr);
 906     int size = obj->oop_iterate_size(&cl);
 907     assert(size == obj->size(), "sanity");
 908     curr += size;
 909   }
 910 }
 911 
 912 class G1CMRootRegionScanTask : public AbstractGangTask {
 913   G1ConcurrentMark* _cm;
 914 public:
 915   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 916     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
 917 
 918   void work(uint worker_id) {
 919     assert(Thread::current()->is_ConcurrentGC_thread(),
 920            "this should only be done by a conc GC thread");
 921 
 922     G1CMRootMemRegions* root_regions = _cm->root_regions();
 923     const MemRegion* region = root_regions->claim_next();
 924     while (region != NULL) {
 925       _cm->scan_root_region(region, worker_id);
 926       region = root_regions->claim_next();
 927     }
 928   }
 929 };
 930 
 931 void G1ConcurrentMark::scan_root_regions() {
 932   // scan_in_progress() will have been set to true only if there was
 933   // at least one root region to scan. So, if it's false, we
 934   // should not attempt to do any further work.
 935   if (root_regions()->scan_in_progress()) {
 936     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 937 
 938     _num_concurrent_workers = MIN2(calc_active_marking_workers(),
 939                                    // We distribute work on a per-region basis, so starting
 940                                    // more threads than that is useless.
 941                                    root_regions()->num_root_regions());
 942     assert(_num_concurrent_workers <= _max_concurrent_workers,
 943            "Maximum number of marking threads exceeded");
 944 
 945     G1CMRootRegionScanTask task(this);
 946     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
 947                         task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
 948     _concurrent_workers->run_task(&task, _num_concurrent_workers);
 949 
 950     // It's possible that has_aborted() is true here without actually
 951     // aborting the survivor scan earlier. This is OK as it's
 952     // mainly used for sanity checking.
 953     root_regions()->scan_finished();
 954   }
 955 }
 956 
 957 void G1ConcurrentMark::concurrent_cycle_start() {
 958   _gc_timer_cm->register_gc_start();
 959 
 960   _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
 961 
 962   _g1h->trace_heap_before_gc(_gc_tracer_cm);
 963 }
 964 
 965 void G1ConcurrentMark::concurrent_cycle_end() {
 966   _g1h->collector_state()->set_clearing_next_bitmap(false);
 967 
 968   _g1h->trace_heap_after_gc(_gc_tracer_cm);
 969 
 970   if (has_aborted()) {
 971     log_info(gc, marking)("Concurrent Mark Abort");
 972     _gc_tracer_cm->report_concurrent_mode_failure();
 973   }
 974 
 975   _gc_timer_cm->register_gc_end();
 976 
 977   _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 978 }
 979 
 980 void G1ConcurrentMark::mark_from_roots() {
 981   _restart_for_overflow = false;
 982 
 983   _num_concurrent_workers = calc_active_marking_workers();
 984 
 985   uint active_workers = MAX2(1U, _num_concurrent_workers);
 986 
 987   // Setting active workers is not guaranteed since fewer
 988   // worker threads may currently exist and more may not be
 989   // available.
 990   active_workers = _concurrent_workers->update_active_workers(active_workers);
 991   log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
 992 
 993   // Parallel task terminator is set in "set_concurrency_and_phase()"
 994   set_concurrency_and_phase(active_workers, true /* concurrent */);
 995 
 996   G1CMConcurrentMarkingTask marking_task(this);
 997   _concurrent_workers->run_task(&marking_task);
 998   print_stats();
 999 }
1000 
1001 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
1002   G1HeapVerifier* verifier = _g1h->verifier();
1003 
1004   verifier->verify_region_sets_optional();
1005 
1006   if (VerifyDuringGC) {
1007     GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);
1008 
1009     size_t const BufLen = 512;
1010     char buffer[BufLen];
1011 
1012     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1013     verifier->verify(type, vo, buffer);
1014   }
1015 
1016   verifier->check_bitmaps(caller);
1017 }
1018 
1019 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
1020   G1CollectedHeap* _g1h;
1021   G1ConcurrentMark* _cm;
1022   HeapRegionClaimer _hrclaimer;
1023   uint volatile _total_selected_for_rebuild;
1024 
1025   G1PrintRegionLivenessInfoClosure _cl;
1026 
1027   class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1028     G1CollectedHeap* _g1h;
1029     G1ConcurrentMark* _cm;
1030 
1031     G1PrintRegionLivenessInfoClosure* _cl;
1032 
1033     uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1034 
1035     void update_remset_before_rebuild(HeapRegion* hr) {
1036       G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();
1037 
1038       bool selected_for_rebuild;
1039       if (hr->is_humongous()) {
1040         bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
1041         selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1042       } else {
1043         size_t const live_bytes = _cm->liveness(hr->hrm_index());
1044         selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1045       }
1046       if (selected_for_rebuild) {
1047         _num_regions_selected_for_rebuild++;
1048       }
1049       _cm->update_top_at_rebuild_start(hr);
1050     }
1051 
1052     // Distribute the given words across the humongous object starting with hr and
1053     // note end of marking.
1054     void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1055       uint const region_idx = hr->hrm_index();
1056       size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
1057       uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
1058 
1059       // "Distributing" zero words means that we only note end of marking for these
1060       // regions.
1061       assert(marked_words == 0 || obj_size_in_words == marked_words,
1062              "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
1063              obj_size_in_words, marked_words);
1064 
1065       for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1066         HeapRegion* const r = _g1h->region_at(i);
1067         size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1068 
1069         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
1070                                words_to_add, i, r->get_type_str());
1071         add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
1072         marked_words -= words_to_add;
1073       }
1074       assert(marked_words == 0,
1075              SIZE_FORMAT " words left after distributing space across %u regions",
1076              marked_words, num_regions_in_humongous);
1077     }
1078 
1079     void update_marked_bytes(HeapRegion* hr) {
1080       uint const region_idx = hr->hrm_index();
1081       size_t const marked_words = _cm->liveness(region_idx);
1082       // The marking attributes the object's size completely to the humongous starts
1083       // region. We need to distribute this value across the entire set of regions a
1084       // humongous object spans.
1085       if (hr->is_humongous()) {
1086         assert(hr->is_starts_humongous() || marked_words == 0,
1087                "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1088                marked_words, region_idx, hr->get_type_str());
1089         if (hr->is_starts_humongous()) {
1090           distribute_marked_bytes(hr, marked_words);
1091         }
1092       } else {
1093         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1094         add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
1095       }
1096     }
1097 
1098     void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
1099       hr->add_to_marked_bytes(marked_bytes);
1100       _cl->do_heap_region(hr);
1101       hr->note_end_of_marking();
1102     }
1103 
1104   public:
1105     G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
1106       _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
1107 
1108     virtual bool do_heap_region(HeapRegion* r) {
1109       update_remset_before_rebuild(r);
1110       update_marked_bytes(r);
1111 
1112       return false;
1113     }
1114 
1115     uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1116   };
1117 
1118 public:
1119   G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
1120     AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
1121     _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1122 
1123   virtual void work(uint worker_id) {
1124     G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1125     _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1126     Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
1127   }
1128 
1129   uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1130 
1131   // Number of regions for which roughly one thread should be spawned for this work.
1132   static const uint RegionsPerThread = 384;
1133 };
1134 
1135 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1136   G1CollectedHeap* _g1h;
1137 public:
1138   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1139 
1140   virtual bool do_heap_region(HeapRegion* r) {
1141     _g1h->policy()->remset_tracker()->update_after_rebuild(r);
1142     return false;
1143   }
1144 };
1145 
1146 void G1ConcurrentMark::remark() {
1147   assert_at_safepoint_on_vm_thread();
1148 
1149   // If a full collection has happened, we should not continue. However we might
1150   // have ended up here as the Remark VM operation has been scheduled already.
1151   if (has_aborted()) {
1152     return;
1153   }
1154 
1155   G1Policy* policy = _g1h->policy();
1156   policy->record_concurrent_mark_remark_start();
1157 
1158   double start = os::elapsedTime();
1159 
1160   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1161 
1162   {
1163     GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1164     finalize_marking();
1165   }
1166 
1167   double mark_work_end = os::elapsedTime();
1168 
1169   bool const mark_finished = !has_overflown();
1170   if (mark_finished) {
1171     weak_refs_work(false /* clear_all_soft_refs */);
1172 
1173     SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1174     // We're done with marking.
1175     // This is the end of the marking cycle, we're expected all
1176     // threads to have SATB queues with active set to true.
1177     satb_mq_set.set_active_all_threads(false, /* new active value */
1178                                        true /* expected_active */);
1179 
1180     {
1181       GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1182       flush_all_task_caches();
1183     }
1184 
1185     // Install newly created mark bitmap as "prev".
1186     swap_mark_bitmaps();
1187     {
1188       GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1189 
1190       uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1191                                        G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1192       uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1193 
1194       G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1195       log_debug(gc,ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1196       _g1h->workers()->run_task(&cl, num_workers);
1197 
1198       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1199                                       _g1h->num_regions(), cl.total_selected_for_rebuild());
1200     }
1201     {
1202       GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1203       reclaim_empty_regions();
1204     }
1205 
1206     // Clean out dead classes
1207     if (ClassUnloadingWithConcurrentMark) {
1208       GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1209       ClassLoaderDataGraph::purge();
1210     }
1211 
1212     compute_new_sizes();
1213 
1214     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1215 
1216     assert(!restart_for_overflow(), "sanity");
1217     // Completely reset the marking state since marking completed
1218     reset_at_marking_complete();
1219   } else {
1220     // We overflowed.  Restart concurrent marking.
1221     _restart_for_overflow = true;
1222 
1223     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1224 
1225     // Clear the marking state because we will be restarting
1226     // marking due to overflowing the global mark stack.
1227     reset_marking_for_restart();
1228   }
1229 
1230   {
1231     GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1232     report_object_count(mark_finished);
1233   }
1234 
1235   // Statistics
1236   double now = os::elapsedTime();
1237   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1238   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1239   _remark_times.add((now - start) * 1000.0);
1240 
1241   policy->record_concurrent_mark_remark_end();
1242 }
1243 
1244 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1245   // Per-region work during the Cleanup pause.
1246   class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1247     G1CollectedHeap* _g1h;
1248     size_t _freed_bytes;
1249     FreeRegionList* _local_cleanup_list;
1250     uint _old_regions_removed;
1251     uint _humongous_regions_removed;
1252 
1253   public:
1254     G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
1255                                  FreeRegionList* local_cleanup_list) :
1256       _g1h(g1h),
1257       _freed_bytes(0),
1258       _local_cleanup_list(local_cleanup_list),
1259       _old_regions_removed(0),
1260       _humongous_regions_removed(0) { }
1261 
1262     size_t freed_bytes() { return _freed_bytes; }
1263     const uint old_regions_removed() { return _old_regions_removed; }
1264     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1265 
1266     bool do_heap_region(HeapRegion *hr) {
1267       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1268         _freed_bytes += hr->used();
1269         hr->set_containing_set(NULL);
1270         if (hr->is_humongous()) {
1271           _humongous_regions_removed++;
1272           _g1h->free_humongous_region(hr, _local_cleanup_list);
1273         } else {
1274           _old_regions_removed++;
1275           _g1h->free_region(hr, _local_cleanup_list);
1276         }
1277         hr->clear_cardtable();
1278         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1279         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1280       }
1281 
1282       return false;
1283     }
1284   };
1285 
1286   G1CollectedHeap* _g1h;
1287   FreeRegionList* _cleanup_list;
1288   HeapRegionClaimer _hrclaimer;
1289 
1290 public:
1291   G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1292     AbstractGangTask("G1 Cleanup"),
1293     _g1h(g1h),
1294     _cleanup_list(cleanup_list),
1295     _hrclaimer(n_workers) {
1296   }
1297 
1298   void work(uint worker_id) {
1299     FreeRegionList local_cleanup_list("Local Cleanup List");
1300     G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
1301     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1302     assert(cl.is_complete(), "Shouldn't have aborted!");
1303 
1304     // Now update the old/humongous region sets
1305     _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1306     {
1307       MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1308       _g1h->decrement_summary_bytes(cl.freed_bytes());
1309 
1310       _cleanup_list->add_ordered(&local_cleanup_list);
1311       assert(local_cleanup_list.is_empty(), "post-condition");
1312     }
1313   }
1314 };
1315 
1316 void G1ConcurrentMark::reclaim_empty_regions() {
1317   WorkGang* workers = _g1h->workers();
1318   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1319 
1320   G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1321   workers->run_task(&cl);
1322 
1323   if (!empty_regions_list.is_empty()) {
1324     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1325     // Now print the empty regions list.
1326     G1HRPrinter* hrp = _g1h->hr_printer();
1327     if (hrp->is_active()) {
1328       FreeRegionListIterator iter(&empty_regions_list);
1329       while (iter.more_available()) {
1330         HeapRegion* hr = iter.get_next();
1331         hrp->cleanup(hr);
1332       }
1333     }
1334     // And actually make them available.
1335     _g1h->prepend_to_freelist(&empty_regions_list);
1336   }
1337 }
1338 
1339 void G1ConcurrentMark::compute_new_sizes() {
1340   MetaspaceGC::compute_new_size();
1341 
1342   // Cleanup will have freed any regions completely full of garbage.
1343   // Update the soft reference policy with the new heap occupancy.
1344   Universe::update_heap_info_at_gc();
1345 
1346   // We reclaimed old regions so we should calculate the sizes to make
1347   // sure we update the old gen/space data.
1348   _g1h->g1mm()->update_sizes();
1349 }
1350 
1351 void G1ConcurrentMark::cleanup() {
1352   assert_at_safepoint_on_vm_thread();
1353 
1354   // If a full collection has happened, we shouldn't do this.
1355   if (has_aborted()) {
1356     return;
1357   }
1358 
1359   G1Policy* policy = _g1h->policy();
1360   policy->record_concurrent_mark_cleanup_start();
1361 
1362   double start = os::elapsedTime();
1363 
1364   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1365 
1366   {
1367     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1368     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1369     _g1h->heap_region_iterate(&cl);
1370   }
1371 
1372   if (log_is_enabled(Trace, gc, liveness)) {
1373     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1374     _g1h->heap_region_iterate(&cl);
1375   }
1376 
1377   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1378 
1379   // We need to make this be a "collection" so any collection pause that
1380   // races with it goes around and waits for Cleanup to finish.
1381   _g1h->increment_total_collections();
1382 
1383   {
1384     GCTraceTime(Debug, gc, phases) debug("Expand heap after concurrent mark", _gc_timer_cm);
1385     _g1h->expand_heap_after_concurrent_mark();
1386   }
1387 
1388   // Local statistics
1389   double recent_cleanup_time = (os::elapsedTime() - start);
1390   _total_cleanup_time += recent_cleanup_time;
1391   _cleanup_times.add(recent_cleanup_time);
1392 
1393   {
1394     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1395     policy->record_concurrent_mark_cleanup_end();
1396   }
1397 }
1398 
1399 // 'Keep Alive' oop closure used by both serial parallel reference processing.
1400 // Uses the G1CMTask associated with a worker thread (for serial reference
1401 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1402 // trace referent objects.
1403 //
1404 // Using the G1CMTask and embedded local queues avoids having the worker
1405 // threads operating on the global mark stack. This reduces the risk
1406 // of overflowing the stack - which we would rather avoid at this late
1407 // state. Also using the tasks' local queues removes the potential
1408 // of the workers interfering with each other that could occur if
1409 // operating on the global stack.
1410 
1411 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1412   G1ConcurrentMark* _cm;
1413   G1CMTask*         _task;
1414   uint              _ref_counter_limit;
1415   uint              _ref_counter;
1416   bool              _is_serial;
1417 public:
1418   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1419     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1420     _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1421     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1422   }
1423 
1424   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1425   virtual void do_oop(      oop* p) { do_oop_work(p); }
1426 
1427   template <class T> void do_oop_work(T* p) {
1428     if (_cm->has_overflown()) {
1429       return;
1430     }
1431     if (!_task->deal_with_reference(p)) {
1432       // We did not add anything to the mark bitmap (or mark stack), so there is
1433       // no point trying to drain it.
1434       return;
1435     }
1436     _ref_counter--;
1437 
1438     if (_ref_counter == 0) {
1439       // We have dealt with _ref_counter_limit references, pushing them
1440       // and objects reachable from them on to the local stack (and
1441       // possibly the global stack). Call G1CMTask::do_marking_step() to
1442       // process these entries.
1443       //
1444       // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1445       // there's nothing more to do (i.e. we're done with the entries that
1446       // were pushed as a result of the G1CMTask::deal_with_reference() calls
1447       // above) or we overflow.
1448       //
1449       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1450       // flag while there may still be some work to do. (See the comment at
1451       // the beginning of G1CMTask::do_marking_step() for those conditions -
1452       // one of which is reaching the specified time target.) It is only
1453       // when G1CMTask::do_marking_step() returns without setting the
1454       // has_aborted() flag that the marking step has completed.
1455       do {
1456         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1457         _task->do_marking_step(mark_step_duration_ms,
1458                                false      /* do_termination */,
1459                                _is_serial);
1460       } while (_task->has_aborted() && !_cm->has_overflown());
1461       _ref_counter = _ref_counter_limit;
1462     }
1463   }
1464 };
1465 
1466 // 'Drain' oop closure used by both serial and parallel reference processing.
1467 // Uses the G1CMTask associated with a given worker thread (for serial
1468 // reference processing the G1CMtask for worker 0 is used). Calls the
1469 // do_marking_step routine, with an unbelievably large timeout value,
1470 // to drain the marking data structures of the remaining entries
1471 // added by the 'keep alive' oop closure above.
1472 
1473 class G1CMDrainMarkingStackClosure : public VoidClosure {
1474   G1ConcurrentMark* _cm;
1475   G1CMTask*         _task;
1476   bool              _is_serial;
1477  public:
1478   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1479     _cm(cm), _task(task), _is_serial(is_serial) {
1480     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1481   }
1482 
1483   void do_void() {
1484     do {
1485       // We call G1CMTask::do_marking_step() to completely drain the local
1486       // and global marking stacks of entries pushed by the 'keep alive'
1487       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1488       //
1489       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1490       // if there's nothing more to do (i.e. we've completely drained the
1491       // entries that were pushed as a a result of applying the 'keep alive'
1492       // closure to the entries on the discovered ref lists) or we overflow
1493       // the global marking stack.
1494       //
1495       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1496       // flag while there may still be some work to do. (See the comment at
1497       // the beginning of G1CMTask::do_marking_step() for those conditions -
1498       // one of which is reaching the specified time target.) It is only
1499       // when G1CMTask::do_marking_step() returns without setting the
1500       // has_aborted() flag that the marking step has completed.
1501 
1502       _task->do_marking_step(1000000000.0 /* something very large */,
1503                              true         /* do_termination */,
1504                              _is_serial);
1505     } while (_task->has_aborted() && !_cm->has_overflown());
1506   }
1507 };
1508 
1509 // Implementation of AbstractRefProcTaskExecutor for parallel
1510 // reference processing at the end of G1 concurrent marking
1511 
1512 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1513 private:
1514   G1CollectedHeap*  _g1h;
1515   G1ConcurrentMark* _cm;
1516   WorkGang*         _workers;
1517   uint              _active_workers;
1518 
1519 public:
1520   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1521                           G1ConcurrentMark* cm,
1522                           WorkGang* workers,
1523                           uint n_workers) :
1524     _g1h(g1h), _cm(cm),
1525     _workers(workers), _active_workers(n_workers) { }
1526 
1527   virtual void execute(ProcessTask& task, uint ergo_workers);
1528 };
1529 
1530 class G1CMRefProcTaskProxy : public AbstractGangTask {
1531   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1532   ProcessTask&      _proc_task;
1533   G1CollectedHeap*  _g1h;
1534   G1ConcurrentMark* _cm;
1535 
1536 public:
1537   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1538                        G1CollectedHeap* g1h,
1539                        G1ConcurrentMark* cm) :
1540     AbstractGangTask("Process reference objects in parallel"),
1541     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1542     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1543     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1544   }
1545 
1546   virtual void work(uint worker_id) {
1547     ResourceMark rm;
1548     HandleMark hm;
1549     G1CMTask* task = _cm->task(worker_id);
1550     G1CMIsAliveClosure g1_is_alive(_g1h);
1551     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1552     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1553 
1554     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1555   }
1556 };
1557 
1558 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
1559   assert(_workers != NULL, "Need parallel worker threads.");
1560   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1561   assert(_workers->active_workers() >= ergo_workers,
1562          "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
1563          ergo_workers, _workers->active_workers());
1564 
1565   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1566 
1567   // We need to reset the concurrency level before each
1568   // proxy task execution, so that the termination protocol
1569   // and overflow handling in G1CMTask::do_marking_step() knows
1570   // how many workers to wait for.
1571   _cm->set_concurrency(ergo_workers);
1572   _workers->run_task(&proc_task_proxy, ergo_workers);
1573 }
1574 
1575 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1576   ResourceMark rm;
1577   HandleMark   hm;
1578 
1579   // Is alive closure.
1580   G1CMIsAliveClosure g1_is_alive(_g1h);
1581 
1582   // Inner scope to exclude the cleaning of the string table
1583   // from the displayed time.
1584   {
1585     GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1586 
1587     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1588 
1589     // See the comment in G1CollectedHeap::ref_processing_init()
1590     // about how reference processing currently works in G1.
1591 
1592     // Set the soft reference policy
1593     rp->setup_policy(clear_all_soft_refs);
1594     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1595 
1596     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1597     // in serial reference processing. Note these closures are also
1598     // used for serially processing (by the the current thread) the
1599     // JNI references during parallel reference processing.
1600     //
1601     // These closures do not need to synchronize with the worker
1602     // threads involved in parallel reference processing as these
1603     // instances are executed serially by the current thread (e.g.
1604     // reference processing is not multi-threaded and is thus
1605     // performed by the current thread instead of a gang worker).
1606     //
1607     // The gang tasks involved in parallel reference processing create
1608     // their own instances of these closures, which do their own
1609     // synchronization among themselves.
1610     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1611     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1612 
1613     // We need at least one active thread. If reference processing
1614     // is not multi-threaded we use the current (VMThread) thread,
1615     // otherwise we use the work gang from the G1CollectedHeap and
1616     // we utilize all the worker threads we can.
1617     bool processing_is_mt = rp->processing_is_mt();
1618     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1619     active_workers = clamp(active_workers, 1u, _max_num_tasks);
1620 
1621     // Parallel processing task executor.
1622     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1623                                               _g1h->workers(), active_workers);
1624     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1625 
1626     // Set the concurrency level. The phase was already set prior to
1627     // executing the remark task.
1628     set_concurrency(active_workers);
1629 
1630     // Set the degree of MT processing here.  If the discovery was done MT,
1631     // the number of threads involved during discovery could differ from
1632     // the number of active workers.  This is OK as long as the discovered
1633     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1634     rp->set_active_mt_degree(active_workers);
1635 
1636     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1637 
1638     // Process the weak references.
1639     const ReferenceProcessorStats& stats =
1640         rp->process_discovered_references(&g1_is_alive,
1641                                           &g1_keep_alive,
1642                                           &g1_drain_mark_stack,
1643                                           executor,
1644                                           &pt);
1645     _gc_tracer_cm->report_gc_reference_stats(stats);
1646     pt.print_all_references();
1647 
1648     // The do_oop work routines of the keep_alive and drain_marking_stack
1649     // oop closures will set the has_overflown flag if we overflow the
1650     // global marking stack.
1651 
1652     assert(has_overflown() || _global_mark_stack.is_empty(),
1653            "Mark stack should be empty (unless it has overflown)");
1654 
1655     assert(rp->num_queues() == active_workers, "why not");
1656 
1657     rp->verify_no_references_recorded();
1658     assert(!rp->discovery_enabled(), "Post condition");
1659   }
1660 
1661   if (has_overflown()) {
1662     // We can not trust g1_is_alive and the contents of the heap if the marking stack
1663     // overflowed while processing references. Exit the VM.
1664     fatal("Overflow during reference processing, can not continue. Please "
1665           "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1666           "restart.", MarkStackSizeMax);
1667     return;
1668   }
1669 
1670   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1671 
1672   {
1673     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1674     WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1675   }
1676 
1677   // Unload Klasses, String, Code Cache, etc.
1678   if (ClassUnloadingWithConcurrentMark) {
1679     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1680     bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
1681     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1682   } else if (StringDedup::is_enabled()) {
1683     GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm);
1684     _g1h->string_dedup_cleaning(&g1_is_alive, NULL);
1685   }
1686 }
1687 
1688 class G1PrecleanYieldClosure : public YieldClosure {
1689   G1ConcurrentMark* _cm;
1690 
1691 public:
1692   G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1693 
1694   virtual bool should_return() {
1695     return _cm->has_aborted();
1696   }
1697 
1698   virtual bool should_return_fine_grain() {
1699     _cm->do_yield_check();
1700     return _cm->has_aborted();
1701   }
1702 };
1703 
1704 void G1ConcurrentMark::preclean() {
1705   assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1706 
1707   SuspendibleThreadSetJoiner joiner;
1708 
1709   G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1710   G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1711 
1712   set_concurrency_and_phase(1, true);
1713 
1714   G1PrecleanYieldClosure yield_cl(this);
1715 
1716   ReferenceProcessor* rp = _g1h->ref_processor_cm();
1717   // Precleaning is single threaded. Temporarily disable MT discovery.
1718   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1719   rp->preclean_discovered_references(rp->is_alive_non_header(),
1720                                      &keep_alive,
1721                                      &drain_mark_stack,
1722                                      &yield_cl,
1723                                      _gc_timer_cm);
1724 }
1725 
1726 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1727 // the prev bitmap determining liveness.
1728 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1729   G1CollectedHeap* _g1h;
1730 public:
1731   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1732 
1733   bool do_object_b(oop obj) {
1734     HeapWord* addr = (HeapWord*)obj;
1735     return addr != NULL &&
1736            (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1737   }
1738 };
1739 
1740 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1741   // Depending on the completion of the marking liveness needs to be determined
1742   // using either the next or prev bitmap.
1743   if (mark_completed) {
1744     G1ObjectCountIsAliveClosure is_alive(_g1h);
1745     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1746   } else {
1747     G1CMIsAliveClosure is_alive(_g1h);
1748     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1749   }
1750 }
1751 
1752 
1753 void G1ConcurrentMark::swap_mark_bitmaps() {
1754   G1CMBitMap* temp = _prev_mark_bitmap;
1755   _prev_mark_bitmap = _next_mark_bitmap;
1756   _next_mark_bitmap = temp;
1757   _g1h->collector_state()->set_clearing_next_bitmap(true);
1758 }
1759 
1760 // Closure for marking entries in SATB buffers.
1761 class G1CMSATBBufferClosure : public SATBBufferClosure {
1762 private:
1763   G1CMTask* _task;
1764   G1CollectedHeap* _g1h;
1765 
1766   // This is very similar to G1CMTask::deal_with_reference, but with
1767   // more relaxed requirements for the argument, so this must be more
1768   // circumspect about treating the argument as an object.
1769   void do_entry(void* entry) const {
1770     _task->increment_refs_reached();
1771     oop const obj = static_cast<oop>(entry);
1772     _task->make_reference_grey(obj);
1773   }
1774 
1775 public:
1776   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1777     : _task(task), _g1h(g1h) { }
1778 
1779   virtual void do_buffer(void** buffer, size_t size) {
1780     for (size_t i = 0; i < size; ++i) {
1781       do_entry(buffer[i]);
1782     }
1783   }
1784 };
1785 
1786 class G1RemarkThreadsClosure : public ThreadClosure {
1787   G1CMSATBBufferClosure _cm_satb_cl;
1788   G1CMOopClosure _cm_cl;
1789   MarkingCodeBlobClosure _code_cl;
1790   uintx _claim_token;
1791 
1792  public:
1793   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1794     _cm_satb_cl(task, g1h),
1795     _cm_cl(g1h, task),
1796     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1797     _claim_token(Threads::thread_claim_token()) {}
1798 
1799   void do_thread(Thread* thread) {
1800     if (thread->claim_threads_do(true, _claim_token)) {
1801       SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
1802       queue.apply_closure_and_empty(&_cm_satb_cl);
1803       if (thread->is_Java_thread()) {
1804         // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
1805         // however the liveness of oops reachable from nmethods have very complex lifecycles:
1806         // * Alive if on the stack of an executing method
1807         // * Weakly reachable otherwise
1808         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
1809         // live by the SATB invariant but other oops recorded in nmethods may behave differently.
1810         JavaThread* jt = (JavaThread*)thread;
1811         jt->nmethods_do(&_code_cl);
1812       }
1813     }
1814   }
1815 };
1816 
1817 class G1CMRemarkTask : public AbstractGangTask {
1818   G1ConcurrentMark* _cm;
1819 public:
1820   void work(uint worker_id) {
1821     G1CMTask* task = _cm->task(worker_id);
1822     task->record_start_time();
1823     {
1824       ResourceMark rm;
1825       HandleMark hm;
1826 
1827       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1828       Threads::threads_do(&threads_f);
1829     }
1830 
1831     do {
1832       task->do_marking_step(1000000000.0 /* something very large */,
1833                             true         /* do_termination       */,
1834                             false        /* is_serial            */);
1835     } while (task->has_aborted() && !_cm->has_overflown());
1836     // If we overflow, then we do not want to restart. We instead
1837     // want to abort remark and do concurrent marking again.
1838     task->record_end_time();
1839   }
1840 
1841   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1842     AbstractGangTask("Par Remark"), _cm(cm) {
1843     _cm->terminator()->reset_for_reuse(active_workers);
1844   }
1845 };
1846 
1847 void G1ConcurrentMark::finalize_marking() {
1848   ResourceMark rm;
1849   HandleMark   hm;
1850 
1851   _g1h->ensure_parsability(false);
1852 
1853   // this is remark, so we'll use up all active threads
1854   uint active_workers = _g1h->workers()->active_workers();
1855   set_concurrency_and_phase(active_workers, false /* concurrent */);
1856   // Leave _parallel_marking_threads at it's
1857   // value originally calculated in the G1ConcurrentMark
1858   // constructor and pass values of the active workers
1859   // through the gang in the task.
1860 
1861   {
1862     StrongRootsScope srs(active_workers);
1863 
1864     G1CMRemarkTask remarkTask(this, active_workers);
1865     // We will start all available threads, even if we decide that the
1866     // active_workers will be fewer. The extra ones will just bail out
1867     // immediately.
1868     _g1h->workers()->run_task(&remarkTask);
1869   }
1870 
1871   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1872   guarantee(has_overflown() ||
1873             satb_mq_set.completed_buffers_num() == 0,
1874             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1875             BOOL_TO_STR(has_overflown()),
1876             satb_mq_set.completed_buffers_num());
1877 
1878   print_stats();
1879 }
1880 
1881 void G1ConcurrentMark::flush_all_task_caches() {
1882   size_t hits = 0;
1883   size_t misses = 0;
1884   for (uint i = 0; i < _max_num_tasks; i++) {
1885     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1886     hits += stats.first;
1887     misses += stats.second;
1888   }
1889   size_t sum = hits + misses;
1890   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1891                        hits, misses, percent_of(hits, sum));
1892 }
1893 
1894 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1895   _prev_mark_bitmap->clear_range(mr);
1896 }
1897 
1898 HeapRegion*
1899 G1ConcurrentMark::claim_region(uint worker_id) {
1900   // "checkpoint" the finger
1901   HeapWord* finger = _finger;
1902 
1903   while (finger < _heap.end()) {
1904     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1905 
1906     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1907     // Make sure that the reads below do not float before loading curr_region.
1908     OrderAccess::loadload();
1909     // Above heap_region_containing may return NULL as we always scan claim
1910     // until the end of the heap. In this case, just jump to the next region.
1911     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1912 
1913     // Is the gap between reading the finger and doing the CAS too long?
1914     HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
1915     if (res == finger && curr_region != NULL) {
1916       // we succeeded
1917       HeapWord*   bottom        = curr_region->bottom();
1918       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1919 
1920       // notice that _finger == end cannot be guaranteed here since,
1921       // someone else might have moved the finger even further
1922       assert(_finger >= end, "the finger should have moved forward");
1923 
1924       if (limit > bottom) {
1925         return curr_region;
1926       } else {
1927         assert(limit == bottom,
1928                "the region limit should be at bottom");
1929         // we return NULL and the caller should try calling
1930         // claim_region() again.
1931         return NULL;
1932       }
1933     } else {
1934       assert(_finger > finger, "the finger should have moved forward");
1935       // read it again
1936       finger = _finger;
1937     }
1938   }
1939 
1940   return NULL;
1941 }
1942 
1943 #ifndef PRODUCT
1944 class VerifyNoCSetOops {
1945   G1CollectedHeap* _g1h;
1946   const char* _phase;
1947   int _info;
1948 
1949 public:
1950   VerifyNoCSetOops(const char* phase, int info = -1) :
1951     _g1h(G1CollectedHeap::heap()),
1952     _phase(phase),
1953     _info(info)
1954   { }
1955 
1956   void operator()(G1TaskQueueEntry task_entry) const {
1957     if (task_entry.is_array_slice()) {
1958       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1959       return;
1960     }
1961     guarantee(oopDesc::is_oop(task_entry.obj()),
1962               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1963               p2i(task_entry.obj()), _phase, _info);
1964     HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
1965     guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
1966               "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
1967               p2i(task_entry.obj()), _phase, _info, r->hrm_index());
1968   }
1969 };
1970 
1971 void G1ConcurrentMark::verify_no_collection_set_oops() {
1972   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1973   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1974     return;
1975   }
1976 
1977   // Verify entries on the global mark stack
1978   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1979 
1980   // Verify entries on the task queues
1981   for (uint i = 0; i < _max_num_tasks; ++i) {
1982     G1CMTaskQueue* queue = _task_queues->queue(i);
1983     queue->iterate(VerifyNoCSetOops("Queue", i));
1984   }
1985 
1986   // Verify the global finger
1987   HeapWord* global_finger = finger();
1988   if (global_finger != NULL && global_finger < _heap.end()) {
1989     // Since we always iterate over all regions, we might get a NULL HeapRegion
1990     // here.
1991     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1992     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1993               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1994               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1995   }
1996 
1997   // Verify the task fingers
1998   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1999   for (uint i = 0; i < _num_concurrent_workers; ++i) {
2000     G1CMTask* task = _tasks[i];
2001     HeapWord* task_finger = task->finger();
2002     if (task_finger != NULL && task_finger < _heap.end()) {
2003       // See above note on the global finger verification.
2004       HeapRegion* r = _g1h->heap_region_containing(task_finger);
2005       guarantee(r == NULL || task_finger == r->bottom() ||
2006                 !r->in_collection_set() || !r->has_index_in_opt_cset(),
2007                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2008                 p2i(task_finger), HR_FORMAT_PARAMS(r));
2009     }
2010   }
2011 }
2012 #endif // PRODUCT
2013 
2014 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
2015   _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
2016 }
2017 
2018 void G1ConcurrentMark::print_stats() {
2019   if (!log_is_enabled(Debug, gc, stats)) {
2020     return;
2021   }
2022   log_debug(gc, stats)("---------------------------------------------------------------------");
2023   for (size_t i = 0; i < _num_active_tasks; ++i) {
2024     _tasks[i]->print_stats();
2025     log_debug(gc, stats)("---------------------------------------------------------------------");
2026   }
2027 }
2028 
2029 void G1ConcurrentMark::concurrent_cycle_abort() {
2030   if (!cm_thread()->during_cycle() || _has_aborted) {
2031     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2032     return;
2033   }
2034 
2035   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2036   // concurrent bitmap clearing.
2037   {
2038     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2039     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2040   }
2041   // Note we cannot clear the previous marking bitmap here
2042   // since VerifyDuringGC verifies the objects marked during
2043   // a full GC against the previous bitmap.
2044 
2045   // Empty mark stack
2046   reset_marking_for_restart();
2047   for (uint i = 0; i < _max_num_tasks; ++i) {
2048     _tasks[i]->clear_region_fields();
2049   }
2050   _first_overflow_barrier_sync.abort();
2051   _second_overflow_barrier_sync.abort();
2052   _has_aborted = true;
2053 
2054   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2055   satb_mq_set.abandon_partial_marking();
2056   // This can be called either during or outside marking, we'll read
2057   // the expected_active value from the SATB queue set.
2058   satb_mq_set.set_active_all_threads(
2059                                  false, /* new active value */
2060                                  satb_mq_set.is_active() /* expected_active */);
2061 }
2062 
2063 static void print_ms_time_info(const char* prefix, const char* name,
2064                                NumberSeq& ns) {
2065   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2066                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2067   if (ns.num() > 0) {
2068     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2069                            prefix, ns.sd(), ns.maximum());
2070   }
2071 }
2072 
2073 void G1ConcurrentMark::print_summary_info() {
2074   Log(gc, marking) log;
2075   if (!log.is_trace()) {
2076     return;
2077   }
2078 
2079   log.trace(" Concurrent marking:");
2080   print_ms_time_info("  ", "init marks", _init_times);
2081   print_ms_time_info("  ", "remarks", _remark_times);
2082   {
2083     print_ms_time_info("     ", "final marks", _remark_mark_times);
2084     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2085 
2086   }
2087   print_ms_time_info("  ", "cleanups", _cleanup_times);
2088   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2089             _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2090   log.trace("  Total stop_world time = %8.2f s.",
2091             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2092   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2093             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2094 }
2095 
2096 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2097   _concurrent_workers->print_worker_threads_on(st);
2098 }
2099 
2100 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2101   _concurrent_workers->threads_do(tc);
2102 }
2103 
2104 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2105   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2106                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2107   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2108   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2109 }
2110 
2111 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2112   ReferenceProcessor* result = g1h->ref_processor_cm();
2113   assert(result != NULL, "CM reference processor should not be NULL");
2114   return result;
2115 }
2116 
2117 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2118                                G1CMTask* task)
2119   : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2120     _g1h(g1h), _task(task)
2121 { }
2122 
2123 void G1CMTask::setup_for_region(HeapRegion* hr) {
2124   assert(hr != NULL,
2125         "claim_region() should have filtered out NULL regions");
2126   _curr_region  = hr;
2127   _finger       = hr->bottom();
2128   update_region_limit();
2129 }
2130 
2131 void G1CMTask::update_region_limit() {
2132   HeapRegion* hr            = _curr_region;
2133   HeapWord* bottom          = hr->bottom();
2134   HeapWord* limit           = hr->next_top_at_mark_start();
2135 
2136   if (limit == bottom) {
2137     // The region was collected underneath our feet.
2138     // We set the finger to bottom to ensure that the bitmap
2139     // iteration that will follow this will not do anything.
2140     // (this is not a condition that holds when we set the region up,
2141     // as the region is not supposed to be empty in the first place)
2142     _finger = bottom;
2143   } else if (limit >= _region_limit) {
2144     assert(limit >= _finger, "peace of mind");
2145   } else {
2146     assert(limit < _region_limit, "only way to get here");
2147     // This can happen under some pretty unusual circumstances.  An
2148     // evacuation pause empties the region underneath our feet (NTAMS
2149     // at bottom). We then do some allocation in the region (NTAMS
2150     // stays at bottom), followed by the region being used as a GC
2151     // alloc region (NTAMS will move to top() and the objects
2152     // originally below it will be grayed). All objects now marked in
2153     // the region are explicitly grayed, if below the global finger,
2154     // and we do not need in fact to scan anything else. So, we simply
2155     // set _finger to be limit to ensure that the bitmap iteration
2156     // doesn't do anything.
2157     _finger = limit;
2158   }
2159 
2160   _region_limit = limit;
2161 }
2162 
2163 void G1CMTask::giveup_current_region() {
2164   assert(_curr_region != NULL, "invariant");
2165   clear_region_fields();
2166 }
2167 
2168 void G1CMTask::clear_region_fields() {
2169   // Values for these three fields that indicate that we're not
2170   // holding on to a region.
2171   _curr_region   = NULL;
2172   _finger        = NULL;
2173   _region_limit  = NULL;
2174 }
2175 
2176 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2177   if (cm_oop_closure == NULL) {
2178     assert(_cm_oop_closure != NULL, "invariant");
2179   } else {
2180     assert(_cm_oop_closure == NULL, "invariant");
2181   }
2182   _cm_oop_closure = cm_oop_closure;
2183 }
2184 
2185 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2186   guarantee(next_mark_bitmap != NULL, "invariant");
2187   _next_mark_bitmap              = next_mark_bitmap;
2188   clear_region_fields();
2189 
2190   _calls                         = 0;
2191   _elapsed_time_ms               = 0.0;
2192   _termination_time_ms           = 0.0;
2193   _termination_start_time_ms     = 0.0;
2194 
2195   _mark_stats_cache.reset();
2196 }
2197 
2198 bool G1CMTask::should_exit_termination() {
2199   if (!regular_clock_call()) {
2200     return true;
2201   }
2202 
2203   // This is called when we are in the termination protocol. We should
2204   // quit if, for some reason, this task wants to abort or the global
2205   // stack is not empty (this means that we can get work from it).
2206   return !_cm->mark_stack_empty() || has_aborted();
2207 }
2208 
2209 void G1CMTask::reached_limit() {
2210   assert(_words_scanned >= _words_scanned_limit ||
2211          _refs_reached >= _refs_reached_limit ,
2212          "shouldn't have been called otherwise");
2213   abort_marking_if_regular_check_fail();
2214 }
2215 
2216 bool G1CMTask::regular_clock_call() {
2217   if (has_aborted()) {
2218     return false;
2219   }
2220 
2221   // First, we need to recalculate the words scanned and refs reached
2222   // limits for the next clock call.
2223   recalculate_limits();
2224 
2225   // During the regular clock call we do the following
2226 
2227   // (1) If an overflow has been flagged, then we abort.
2228   if (_cm->has_overflown()) {
2229     return false;
2230   }
2231 
2232   // If we are not concurrent (i.e. we're doing remark) we don't need
2233   // to check anything else. The other steps are only needed during
2234   // the concurrent marking phase.
2235   if (!_cm->concurrent()) {
2236     return true;
2237   }
2238 
2239   // (2) If marking has been aborted for Full GC, then we also abort.
2240   if (_cm->has_aborted()) {
2241     return false;
2242   }
2243 
2244   double curr_time_ms = os::elapsedVTime() * 1000.0;
2245 
2246   // (4) We check whether we should yield. If we have to, then we abort.
2247   if (SuspendibleThreadSet::should_yield()) {
2248     // We should yield. To do this we abort the task. The caller is
2249     // responsible for yielding.
2250     return false;
2251   }
2252 
2253   // (5) We check whether we've reached our time quota. If we have,
2254   // then we abort.
2255   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2256   if (elapsed_time_ms > _time_target_ms) {
2257     _has_timed_out = true;
2258     return false;
2259   }
2260 
2261   // (6) Finally, we check whether there are enough completed STAB
2262   // buffers available for processing. If there are, we abort.
2263   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2264   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2265     // we do need to process SATB buffers, we'll abort and restart
2266     // the marking task to do so
2267     return false;
2268   }
2269   return true;
2270 }
2271 
2272 void G1CMTask::recalculate_limits() {
2273   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2274   _words_scanned_limit      = _real_words_scanned_limit;
2275 
2276   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2277   _refs_reached_limit       = _real_refs_reached_limit;
2278 }
2279 
2280 void G1CMTask::decrease_limits() {
2281   // This is called when we believe that we're going to do an infrequent
2282   // operation which will increase the per byte scanned cost (i.e. move
2283   // entries to/from the global stack). It basically tries to decrease the
2284   // scanning limit so that the clock is called earlier.
2285 
2286   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2287   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2288 }
2289 
2290 void G1CMTask::move_entries_to_global_stack() {
2291   // Local array where we'll store the entries that will be popped
2292   // from the local queue.
2293   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2294 
2295   size_t n = 0;
2296   G1TaskQueueEntry task_entry;
2297   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2298     buffer[n] = task_entry;
2299     ++n;
2300   }
2301   if (n < G1CMMarkStack::EntriesPerChunk) {
2302     buffer[n] = G1TaskQueueEntry();
2303   }
2304 
2305   if (n > 0) {
2306     if (!_cm->mark_stack_push(buffer)) {
2307       set_has_aborted();
2308     }
2309   }
2310 
2311   // This operation was quite expensive, so decrease the limits.
2312   decrease_limits();
2313 }
2314 
2315 bool G1CMTask::get_entries_from_global_stack() {
2316   // Local array where we'll store the entries that will be popped
2317   // from the global stack.
2318   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2319 
2320   if (!_cm->mark_stack_pop(buffer)) {
2321     return false;
2322   }
2323 
2324   // We did actually pop at least one entry.
2325   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2326     G1TaskQueueEntry task_entry = buffer[i];
2327     if (task_entry.is_null()) {
2328       break;
2329     }
2330     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2331     bool success = _task_queue->push(task_entry);
2332     // We only call this when the local queue is empty or under a
2333     // given target limit. So, we do not expect this push to fail.
2334     assert(success, "invariant");
2335   }
2336 
2337   // This operation was quite expensive, so decrease the limits
2338   decrease_limits();
2339   return true;
2340 }
2341 
2342 void G1CMTask::drain_local_queue(bool partially) {
2343   if (has_aborted()) {
2344     return;
2345   }
2346 
2347   // Decide what the target size is, depending whether we're going to
2348   // drain it partially (so that other tasks can steal if they run out
2349   // of things to do) or totally (at the very end).
2350   size_t target_size;
2351   if (partially) {
2352     target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2353   } else {
2354     target_size = 0;
2355   }
2356 
2357   if (_task_queue->size() > target_size) {
2358     G1TaskQueueEntry entry;
2359     bool ret = _task_queue->pop_local(entry);
2360     while (ret) {
2361       scan_task_entry(entry);
2362       if (_task_queue->size() <= target_size || has_aborted()) {
2363         ret = false;
2364       } else {
2365         ret = _task_queue->pop_local(entry);
2366       }
2367     }
2368   }
2369 }
2370 
2371 void G1CMTask::drain_global_stack(bool partially) {
2372   if (has_aborted()) {
2373     return;
2374   }
2375 
2376   // We have a policy to drain the local queue before we attempt to
2377   // drain the global stack.
2378   assert(partially || _task_queue->size() == 0, "invariant");
2379 
2380   // Decide what the target size is, depending whether we're going to
2381   // drain it partially (so that other tasks can steal if they run out
2382   // of things to do) or totally (at the very end).
2383   // Notice that when draining the global mark stack partially, due to the racyness
2384   // of the mark stack size update we might in fact drop below the target. But,
2385   // this is not a problem.
2386   // In case of total draining, we simply process until the global mark stack is
2387   // totally empty, disregarding the size counter.
2388   if (partially) {
2389     size_t const target_size = _cm->partial_mark_stack_size_target();
2390     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2391       if (get_entries_from_global_stack()) {
2392         drain_local_queue(partially);
2393       }
2394     }
2395   } else {
2396     while (!has_aborted() && get_entries_from_global_stack()) {
2397       drain_local_queue(partially);
2398     }
2399   }
2400 }
2401 
2402 // SATB Queue has several assumptions on whether to call the par or
2403 // non-par versions of the methods. this is why some of the code is
2404 // replicated. We should really get rid of the single-threaded version
2405 // of the code to simplify things.
2406 void G1CMTask::drain_satb_buffers() {
2407   if (has_aborted()) {
2408     return;
2409   }
2410 
2411   // We set this so that the regular clock knows that we're in the
2412   // middle of draining buffers and doesn't set the abort flag when it
2413   // notices that SATB buffers are available for draining. It'd be
2414   // very counter productive if it did that. :-)
2415   _draining_satb_buffers = true;
2416 
2417   G1CMSATBBufferClosure satb_cl(this, _g1h);
2418   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2419 
2420   // This keeps claiming and applying the closure to completed buffers
2421   // until we run out of buffers or we need to abort.
2422   while (!has_aborted() &&
2423          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2424     abort_marking_if_regular_check_fail();
2425   }
2426 
2427   // Can't assert qset is empty here, even if not aborted.  If concurrent,
2428   // some other thread might be adding to the queue.  If not concurrent,
2429   // some other thread might have won the race for the last buffer, but
2430   // has not yet decremented the count.
2431 
2432   _draining_satb_buffers = false;
2433 
2434   // again, this was a potentially expensive operation, decrease the
2435   // limits to get the regular clock call early
2436   decrease_limits();
2437 }
2438 
2439 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2440   _mark_stats_cache.reset(region_idx);
2441 }
2442 
2443 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2444   return _mark_stats_cache.evict_all();
2445 }
2446 
2447 void G1CMTask::print_stats() {
2448   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2449   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2450                        _elapsed_time_ms, _termination_time_ms);
2451   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2452                        _step_times_ms.num(),
2453                        _step_times_ms.avg(),
2454                        _step_times_ms.sd(),
2455                        _step_times_ms.maximum(),
2456                        _step_times_ms.sum());
2457   size_t const hits = _mark_stats_cache.hits();
2458   size_t const misses = _mark_stats_cache.misses();
2459   log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2460                        hits, misses, percent_of(hits, hits + misses));
2461 }
2462 
2463 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2464   return _task_queues->steal(worker_id, task_entry);
2465 }
2466 
2467 /*****************************************************************************
2468 
2469     The do_marking_step(time_target_ms, ...) method is the building
2470     block of the parallel marking framework. It can be called in parallel
2471     with other invocations of do_marking_step() on different tasks
2472     (but only one per task, obviously) and concurrently with the
2473     mutator threads, or during remark, hence it eliminates the need
2474     for two versions of the code. When called during remark, it will
2475     pick up from where the task left off during the concurrent marking
2476     phase. Interestingly, tasks are also claimable during evacuation
2477     pauses too, since do_marking_step() ensures that it aborts before
2478     it needs to yield.
2479 
2480     The data structures that it uses to do marking work are the
2481     following:
2482 
2483       (1) Marking Bitmap. If there are gray objects that appear only
2484       on the bitmap (this happens either when dealing with an overflow
2485       or when the initial marking phase has simply marked the roots
2486       and didn't push them on the stack), then tasks claim heap
2487       regions whose bitmap they then scan to find gray objects. A
2488       global finger indicates where the end of the last claimed region
2489       is. A local finger indicates how far into the region a task has
2490       scanned. The two fingers are used to determine how to gray an
2491       object (i.e. whether simply marking it is OK, as it will be
2492       visited by a task in the future, or whether it needs to be also
2493       pushed on a stack).
2494 
2495       (2) Local Queue. The local queue of the task which is accessed
2496       reasonably efficiently by the task. Other tasks can steal from
2497       it when they run out of work. Throughout the marking phase, a
2498       task attempts to keep its local queue short but not totally
2499       empty, so that entries are available for stealing by other
2500       tasks. Only when there is no more work, a task will totally
2501       drain its local queue.
2502 
2503       (3) Global Mark Stack. This handles local queue overflow. During
2504       marking only sets of entries are moved between it and the local
2505       queues, as access to it requires a mutex and more fine-grain
2506       interaction with it which might cause contention. If it
2507       overflows, then the marking phase should restart and iterate
2508       over the bitmap to identify gray objects. Throughout the marking
2509       phase, tasks attempt to keep the global mark stack at a small
2510       length but not totally empty, so that entries are available for
2511       popping by other tasks. Only when there is no more work, tasks
2512       will totally drain the global mark stack.
2513 
2514       (4) SATB Buffer Queue. This is where completed SATB buffers are
2515       made available. Buffers are regularly removed from this queue
2516       and scanned for roots, so that the queue doesn't get too
2517       long. During remark, all completed buffers are processed, as
2518       well as the filled in parts of any uncompleted buffers.
2519 
2520     The do_marking_step() method tries to abort when the time target
2521     has been reached. There are a few other cases when the
2522     do_marking_step() method also aborts:
2523 
2524       (1) When the marking phase has been aborted (after a Full GC).
2525 
2526       (2) When a global overflow (on the global stack) has been
2527       triggered. Before the task aborts, it will actually sync up with
2528       the other tasks to ensure that all the marking data structures
2529       (local queues, stacks, fingers etc.)  are re-initialized so that
2530       when do_marking_step() completes, the marking phase can
2531       immediately restart.
2532 
2533       (3) When enough completed SATB buffers are available. The
2534       do_marking_step() method only tries to drain SATB buffers right
2535       at the beginning. So, if enough buffers are available, the
2536       marking step aborts and the SATB buffers are processed at
2537       the beginning of the next invocation.
2538 
2539       (4) To yield. when we have to yield then we abort and yield
2540       right at the end of do_marking_step(). This saves us from a lot
2541       of hassle as, by yielding we might allow a Full GC. If this
2542       happens then objects will be compacted underneath our feet, the
2543       heap might shrink, etc. We save checking for this by just
2544       aborting and doing the yield right at the end.
2545 
2546     From the above it follows that the do_marking_step() method should
2547     be called in a loop (or, otherwise, regularly) until it completes.
2548 
2549     If a marking step completes without its has_aborted() flag being
2550     true, it means it has completed the current marking phase (and
2551     also all other marking tasks have done so and have all synced up).
2552 
2553     A method called regular_clock_call() is invoked "regularly" (in
2554     sub ms intervals) throughout marking. It is this clock method that
2555     checks all the abort conditions which were mentioned above and
2556     decides when the task should abort. A work-based scheme is used to
2557     trigger this clock method: when the number of object words the
2558     marking phase has scanned or the number of references the marking
2559     phase has visited reach a given limit. Additional invocations to
2560     the method clock have been planted in a few other strategic places
2561     too. The initial reason for the clock method was to avoid calling
2562     vtime too regularly, as it is quite expensive. So, once it was in
2563     place, it was natural to piggy-back all the other conditions on it
2564     too and not constantly check them throughout the code.
2565 
2566     If do_termination is true then do_marking_step will enter its
2567     termination protocol.
2568 
2569     The value of is_serial must be true when do_marking_step is being
2570     called serially (i.e. by the VMThread) and do_marking_step should
2571     skip any synchronization in the termination and overflow code.
2572     Examples include the serial remark code and the serial reference
2573     processing closures.
2574 
2575     The value of is_serial must be false when do_marking_step is
2576     being called by any of the worker threads in a work gang.
2577     Examples include the concurrent marking code (CMMarkingTask),
2578     the MT remark code, and the MT reference processing closures.
2579 
2580  *****************************************************************************/
2581 
2582 void G1CMTask::do_marking_step(double time_target_ms,
2583                                bool do_termination,
2584                                bool is_serial) {
2585   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2586 
2587   _start_time_ms = os::elapsedVTime() * 1000.0;
2588 
2589   // If do_stealing is true then do_marking_step will attempt to
2590   // steal work from the other G1CMTasks. It only makes sense to
2591   // enable stealing when the termination protocol is enabled
2592   // and do_marking_step() is not being called serially.
2593   bool do_stealing = do_termination && !is_serial;
2594 
2595   G1Predictions const& predictor = _g1h->policy()->predictor();
2596   double diff_prediction_ms = predictor.predict_zero_bounded(&_marking_step_diff_ms);
2597   _time_target_ms = time_target_ms - diff_prediction_ms;
2598 
2599   // set up the variables that are used in the work-based scheme to
2600   // call the regular clock method
2601   _words_scanned = 0;
2602   _refs_reached  = 0;
2603   recalculate_limits();
2604 
2605   // clear all flags
2606   clear_has_aborted();
2607   _has_timed_out = false;
2608   _draining_satb_buffers = false;
2609 
2610   ++_calls;
2611 
2612   // Set up the bitmap and oop closures. Anything that uses them is
2613   // eventually called from this method, so it is OK to allocate these
2614   // statically.
2615   G1CMBitMapClosure bitmap_closure(this, _cm);
2616   G1CMOopClosure cm_oop_closure(_g1h, this);
2617   set_cm_oop_closure(&cm_oop_closure);
2618 
2619   if (_cm->has_overflown()) {
2620     // This can happen if the mark stack overflows during a GC pause
2621     // and this task, after a yield point, restarts. We have to abort
2622     // as we need to get into the overflow protocol which happens
2623     // right at the end of this task.
2624     set_has_aborted();
2625   }
2626 
2627   // First drain any available SATB buffers. After this, we will not
2628   // look at SATB buffers before the next invocation of this method.
2629   // If enough completed SATB buffers are queued up, the regular clock
2630   // will abort this task so that it restarts.
2631   drain_satb_buffers();
2632   // ...then partially drain the local queue and the global stack
2633   drain_local_queue(true);
2634   drain_global_stack(true);
2635 
2636   do {
2637     if (!has_aborted() && _curr_region != NULL) {
2638       // This means that we're already holding on to a region.
2639       assert(_finger != NULL, "if region is not NULL, then the finger "
2640              "should not be NULL either");
2641 
2642       // We might have restarted this task after an evacuation pause
2643       // which might have evacuated the region we're holding on to
2644       // underneath our feet. Let's read its limit again to make sure
2645       // that we do not iterate over a region of the heap that
2646       // contains garbage (update_region_limit() will also move
2647       // _finger to the start of the region if it is found empty).
2648       update_region_limit();
2649       // We will start from _finger not from the start of the region,
2650       // as we might be restarting this task after aborting half-way
2651       // through scanning this region. In this case, _finger points to
2652       // the address where we last found a marked object. If this is a
2653       // fresh region, _finger points to start().
2654       MemRegion mr = MemRegion(_finger, _region_limit);
2655 
2656       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2657              "humongous regions should go around loop once only");
2658 
2659       // Some special cases:
2660       // If the memory region is empty, we can just give up the region.
2661       // If the current region is humongous then we only need to check
2662       // the bitmap for the bit associated with the start of the object,
2663       // scan the object if it's live, and give up the region.
2664       // Otherwise, let's iterate over the bitmap of the part of the region
2665       // that is left.
2666       // If the iteration is successful, give up the region.
2667       if (mr.is_empty()) {
2668         giveup_current_region();
2669         abort_marking_if_regular_check_fail();
2670       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2671         if (_next_mark_bitmap->is_marked(mr.start())) {
2672           // The object is marked - apply the closure
2673           bitmap_closure.do_addr(mr.start());
2674         }
2675         // Even if this task aborted while scanning the humongous object
2676         // we can (and should) give up the current region.
2677         giveup_current_region();
2678         abort_marking_if_regular_check_fail();
2679       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2680         giveup_current_region();
2681         abort_marking_if_regular_check_fail();
2682       } else {
2683         assert(has_aborted(), "currently the only way to do so");
2684         // The only way to abort the bitmap iteration is to return
2685         // false from the do_bit() method. However, inside the
2686         // do_bit() method we move the _finger to point to the
2687         // object currently being looked at. So, if we bail out, we
2688         // have definitely set _finger to something non-null.
2689         assert(_finger != NULL, "invariant");
2690 
2691         // Region iteration was actually aborted. So now _finger
2692         // points to the address of the object we last scanned. If we
2693         // leave it there, when we restart this task, we will rescan
2694         // the object. It is easy to avoid this. We move the finger by
2695         // enough to point to the next possible object header.
2696         assert(_finger < _region_limit, "invariant");
2697         HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2698         // Check if bitmap iteration was aborted while scanning the last object
2699         if (new_finger >= _region_limit) {
2700           giveup_current_region();
2701         } else {
2702           move_finger_to(new_finger);
2703         }
2704       }
2705     }
2706     // At this point we have either completed iterating over the
2707     // region we were holding on to, or we have aborted.
2708 
2709     // We then partially drain the local queue and the global stack.
2710     // (Do we really need this?)
2711     drain_local_queue(true);
2712     drain_global_stack(true);
2713 
2714     // Read the note on the claim_region() method on why it might
2715     // return NULL with potentially more regions available for
2716     // claiming and why we have to check out_of_regions() to determine
2717     // whether we're done or not.
2718     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2719       // We are going to try to claim a new region. We should have
2720       // given up on the previous one.
2721       // Separated the asserts so that we know which one fires.
2722       assert(_curr_region  == NULL, "invariant");
2723       assert(_finger       == NULL, "invariant");
2724       assert(_region_limit == NULL, "invariant");
2725       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2726       if (claimed_region != NULL) {
2727         // Yes, we managed to claim one
2728         setup_for_region(claimed_region);
2729         assert(_curr_region == claimed_region, "invariant");
2730       }
2731       // It is important to call the regular clock here. It might take
2732       // a while to claim a region if, for example, we hit a large
2733       // block of empty regions. So we need to call the regular clock
2734       // method once round the loop to make sure it's called
2735       // frequently enough.
2736       abort_marking_if_regular_check_fail();
2737     }
2738 
2739     if (!has_aborted() && _curr_region == NULL) {
2740       assert(_cm->out_of_regions(),
2741              "at this point we should be out of regions");
2742     }
2743   } while ( _curr_region != NULL && !has_aborted());
2744 
2745   if (!has_aborted()) {
2746     // We cannot check whether the global stack is empty, since other
2747     // tasks might be pushing objects to it concurrently.
2748     assert(_cm->out_of_regions(),
2749            "at this point we should be out of regions");
2750     // Try to reduce the number of available SATB buffers so that
2751     // remark has less work to do.
2752     drain_satb_buffers();
2753   }
2754 
2755   // Since we've done everything else, we can now totally drain the
2756   // local queue and global stack.
2757   drain_local_queue(false);
2758   drain_global_stack(false);
2759 
2760   // Attempt at work stealing from other task's queues.
2761   if (do_stealing && !has_aborted()) {
2762     // We have not aborted. This means that we have finished all that
2763     // we could. Let's try to do some stealing...
2764 
2765     // We cannot check whether the global stack is empty, since other
2766     // tasks might be pushing objects to it concurrently.
2767     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2768            "only way to reach here");
2769     while (!has_aborted()) {
2770       G1TaskQueueEntry entry;
2771       if (_cm->try_stealing(_worker_id, entry)) {
2772         scan_task_entry(entry);
2773 
2774         // And since we're towards the end, let's totally drain the
2775         // local queue and global stack.
2776         drain_local_queue(false);
2777         drain_global_stack(false);
2778       } else {
2779         break;
2780       }
2781     }
2782   }
2783 
2784   // We still haven't aborted. Now, let's try to get into the
2785   // termination protocol.
2786   if (do_termination && !has_aborted()) {
2787     // We cannot check whether the global stack is empty, since other
2788     // tasks might be concurrently pushing objects on it.
2789     // Separated the asserts so that we know which one fires.
2790     assert(_cm->out_of_regions(), "only way to reach here");
2791     assert(_task_queue->size() == 0, "only way to reach here");
2792     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2793 
2794     // The G1CMTask class also extends the TerminatorTerminator class,
2795     // hence its should_exit_termination() method will also decide
2796     // whether to exit the termination protocol or not.
2797     bool finished = (is_serial ||
2798                      _cm->terminator()->offer_termination(this));
2799     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2800     _termination_time_ms +=
2801       termination_end_time_ms - _termination_start_time_ms;
2802 
2803     if (finished) {
2804       // We're all done.
2805 
2806       // We can now guarantee that the global stack is empty, since
2807       // all other tasks have finished. We separated the guarantees so
2808       // that, if a condition is false, we can immediately find out
2809       // which one.
2810       guarantee(_cm->out_of_regions(), "only way to reach here");
2811       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2812       guarantee(_task_queue->size() == 0, "only way to reach here");
2813       guarantee(!_cm->has_overflown(), "only way to reach here");
2814       guarantee(!has_aborted(), "should never happen if termination has completed");
2815     } else {
2816       // Apparently there's more work to do. Let's abort this task. It
2817       // will restart it and we can hopefully find more things to do.
2818       set_has_aborted();
2819     }
2820   }
2821 
2822   // Mainly for debugging purposes to make sure that a pointer to the
2823   // closure which was statically allocated in this frame doesn't
2824   // escape it by accident.
2825   set_cm_oop_closure(NULL);
2826   double end_time_ms = os::elapsedVTime() * 1000.0;
2827   double elapsed_time_ms = end_time_ms - _start_time_ms;
2828   // Update the step history.
2829   _step_times_ms.add(elapsed_time_ms);
2830 
2831   if (has_aborted()) {
2832     // The task was aborted for some reason.
2833     if (_has_timed_out) {
2834       double diff_ms = elapsed_time_ms - _time_target_ms;
2835       // Keep statistics of how well we did with respect to hitting
2836       // our target only if we actually timed out (if we aborted for
2837       // other reasons, then the results might get skewed).
2838       _marking_step_diff_ms.add(diff_ms);
2839     }
2840 
2841     if (_cm->has_overflown()) {
2842       // This is the interesting one. We aborted because a global
2843       // overflow was raised. This means we have to restart the
2844       // marking phase and start iterating over regions. However, in
2845       // order to do this we have to make sure that all tasks stop
2846       // what they are doing and re-initialize in a safe manner. We
2847       // will achieve this with the use of two barrier sync points.
2848 
2849       if (!is_serial) {
2850         // We only need to enter the sync barrier if being called
2851         // from a parallel context
2852         _cm->enter_first_sync_barrier(_worker_id);
2853 
2854         // When we exit this sync barrier we know that all tasks have
2855         // stopped doing marking work. So, it's now safe to
2856         // re-initialize our data structures.
2857       }
2858 
2859       clear_region_fields();
2860       flush_mark_stats_cache();
2861 
2862       if (!is_serial) {
2863         // If we're executing the concurrent phase of marking, reset the marking
2864         // state; otherwise the marking state is reset after reference processing,
2865         // during the remark pause.
2866         // If we reset here as a result of an overflow during the remark we will
2867         // see assertion failures from any subsequent set_concurrency_and_phase()
2868         // calls.
2869         if (_cm->concurrent() && _worker_id == 0) {
2870           // Worker 0 is responsible for clearing the global data structures because
2871           // of an overflow. During STW we should not clear the overflow flag (in
2872           // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit
2873           // method to abort the pause and restart concurrent marking.
2874           _cm->reset_marking_for_restart();
2875 
2876           log_info(gc, marking)("Concurrent Mark reset for overflow");
2877         }
2878 
2879         // ...and enter the second barrier.
2880         _cm->enter_second_sync_barrier(_worker_id);
2881       }
2882       // At this point, if we're during the concurrent phase of
2883       // marking, everything has been re-initialized and we're
2884       // ready to restart.
2885     }
2886   }
2887 }
2888 
2889 G1CMTask::G1CMTask(uint worker_id,
2890                    G1ConcurrentMark* cm,
2891                    G1CMTaskQueue* task_queue,
2892                    G1RegionMarkStats* mark_stats,
2893                    uint max_regions) :
2894   _objArray_processor(this),
2895   _worker_id(worker_id),
2896   _g1h(G1CollectedHeap::heap()),
2897   _cm(cm),
2898   _next_mark_bitmap(NULL),
2899   _task_queue(task_queue),
2900   _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2901   _calls(0),
2902   _time_target_ms(0.0),
2903   _start_time_ms(0.0),
2904   _cm_oop_closure(NULL),
2905   _curr_region(NULL),
2906   _finger(NULL),
2907   _region_limit(NULL),
2908   _words_scanned(0),
2909   _words_scanned_limit(0),
2910   _real_words_scanned_limit(0),
2911   _refs_reached(0),
2912   _refs_reached_limit(0),
2913   _real_refs_reached_limit(0),
2914   _has_aborted(false),
2915   _has_timed_out(false),
2916   _draining_satb_buffers(false),
2917   _step_times_ms(),
2918   _elapsed_time_ms(0.0),
2919   _termination_time_ms(0.0),
2920   _termination_start_time_ms(0.0),
2921   _marking_step_diff_ms()
2922 {
2923   guarantee(task_queue != NULL, "invariant");
2924 
2925   _marking_step_diff_ms.add(0.5);
2926 }
2927 
2928 // These are formatting macros that are used below to ensure
2929 // consistent formatting. The *_H_* versions are used to format the
2930 // header for a particular value and they should be kept consistent
2931 // with the corresponding macro. Also note that most of the macros add
2932 // the necessary white space (as a prefix) which makes them a bit
2933 // easier to compose.
2934 
2935 // All the output lines are prefixed with this string to be able to
2936 // identify them easily in a large log file.
2937 #define G1PPRL_LINE_PREFIX            "###"
2938 
2939 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
2940 #ifdef _LP64
2941 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
2942 #else // _LP64
2943 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
2944 #endif // _LP64
2945 
2946 // For per-region info
2947 #define G1PPRL_TYPE_FORMAT            "   %-4s"
2948 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
2949 #define G1PPRL_STATE_FORMAT           "   %-5s"
2950 #define G1PPRL_STATE_H_FORMAT         "   %5s"
2951 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
2952 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
2953 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
2954 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
2955 
2956 // For summary info
2957 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
2958 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
2959 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
2960 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
2961 
2962 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2963   _total_used_bytes(0), _total_capacity_bytes(0),
2964   _total_prev_live_bytes(0), _total_next_live_bytes(0),
2965   _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2966 {
2967   if (!log_is_enabled(Trace, gc, liveness)) {
2968     return;
2969   }
2970 
2971   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2972   MemRegion g1_reserved = g1h->g1_reserved();
2973   double now = os::elapsedTime();
2974 
2975   // Print the header of the output.
2976   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2977   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2978                           G1PPRL_SUM_ADDR_FORMAT("reserved")
2979                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
2980                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
2981                           HeapRegion::GrainBytes);
2982   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2983   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2984                           G1PPRL_TYPE_H_FORMAT
2985                           G1PPRL_ADDR_BASE_H_FORMAT
2986                           G1PPRL_BYTE_H_FORMAT
2987                           G1PPRL_BYTE_H_FORMAT
2988                           G1PPRL_BYTE_H_FORMAT
2989                           G1PPRL_DOUBLE_H_FORMAT
2990                           G1PPRL_BYTE_H_FORMAT
2991                           G1PPRL_STATE_H_FORMAT
2992                           G1PPRL_BYTE_H_FORMAT,
2993                           "type", "address-range",
2994                           "used", "prev-live", "next-live", "gc-eff",
2995                           "remset", "state", "code-roots");
2996   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2997                           G1PPRL_TYPE_H_FORMAT
2998                           G1PPRL_ADDR_BASE_H_FORMAT
2999                           G1PPRL_BYTE_H_FORMAT
3000                           G1PPRL_BYTE_H_FORMAT
3001                           G1PPRL_BYTE_H_FORMAT
3002                           G1PPRL_DOUBLE_H_FORMAT
3003                           G1PPRL_BYTE_H_FORMAT
3004                           G1PPRL_STATE_H_FORMAT
3005                           G1PPRL_BYTE_H_FORMAT,
3006                           "", "",
3007                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3008                           "(bytes)", "", "(bytes)");
3009 }
3010 
3011 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
3012   if (!log_is_enabled(Trace, gc, liveness)) {
3013     return false;
3014   }
3015 
3016   const char* type       = r->get_type_str();
3017   HeapWord* bottom       = r->bottom();
3018   HeapWord* end          = r->end();
3019   size_t capacity_bytes  = r->capacity();
3020   size_t used_bytes      = r->used();
3021   size_t prev_live_bytes = r->live_bytes();
3022   size_t next_live_bytes = r->next_live_bytes();
3023   double gc_eff          = r->gc_efficiency();
3024   size_t remset_bytes    = r->rem_set()->mem_size();
3025   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3026   const char* remset_type = r->rem_set()->get_short_state_str();
3027 
3028   _total_used_bytes      += used_bytes;
3029   _total_capacity_bytes  += capacity_bytes;
3030   _total_prev_live_bytes += prev_live_bytes;
3031   _total_next_live_bytes += next_live_bytes;
3032   _total_remset_bytes    += remset_bytes;
3033   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3034 
3035   // Print a line for this particular region.
3036   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3037                           G1PPRL_TYPE_FORMAT
3038                           G1PPRL_ADDR_BASE_FORMAT
3039                           G1PPRL_BYTE_FORMAT
3040                           G1PPRL_BYTE_FORMAT
3041                           G1PPRL_BYTE_FORMAT
3042                           G1PPRL_DOUBLE_FORMAT
3043                           G1PPRL_BYTE_FORMAT
3044                           G1PPRL_STATE_FORMAT
3045                           G1PPRL_BYTE_FORMAT,
3046                           type, p2i(bottom), p2i(end),
3047                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3048                           remset_bytes, remset_type, strong_code_roots_bytes);
3049 
3050   return false;
3051 }
3052 
3053 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3054   if (!log_is_enabled(Trace, gc, liveness)) {
3055     return;
3056   }
3057 
3058   // add static memory usages to remembered set sizes
3059   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3060   // Print the footer of the output.
3061   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3062   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3063                          " SUMMARY"
3064                          G1PPRL_SUM_MB_FORMAT("capacity")
3065                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3066                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3067                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3068                          G1PPRL_SUM_MB_FORMAT("remset")
3069                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3070                          bytes_to_mb(_total_capacity_bytes),
3071                          bytes_to_mb(_total_used_bytes),
3072                          percent_of(_total_used_bytes, _total_capacity_bytes),
3073                          bytes_to_mb(_total_prev_live_bytes),
3074                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3075                          bytes_to_mb(_total_next_live_bytes),
3076                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
3077                          bytes_to_mb(_total_remset_bytes),
3078                          bytes_to_mb(_total_strong_code_roots_bytes));
3079 }