/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

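// Alignment, in number of G1TaskQueueEntry units, that mark stack capacities
// must honor: using the least common multiple of the VM allocation granularity
// and the chunk size guarantees that the backing memory is both a whole number
// of chunks and a whole number of allocation granules.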
size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

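// The raw list helpers below are not thread-safe on their own; callers
// serialize access via MarkStackChunkList_lock and MarkStackFreeList_lock
// respectively.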
void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

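// Bump-allocate a chunk directly from the backing array. _hwm only ever
// grows, so once it reaches _chunk_capacity all further allocations fail
// until the stack is reset or resized.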
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

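// Pushes a full chunk of entries: prefer recycling a chunk from the free
// list, otherwise carve a new one out of the backing array. Returns false
// if neither is possible, i.e. the mark stack is out of memory.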
bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

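// Root MemRegions are registered while at a safepoint; during the concurrent
// phase workers then claim them lock-free via an atomic increment of
// _claimed_root_regions in claim_next().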
G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
    _root_regions(NULL),
    _max_regions(max_regions),
    _num_root_regions(0),
    _claimed_root_regions(0),
    _scan_in_progress(false),
    _should_abort(false) {
  _root_regions = new MemRegion[_max_regions];
  if (_root_regions == NULL) {
    vm_exit_during_initialization("Could not allocate root MemRegion set.");
  }
}

G1CMRootMemRegions::~G1CMRootMemRegions() {
  delete[] _root_regions;
}

void G1CMRootMemRegions::reset() {
  _num_root_regions = 0;
}

void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
  assert_at_safepoint();
  size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
  assert(idx < _max_regions, "Trying to add more root MemRegions than there is space for: " SIZE_FORMAT, _max_regions);
  assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less than or equal to "
         "end (" PTR_FORMAT ")", p2i(start), p2i(end));
  _root_regions[idx].set_start(start);
  _root_regions[idx].set_end(end);
}

void G1CMRootMemRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  _scan_in_progress = _num_root_regions > 0;

  _claimed_root_regions = 0;
  _should_abort = false;
}

const MemRegion* G1CMRootMemRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  if (_claimed_root_regions >= _num_root_regions) {
    return NULL;
  }

  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
  if (claimed_index < _num_root_regions) {
    return &_root_regions[claimed_index];
  }
  return NULL;
}

uint G1CMRootMemRegions::num_root_regions() const {
  return (uint)_num_root_regions;
}

void G1CMRootMemRegions::notify_scan_done() {
  MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootMemRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootMemRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  if (!_should_abort) {
    assert(_claimed_root_regions >= num_root_regions(),
           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
           _claimed_root_regions, num_root_regions());
  }

  notify_scan_done();
}

bool G1CMRootMemRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      ml.wait();
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
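// For example, num_gc_workers = 8 yields (8 + 2) / 4 = 2 concurrent workers;
// the result is always at least 1.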
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap(_g1h->reserved_region()),

  _root_regions(_g1h->max_regions()),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator((int) _max_num_tasks, _task_queues),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new G1ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use different numbers of active
  // threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }
  _top_at_rebuild_starts[region_idx] = NULL;
  _region_mark_stats[region_idx].clear();
}

void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
  uint const region_idx = r->hrm_index();
  if (r->is_humongous()) {
    assert(r->is_starts_humongous(), "Should not get a continues humongous region here");
    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
      clear_statistics_in_region(j);
    }
  } else {
    clear_statistics_in_region(region_idx);
  }
}

static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
  if (bitmap->is_marked(addr)) {
    bitmap->clear(addr);
  }
}

void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
  assert_at_safepoint_on_vm_thread();

  // Need to clear all mark bits of the humongous object.
  clear_mark_if_set(_prev_mark_bitmap, r->bottom());
  clear_mark_if_set(_next_mark_bitmap, r->bottom());

  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
    return;
  }

  // Clear any statistics about the region gathered so far.
  clear_statistics(r);
}

void G1ConcurrentMark::reset_marking_for_restart() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();

    uint max_regions = _g1h->max_regions();
    for (uint i = 0; i < max_regions; i++) {
      _region_mark_stats[i].clear_during_overflow();
    }
  }

  clear_has_overflown();
  _finger = _heap.start();

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator.terminator()->reset_for_reuse((int) active_tasks);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;

  if (!concurrent) {
    // At this point we should be in a STW phase, and completed marking.
    assert_at_safepoint_on_vm_thread();
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap.end()));
  }
}

void G1ConcurrentMark::reset_at_marking_complete() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_for_restart();
  _num_active_tasks = 0;
}

G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

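// Clears a mark bitmap in parallel. Each region is processed in chunk_size()
// (1M) pieces of heap, clearing the corresponding bitmap range, so that
// suspendible workers can yield to safepoints between chunks.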
class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool do_heap_region(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.is_complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert_at_safepoint_on_vm_thread();
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::pre_initial_mark() {
  assert_at_safepoint_on_vm_thread();

  // Reset marking state.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);

  _root_regions.reset();
}

void G1ConcurrentMark::post_initial_mark() {
  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = _g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

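// Concurrent marking task run by the concurrent worker gang. Each worker
// joins the suspendible thread set and repeatedly calls do_marking_step()
// on its G1CMTask, yielding in between, until either the step completes
// without aborting or the whole marking cycle has been aborted.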
class G1CMConcurrentMarkingTask : public AbstractGangTask {
  G1ConcurrentMark*     _cm;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial*/);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
      AbstractGangTask("Concurrent Mark"), _cm(cm) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
                                                1, /* Minimum workers */
                                                _num_concurrent_workers,
                                                Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_workers() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
#ifdef ASSERT
  HeapWord* last = region->last();
  HeapRegion* hr = _g1h->heap_region_containing(last);
  assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
         "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
  assert(hr->next_top_at_mark_start() == region->start(),
         "MemRegion start should be equal to nTAMS");
#endif

  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = region->start();
  const HeapWord* end = region->end();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
  G1ConcurrentMark* _cm;
public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootMemRegions* root_regions = _cm->root_regions();
    const MemRegion* region = root_regions->claim_next();
    while (region != NULL) {
      _cm->scan_root_region(region, worker_id);
      region = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this);
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
  G1HeapVerifier* verifier = _g1h->verifier();

  verifier->verify_region_sets_optional();

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);

    size_t const BufLen = 512;
    char buffer[BufLen];

    jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
    verifier->verify(type, vo, buffer);
  }

  verifier->check_bitmaps(caller);
}

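// Updates the remembered set tracking state of all regions before the rebuild
// phase and accumulates each region's marked bytes; for humongous objects the
// marked bytes are distributed over all regions the object spans.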
class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegionClaimer _hrclaimer;
  uint volatile _total_selected_for_rebuild;

  G1PrintRegionLivenessInfoClosure _cl;

  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;

    G1PrintRegionLivenessInfoClosure* _cl;

    uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.

    void update_remset_before_rebuild(HeapRegion* hr) {
      G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();

      bool selected_for_rebuild;
      if (hr->is_humongous()) {
        bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
        selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
      } else {
        size_t const live_bytes = _cm->liveness(hr->hrm_index());
        selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
      }
      if (selected_for_rebuild) {
        _num_regions_selected_for_rebuild++;
      }
      _cm->update_top_at_rebuild_start(hr);
    }

    // Distribute the given words across the humongous object starting with hr and
    // note end of marking.
    void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
      uint const region_idx = hr->hrm_index();
      size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
      uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);

      // "Distributing" zero words means that we only note end of marking for these
      // regions.
      assert(marked_words == 0 || obj_size_in_words == marked_words,
             "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
             obj_size_in_words, marked_words);

      for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
        HeapRegion* const r = _g1h->region_at(i);
        size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);

        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
                               words_to_add, i, r->get_type_str());
        add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
        marked_words -= words_to_add;
      }
      assert(marked_words == 0,
             SIZE_FORMAT " words left after distributing space across %u regions",
             marked_words, num_regions_in_humongous);
    }

    void update_marked_bytes(HeapRegion* hr) {
      uint const region_idx = hr->hrm_index();
      size_t const marked_words = _cm->liveness(region_idx);
      // The marking attributes the object's size completely to the humongous
      // start region. We need to distribute this value across the entire set
      // of regions the humongous object spans.
      if (hr->is_humongous()) {
        assert(hr->is_starts_humongous() || marked_words == 0,
               "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
               marked_words, region_idx, hr->get_type_str());
        if (hr->is_starts_humongous()) {
          distribute_marked_bytes(hr, marked_words);
        }
      } else {
        log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
        add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
      }
    }

    void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
      hr->add_to_marked_bytes(marked_bytes);
      _cl->do_heap_region(hr);
      hr->note_end_of_marking();
    }

  public:
    G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }

    virtual bool do_heap_region(HeapRegion* r) {
      update_remset_before_rebuild(r);
      update_marked_bytes(r);

      return false;
    }

    uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
  };

public:
  G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
    AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
    _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }

  virtual void work(uint worker_id) {
    G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
    _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
    Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
  }

  uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

  // Number of regions per worker thread; roughly one thread is spawned for
  // every RegionsPerThread regions in the heap.
  static const uint RegionsPerThread = 384;
};

class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};

void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However, we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  G1Policy* policy = _g1h->policy();
  policy->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    {
      GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
      flush_all_task_caches();
    }

    // Install newly created mark bitmap as "prev".
    swap_mark_bitmaps();
    {
      GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

      uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
                                       G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
      uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);

      G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
      log_debug(gc, ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
      _g1h->workers()->run_task(&cl, num_workers);

      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.total_selected_for_rebuild());
    }
    {
      GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
      reclaim_empty_regions();
    }

    // Clean out dead classes
    if (ClassUnloadingWithConcurrentMark) {
      GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
      ClassLoaderDataGraph::purge();
    }

    _g1h->resize_heap_if_necessary();

    compute_new_sizes();

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");

    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  } else {
    // We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;

    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_for_restart();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
    report_object_count(mark_finished);
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  policy->record_concurrent_mark_remark_end();
}

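// Reclaims regions that contain no live data at the end of marking: such
// regions are moved to a free list, and the old/humongous region sets and
// the heap summary bytes are updated accordingly.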
class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
  // Per-region work during the Cleanup pause.
  class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    size_t _freed_bytes;
    FreeRegionList* _local_cleanup_list;
    uint _old_regions_removed;
    uint _humongous_regions_removed;

  public:
    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                 FreeRegionList* local_cleanup_list) :
      _g1h(g1h),
      _freed_bytes(0),
      _local_cleanup_list(local_cleanup_list),
      _old_regions_removed(0),
      _humongous_regions_removed(0) { }

    size_t freed_bytes() { return _freed_bytes; }
    const uint old_regions_removed() { return _old_regions_removed; }
    const uint humongous_regions_removed() { return _humongous_regions_removed; }

    bool do_heap_region(HeapRegion *hr) {
      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
        _freed_bytes += hr->used();
        hr->set_containing_set(NULL);
        if (hr->is_humongous()) {
          _humongous_regions_removed++;
          _g1h->free_humongous_region(hr, _local_cleanup_list);
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
        _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
        log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      }

      return false;
    }
  };

  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 Cleanup"),
    _g1h(g1h),
    _cleanup_list(cleanup_list),
    _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    assert(cl.is_complete(), "Shouldn't have aborted!");

    // Now update the old/humongous region sets
    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
    {
      MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(cl.freed_bytes());

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");
    }
  }
};

void G1ConcurrentMark::reclaim_empty_regions() {
  WorkGang* workers = _g1h->workers();
  FreeRegionList empty_regions_list("Empty Regions After Mark List");

  G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
  workers->run_task(&cl);

  if (!empty_regions_list.is_empty()) {
    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
    // Now print the empty regions list.
    G1HRPrinter* hrp = _g1h->hr_printer();
    if (hrp->is_active()) {
      FreeRegionListIterator iter(&empty_regions_list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        hrp->cleanup(hr);
      }
    }
    // And actually make them available.
    _g1h->prepend_to_freelist(&empty_regions_list);
  }
}

void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  G1Policy* policy = _g1h->policy();
  policy->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for Cleanup to finish.
  _g1h->increment_total_collections();

  // Local statistics
  double recent_cleanup_time = (os::elapsedTime() - start);
  _total_cleanup_time += recent_cleanup_time;
  _cleanup_times.add(recent_cleanup_time);

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
    policy->record_concurrent_mark_cleanup_end();
  }
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
1395 // Uses the G1CMTask associated with a worker thread (for serial reference
1396 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1397 // trace referent objects.
1398 //
1399 // Using the G1CMTask and embedded local queues avoids having the worker
1400 // threads operating on the global mark stack. This reduces the risk
1401 // of overflowing the stack - which we would rather avoid at this late
1402 // state. Also using the tasks' local queues removes the potential
1403 // of the workers interfering with each other that could occur if
1404 // operating on the global stack.
1405 
1406 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1407   G1ConcurrentMark* _cm;
1408   G1CMTask*         _task;
1409   uint              _ref_counter_limit;
1410   uint              _ref_counter;
1411   bool              _is_serial;
1412 public:
1413   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1414     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1415     _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1416     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1417   }
1418 
1419   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1420   virtual void do_oop(      oop* p) { do_oop_work(p); }
1421 
1422   template <class T> void do_oop_work(T* p) {
1423     if (_cm->has_overflown()) {
1424       return;
1425     }
1426     if (!_task->deal_with_reference(p)) {
1427       // We did not add anything to the mark bitmap (or mark stack), so there is
1428       // no point trying to drain it.
1429       return;
1430     }
1431     _ref_counter--;
1432 
1433     if (_ref_counter == 0) {
1434       // We have dealt with _ref_counter_limit references, pushing them
1435       // and objects reachable from them onto the local stack (and
1436       // possibly the global stack). Call G1CMTask::do_marking_step() to
1437       // process these entries.
1438       //
1439       // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1440       // there's nothing more to do (i.e. we're done with the entries that
1441       // were pushed as a result of the G1CMTask::deal_with_reference() calls
1442       // above) or we overflow.
1443       //
1444       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1445       // flag while there may still be some work to do. (See the comment at
1446       // the beginning of G1CMTask::do_marking_step() for those conditions -
1447       // one of which is reaching the specified time target.) It is only
1448       // when G1CMTask::do_marking_step() returns without setting the
1449       // has_aborted() flag that the marking step has completed.
1450       do {
1451         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1452         _task->do_marking_step(mark_step_duration_ms,
1453                                false      /* do_termination */,
1454                                _is_serial);
1455       } while (_task->has_aborted() && !_cm->has_overflown());
1456       _ref_counter = _ref_counter_limit;
1457     }
1458   }
1459 };
1460 
1461 // 'Drain' oop closure used by both serial and parallel reference processing.
1462 // Uses the G1CMTask associated with a given worker thread (for serial
1463 // reference processing the G1CMTask for worker 0 is used). Calls the
1464 // do_marking_step routine, with an unbelievably large timeout value,
1465 // to drain the marking data structures of the remaining entries
1466 // added by the 'keep alive' oop closure above.
1467 
1468 class G1CMDrainMarkingStackClosure : public VoidClosure {
1469   G1ConcurrentMark* _cm;
1470   G1CMTask*         _task;
1471   bool              _is_serial;
1472  public:
1473   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1474     _cm(cm), _task(task), _is_serial(is_serial) {
1475     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1476   }
1477 
1478   void do_void() {
1479     do {
1480       // We call G1CMTask::do_marking_step() to completely drain the local
1481       // and global marking stacks of entries pushed by the 'keep alive'
1482       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1483       //
1484       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1485       // if there's nothing more to do (i.e. we've completely drained the
1486       // entries that were pushed as a result of applying the 'keep alive'
1487       // closure to the entries on the discovered ref lists) or we overflow
1488       // the global marking stack.
1489       //
1490       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1491       // flag while there may still be some work to do. (See the comment at
1492       // the beginning of G1CMTask::do_marking_step() for those conditions -
1493       // one of which is reaching the specified time target.) It is only
1494       // when G1CMTask::do_marking_step() returns without setting the
1495       // has_aborted() flag that the marking step has completed.
1496 
1497       _task->do_marking_step(1000000000.0 /* something very large */,
1498                              true         /* do_termination */,
1499                              _is_serial);
1500     } while (_task->has_aborted() && !_cm->has_overflown());
1501   }
1502 };
1503 
1504 // Implementation of AbstractRefProcTaskExecutor for parallel
1505 // reference processing at the end of G1 concurrent marking
1506 
1507 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1508 private:
1509   G1CollectedHeap*  _g1h;
1510   G1ConcurrentMark* _cm;
1511   WorkGang*         _workers;
1512   uint              _active_workers;
1513 
1514 public:
1515   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1516                           G1ConcurrentMark* cm,
1517                           WorkGang* workers,
1518                           uint n_workers) :
1519     _g1h(g1h), _cm(cm),
1520     _workers(workers), _active_workers(n_workers) { }
1521 
1522   virtual void execute(ProcessTask& task, uint ergo_workers);
1523 };
1524 
1525 class G1CMRefProcTaskProxy : public AbstractGangTask {
1526   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1527   ProcessTask&      _proc_task;
1528   G1CollectedHeap*  _g1h;
1529   G1ConcurrentMark* _cm;
1530 
1531 public:
1532   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1533                        G1CollectedHeap* g1h,
1534                        G1ConcurrentMark* cm) :
1535     AbstractGangTask("Process reference objects in parallel"),
1536     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1537     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1538     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1539   }
1540 
1541   virtual void work(uint worker_id) {
1542     ResourceMark rm;
1543     HandleMark hm;
1544     G1CMTask* task = _cm->task(worker_id);
1545     G1CMIsAliveClosure g1_is_alive(_g1h);
1546     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1547     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1548 
1549     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1550   }
1551 };
1552 
1553 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
1554   assert(_workers != NULL, "Need parallel worker threads.");
1555   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1556   assert(_workers->active_workers() >= ergo_workers,
1557          "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
1558          ergo_workers, _workers->active_workers());
1559 
1560   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1561 
1562   // We need to reset the concurrency level before each
1563   // proxy task execution, so that the termination protocol
1564   // and overflow handling in G1CMTask::do_marking_step() know
1565   // how many workers to wait for.
1566   _cm->set_concurrency(ergo_workers);
1567   _workers->run_task(&proc_task_proxy, ergo_workers);
1568 }
1569 
1570 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1571   ResourceMark rm;
1572   HandleMark   hm;
1573 
1574   // Is alive closure.
1575   G1CMIsAliveClosure g1_is_alive(_g1h);
1576 
1577   // Inner scope to exclude the cleaning of the string table
1578   // from the displayed time.
1579   {
1580     GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1581 
1582     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1583 
1584     // See the comment in G1CollectedHeap::ref_processing_init()
1585     // about how reference processing currently works in G1.
1586 
1587     // Set the soft reference policy
1588     rp->setup_policy(clear_all_soft_refs);
1589     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1590 
1591     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1592     // in serial reference processing. Note these closures are also
1593     // used for serially processing (by the current thread) the
1594     // JNI references during parallel reference processing.
1595     //
1596     // These closures do not need to synchronize with the worker
1597     // threads involved in parallel reference processing as these
1598     // instances are executed serially by the current thread (i.e.
1599     // reference processing is not multi-threaded and is thus
1600     // performed by the current thread instead of a gang worker).
1601     //
1602     // The gang tasks involved in parallel reference processing create
1603     // their own instances of these closures, which do their own
1604     // synchronization among themselves.
1605     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1606     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1607 
1608     // We need at least one active thread. If reference processing
1609     // is not multi-threaded we use the current (VMThread) thread,
1610     // otherwise we use the work gang from the G1CollectedHeap and
1611     // we utilize all the worker threads we can.
1612     bool processing_is_mt = rp->processing_is_mt();
1613     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
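         // Clamp the worker count between 1 and _max_num_tasks: we cannot use
         // more workers than there are marking tasks, and we need at least one.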
1614     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1615 
1616     // Parallel processing task executor.
1617     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1618                                               _g1h->workers(), active_workers);
1619     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1620 
1621     // Set the concurrency level. The phase was already set prior to
1622     // executing the remark task.
1623     set_concurrency(active_workers);
1624 
1625     // Set the degree of MT processing here.  If the discovery was done MT,
1626     // the number of threads involved during discovery could differ from
1627     // the number of active workers.  This is OK as long as the discovered
1628     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1629     rp->set_active_mt_degree(active_workers);
1630 
1631     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1632 
1633     // Process the weak references.
1634     const ReferenceProcessorStats& stats =
1635         rp->process_discovered_references(&g1_is_alive,
1636                                           &g1_keep_alive,
1637                                           &g1_drain_mark_stack,
1638                                           executor,
1639                                           &pt);
1640     _gc_tracer_cm->report_gc_reference_stats(stats);
1641     pt.print_all_references();
1642 
1643     // The do_oop work routines of the keep_alive and drain_marking_stack
1644     // oop closures will set the has_overflown flag if we overflow the
1645     // global marking stack.
1646 
1647     assert(has_overflown() || _global_mark_stack.is_empty(),
1648            "Mark stack should be empty (unless it has overflown)");
1649 
1650     assert(rp->num_queues() == active_workers, "Number of reference queues should match the number of active workers");
1651 
1652     rp->verify_no_references_recorded();
1653     assert(!rp->discovery_enabled(), "Post condition");
1654   }
1655 
1656   if (has_overflown()) {
1657     // We cannot trust g1_is_alive and the contents of the heap if the marking stack
1658     // overflowed while processing references. Exit the VM.
1659     fatal("Overflow during reference processing, cannot continue. Please "
1660           "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1661           "restart.", MarkStackSizeMax);
1662     return;
1663   }
1664 
1665   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1666 
1667   {
1668     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1669     WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1670   }
1671 
1672   // Unload Klasses, String, Code Cache, etc.
1673   if (ClassUnloadingWithConcurrentMark) {
1674     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1675     bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
1676     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1677   } else if (StringDedup::is_enabled()) {
1678     GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm);
1679     _g1h->string_dedup_cleaning(&g1_is_alive, NULL);
1680   }
1681 }
1682 
1683 class G1PrecleanYieldClosure : public YieldClosure {
1684   G1ConcurrentMark* _cm;
1685 
1686 public:
1687   G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1688 
1689   virtual bool should_return() {
1690     return _cm->has_aborted();
1691   }
1692 
1693   virtual bool should_return_fine_grain() {
1694     _cm->do_yield_check();
1695     return _cm->has_aborted();
1696   }
1697 };
1698 
1699 void G1ConcurrentMark::preclean() {
1700   assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1701 
1702   SuspendibleThreadSetJoiner joiner;
1703 
1704   G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1705   G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1706 
1707   set_concurrency_and_phase(1, true);
1708 
1709   G1PrecleanYieldClosure yield_cl(this);
1710 
1711   ReferenceProcessor* rp = _g1h->ref_processor_cm();
1712   // Precleaning is single threaded. Temporarily disable MT discovery.
1713   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1714   rp->preclean_discovered_references(rp->is_alive_non_header(),
1715                                      &keep_alive,
1716                                      &drain_mark_stack,
1717                                      &yield_cl,
1718                                      _gc_timer_cm);
1719 }
1720 
1721 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1722 // the prev bitmap to determine liveness.
1723 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1724   G1CollectedHeap* _g1h;
1725 public:
1726   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1727 
1728   bool do_object_b(oop obj) {
1729     HeapWord* addr = (HeapWord*)obj;
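         // Treat an object as live if it lies outside the G1 reserved heap
         // (the mark bitmaps do not cover such addresses) or if the prev
         // marking information does not consider it dead.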
1730     return addr != NULL &&
1731            (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1732   }
1733 };
1734 
1735 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1736   // Depending on the completion of the marking, liveness needs to be determined
1737   // using either the next or prev bitmap.
1738   if (mark_completed) {
1739     G1ObjectCountIsAliveClosure is_alive(_g1h);
1740     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1741   } else {
1742     G1CMIsAliveClosure is_alive(_g1h);
1743     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1744   }
1745 }
1746 
1748 void G1ConcurrentMark::swap_mark_bitmaps() {
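       // After marking completes the bitmap roles flip: the old next bitmap
       // (holding the fresh marking information) becomes prev, while the old
       // prev bitmap becomes next and is flagged to be cleared before the
       // following cycle.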
1749   G1CMBitMap* temp = _prev_mark_bitmap;
1750   _prev_mark_bitmap = _next_mark_bitmap;
1751   _next_mark_bitmap = temp;
1752   _g1h->collector_state()->set_clearing_next_bitmap(true);
1753 }
1754 
1755 // Closure for marking entries in SATB buffers.
1756 class G1CMSATBBufferClosure : public SATBBufferClosure {
1757 private:
1758   G1CMTask* _task;
1759   G1CollectedHeap* _g1h;
1760 
1761   // This is very similar to G1CMTask::deal_with_reference, but with
1762   // more relaxed requirements for the argument, so this must be more
1763   // circumspect about treating the argument as an object.
1764   void do_entry(void* entry) const {
1765     _task->increment_refs_reached();
1766     oop const obj = static_cast<oop>(entry);
1767     _task->make_reference_grey(obj);
1768   }
1769 
1770 public:
1771   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1772     : _task(task), _g1h(g1h) { }
1773 
1774   virtual void do_buffer(void** buffer, size_t size) {
1775     for (size_t i = 0; i < size; ++i) {
1776       do_entry(buffer[i]);
1777     }
1778   }
1779 };
1780 
1781 class G1RemarkThreadsClosure : public ThreadClosure {
1782   G1CMSATBBufferClosure _cm_satb_cl;
1783   G1CMOopClosure _cm_cl;
1784   MarkingCodeBlobClosure _code_cl;
1785   uintx _claim_token;
1786 
1787  public:
1788   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1789     _cm_satb_cl(task, g1h),
1790     _cm_cl(g1h, task),
1791     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1792     _claim_token(Threads::thread_claim_token()) {}
1793 
1794   void do_thread(Thread* thread) {
1795     if (thread->claim_threads_do(true, _claim_token)) {
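           // claim_threads_do() returns true for exactly one worker per thread
           // and claim token, so each thread's SATB buffer and nmethods are
           // processed by a single remark worker.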
1796       SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
1797       queue.apply_closure_and_empty(&_cm_satb_cl);
1798       if (thread->is_Java_thread()) {
1799         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1800         // however, oops reachable from nmethods have very complex lifecycles:
1801         // * Alive if on the stack of an executing method
1802         // * Weakly reachable otherwise
1803         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1804         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1805         JavaThread* jt = (JavaThread*)thread;
1806         jt->nmethods_do(&_code_cl);
1807       }
1808     }
1809   }
1810 };
1811 
1812 class G1CMRemarkTask : public AbstractGangTask {
1813   G1ConcurrentMark* _cm;
1814 public:
1815   void work(uint worker_id) {
1816     G1CMTask* task = _cm->task(worker_id);
1817     task->record_start_time();
1818     {
1819       ResourceMark rm;
1820       HandleMark hm;
1821 
1822       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1823       Threads::threads_do(&threads_f);
1824     }
1825 
1826     do {
1827       task->do_marking_step(1000000000.0 /* something very large */,
1828                             true         /* do_termination       */,
1829                             false        /* is_serial            */);
1830     } while (task->has_aborted() && !_cm->has_overflown());
1831     // If we overflow, then we do not want to restart. We instead
1832     // want to abort remark and do concurrent marking again.
1833     task->record_end_time();
1834   }
1835 
1836   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1837     AbstractGangTask("Par Remark"), _cm(cm) {
1838     _cm->terminator()->reset_for_reuse(active_workers);
1839   }
1840 };
1841 
1842 void G1ConcurrentMark::finalize_marking() {
1843   ResourceMark rm;
1844   HandleMark   hm;
1845 
1846   _g1h->ensure_parsability(false);
1847 
1848   // this is remark, so we'll use up all active threads
1849   uint active_workers = _g1h->workers()->active_workers();
1850   set_concurrency_and_phase(active_workers, false /* concurrent */);
1851   // Leave _parallel_marking_threads at its
1852   // value originally calculated in the G1ConcurrentMark
1853   // constructor and pass the number of active workers
1854   // through the gang in the task.
1855 
1856   {
1857     StrongRootsScope srs(active_workers);
1858 
1859     G1CMRemarkTask remarkTask(this, active_workers);
1860     // We will start all available threads, even if we decide that the
1861     // active_workers will be fewer. The extra ones will just bail out
1862     // immediately.
1863     _g1h->workers()->run_task(&remarkTask);
1864   }
1865 
1866   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1867   guarantee(has_overflown() ||
1868             satb_mq_set.completed_buffers_num() == 0,
1869             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1870             BOOL_TO_STR(has_overflown()),
1871             satb_mq_set.completed_buffers_num());
1872 
1873   print_stats();
1874 }
1875 
1876 void G1ConcurrentMark::flush_all_task_caches() {
1877   size_t hits = 0;
1878   size_t misses = 0;
1879   for (uint i = 0; i < _max_num_tasks; i++) {
1880     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1881     hits += stats.first;
1882     misses += stats.second;
1883   }
1884   size_t sum = hits + misses;
1885   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1886                        hits, misses, percent_of(hits, sum));
1887 }
1888 
1889 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1890   _prev_mark_bitmap->clear_range(mr);
1891 }
1892 
1893 HeapRegion*
1894 G1ConcurrentMark::claim_region(uint worker_id) {
1895   // "checkpoint" the finger
1896   HeapWord* finger = _finger;
1897 
1898   while (finger < _heap.end()) {
1899     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1900 
1901     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1902     // Make sure that the reads below do not float before loading curr_region.
1903     OrderAccess::loadload();
1904     // The heap_region_containing call above may return NULL, as we always
1905     // scan and claim up to the end of the heap. In this case, just jump to the next region.
1906     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1907 
1908     // Is the gap between reading the finger and doing the CAS too long?
1909     HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
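         // Atomic::cmpxchg() returns the previous value of _finger: if it equals
         // the value we read above, our CAS installed `end` and we have claimed
         // the range [finger, end).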
1910     if (res == finger && curr_region != NULL) {
1911       // we succeeded
1912       HeapWord*   bottom        = curr_region->bottom();
1913       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1914 
1915       // Notice that _finger == end cannot be guaranteed here, since
1916       // someone else might have moved the finger even further.
1917       assert(_finger >= end, "the finger should have moved forward");
1918 
1919       if (limit > bottom) {
1920         return curr_region;
1921       } else {
1922         assert(limit == bottom,
1923                "the region limit should be at bottom");
1924         // we return NULL and the caller should try calling
1925         // claim_region() again.
1926         return NULL;
1927       }
1928     } else {
1929       assert(_finger > finger, "the finger should have moved forward");
1930       // read it again
1931       finger = _finger;
1932     }
1933   }
1934 
1935   return NULL;
1936 }
1937 
1938 #ifndef PRODUCT
1939 class VerifyNoCSetOops {
1940   G1CollectedHeap* _g1h;
1941   const char* _phase;
1942   int _info;
1943 
1944 public:
1945   VerifyNoCSetOops(const char* phase, int info = -1) :
1946     _g1h(G1CollectedHeap::heap()),
1947     _phase(phase),
1948     _info(info)
1949   { }
1950 
1951   void operator()(G1TaskQueueEntry task_entry) const {
1952     if (task_entry.is_array_slice()) {
1953       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1954       return;
1955     }
1956     guarantee(oopDesc::is_oop(task_entry.obj()),
1957               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1958               p2i(task_entry.obj()), _phase, _info);
1959     HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
1960     guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
1961               "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
1962               p2i(task_entry.obj()), _phase, _info, r->hrm_index());
1963   }
1964 };
1965 
1966 void G1ConcurrentMark::verify_no_collection_set_oops() {
1967   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1968   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1969     return;
1970   }
1971 
1972   // Verify entries on the global mark stack
1973   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1974 
1975   // Verify entries on the task queues
1976   for (uint i = 0; i < _max_num_tasks; ++i) {
1977     G1CMTaskQueue* queue = _task_queues->queue(i);
1978     queue->iterate(VerifyNoCSetOops("Queue", i));
1979   }
1980 
1981   // Verify the global finger
1982   HeapWord* global_finger = finger();
1983   if (global_finger != NULL && global_finger < _heap.end()) {
1984     // Since we always iterate over all regions, we might get a NULL HeapRegion
1985     // here.
1986     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1987     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1988               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1989               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1990   }
1991 
1992   // Verify the task fingers
1993   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1994   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1995     G1CMTask* task = _tasks[i];
1996     HeapWord* task_finger = task->finger();
1997     if (task_finger != NULL && task_finger < _heap.end()) {
1998       // See above note on the global finger verification.
1999       HeapRegion* r = _g1h->heap_region_containing(task_finger);
2000       guarantee(r == NULL || task_finger == r->bottom() ||
2001                 !r->in_collection_set() || !r->has_index_in_opt_cset(),
2002                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2003                 p2i(task_finger), HR_FORMAT_PARAMS(r));
2004     }
2005   }
2006 }
2007 #endif // PRODUCT
2008 
2009 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
2010   _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
2011 }
2012 
2013 void G1ConcurrentMark::print_stats() {
2014   if (!log_is_enabled(Debug, gc, stats)) {
2015     return;
2016   }
2017   log_debug(gc, stats)("---------------------------------------------------------------------");
2018   for (size_t i = 0; i < _num_active_tasks; ++i) {
2019     _tasks[i]->print_stats();
2020     log_debug(gc, stats)("---------------------------------------------------------------------");
2021   }
2022 }
2023 
2024 void G1ConcurrentMark::concurrent_cycle_abort() {
2025   if (!cm_thread()->during_cycle() || _has_aborted) {
2026     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2027     return;
2028   }
2029 
2030   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2031   // concurrent bitmap clearing.
2032   {
2033     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2034     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2035   }
2036   // Note we cannot clear the previous marking bitmap here
2037   // since VerifyDuringGC verifies the objects marked during
2038   // a full GC against the previous bitmap.
2039 
2040   // Empty mark stack
2041   reset_marking_for_restart();
2042   for (uint i = 0; i < _max_num_tasks; ++i) {
2043     _tasks[i]->clear_region_fields();
2044   }
2045   _first_overflow_barrier_sync.abort();
2046   _second_overflow_barrier_sync.abort();
2047   _has_aborted = true;
2048 
2049   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2050   satb_mq_set.abandon_partial_marking();
2051   // This can be called either during or outside marking; we'll read
2052   // the expected_active value from the SATB queue set.
2053   satb_mq_set.set_active_all_threads(
2054                                  false, /* new active value */
2055                                  satb_mq_set.is_active() /* expected_active */);
2056 }
2057 
2058 static void print_ms_time_info(const char* prefix, const char* name,
2059                                NumberSeq& ns) {
2060   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2061                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2062   if (ns.num() > 0) {
2063     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2064                            prefix, ns.sd(), ns.maximum());
2065   }
2066 }
2067 
2068 void G1ConcurrentMark::print_summary_info() {
2069   Log(gc, marking) log;
2070   if (!log.is_trace()) {
2071     return;
2072   }
2073 
2074   log.trace(" Concurrent marking:");
2075   print_ms_time_info("  ", "init marks", _init_times);
2076   print_ms_time_info("  ", "remarks", _remark_times);
2077   {
2078     print_ms_time_info("     ", "final marks", _remark_mark_times);
2079     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2080 
2081   }
2082   print_ms_time_info("  ", "cleanups", _cleanup_times);
2083   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2084             _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2085   log.trace("  Total stop_world time = %8.2f s.",
2086             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2087   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2088             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2089 }
2090 
2091 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2092   _concurrent_workers->print_worker_threads_on(st);
2093 }
2094 
2095 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2096   _concurrent_workers->threads_do(tc);
2097 }
2098 
2099 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2100   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2101                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2102   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2103   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2104 }
2105 
2106 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2107   ReferenceProcessor* result = g1h->ref_processor_cm();
2108   assert(result != NULL, "CM reference processor should not be NULL");
2109   return result;
2110 }
2111 
2112 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2113                                G1CMTask* task)
2114   : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2115     _g1h(g1h), _task(task)
2116 { }
2117 
2118 void G1CMTask::setup_for_region(HeapRegion* hr) {
2119   assert(hr != NULL,
2120         "claim_region() should have filtered out NULL regions");
2121   _curr_region  = hr;
2122   _finger       = hr->bottom();
2123   update_region_limit();
2124 }
2125 
2126 void G1CMTask::update_region_limit() {
2127   HeapRegion* hr            = _curr_region;
2128   HeapWord* bottom          = hr->bottom();
2129   HeapWord* limit           = hr->next_top_at_mark_start();
2130 
2131   if (limit == bottom) {
2132     // The region was collected underneath our feet.
2133     // We set the finger to bottom to ensure that the bitmap
2134     // iteration that will follow this will not do anything.
2135     // (this is not a condition that holds when we set the region up,
2136     // as the region is not supposed to be empty in the first place)
2137     _finger = bottom;
2138   } else if (limit >= _region_limit) {
2139     assert(limit >= _finger, "peace of mind");
2140   } else {
2141     assert(limit < _region_limit, "only way to get here");
2142     // This can happen under some pretty unusual circumstances.  An
2143     // evacuation pause empties the region underneath our feet (NTAMS
2144     // at bottom). We then do some allocation in the region (NTAMS
2145     // stays at bottom), followed by the region being used as a GC
2146     // alloc region (NTAMS will move to top() and the objects
2147     // originally below it will be grayed). All objects now marked in
2148     // the region are explicitly grayed, if below the global finger,
2149     // and in fact we do not need to scan anything else. So, we simply
2150     // set _finger to be limit to ensure that the bitmap iteration
2151     // doesn't do anything.
2152     _finger = limit;
2153   }
2154 
2155   _region_limit = limit;
2156 }
2157 
2158 void G1CMTask::giveup_current_region() {
2159   assert(_curr_region != NULL, "invariant");
2160   clear_region_fields();
2161 }
2162 
2163 void G1CMTask::clear_region_fields() {
2164   // Values for these three fields that indicate that we're not
2165   // holding on to a region.
2166   _curr_region   = NULL;
2167   _finger        = NULL;
2168   _region_limit  = NULL;
2169 }
2170 
2171 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2172   if (cm_oop_closure == NULL) {
2173     assert(_cm_oop_closure != NULL, "invariant");
2174   } else {
2175     assert(_cm_oop_closure == NULL, "invariant");
2176   }
2177   _cm_oop_closure = cm_oop_closure;
2178 }
2179 
2180 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2181   guarantee(next_mark_bitmap != NULL, "invariant");
2182   _next_mark_bitmap              = next_mark_bitmap;
2183   clear_region_fields();
2184 
2185   _calls                         = 0;
2186   _elapsed_time_ms               = 0.0;
2187   _termination_time_ms           = 0.0;
2188   _termination_start_time_ms     = 0.0;
2189 
2190   _mark_stats_cache.reset();
2191 }
2192 
2193 bool G1CMTask::should_exit_termination() {
2194   if (!regular_clock_call()) {
2195     return true;
2196   }
2197 
2198   // This is called when we are in the termination protocol. We should
2199   // quit if, for some reason, this task wants to abort or the global
2200   // stack is not empty (this means that we can get work from it).
2201   return !_cm->mark_stack_empty() || has_aborted();
2202 }
2203 
2204 void G1CMTask::reached_limit() {
2205   assert(_words_scanned >= _words_scanned_limit ||
2206          _refs_reached >= _refs_reached_limit,
2207          "shouldn't have been called otherwise");
2208   abort_marking_if_regular_check_fail();
2209 }
2210 
2211 bool G1CMTask::regular_clock_call() {
2212   if (has_aborted()) {
2213     return false;
2214   }
2215 
2216   // First, we need to recalculate the words scanned and refs reached
2217   // limits for the next clock call.
2218   recalculate_limits();
2219 
2220   // During the regular clock call we do the following:
2221 
2222   // (1) If an overflow has been flagged, then we abort.
2223   if (_cm->has_overflown()) {
2224     return false;
2225   }
2226 
2227   // If we are not concurrent (i.e. we're doing remark) we don't need
2228   // to check anything else. The other steps are only needed during
2229   // the concurrent marking phase.
2230   if (!_cm->concurrent()) {
2231     return true;
2232   }
2233 
2234   // (2) If marking has been aborted for Full GC, then we also abort.
2235   if (_cm->has_aborted()) {
2236     return false;
2237   }
2238 
2239   double curr_time_ms = os::elapsedVTime() * 1000.0;
2240 
2241   // (3) We check whether we should yield. If we have to, then we abort.
2242   if (SuspendibleThreadSet::should_yield()) {
2243     // We should yield. To do this we abort the task. The caller is
2244     // responsible for yielding.
2245     return false;
2246   }
2247 
2248   // (4) We check whether we've reached our time quota. If we have,
2249   // then we abort.
2250   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2251   if (elapsed_time_ms > _time_target_ms) {
2252     _has_timed_out = true;
2253     return false;
2254   }
2255 
2256   // (5) Finally, we check whether there are enough completed SATB
2257   // buffers available for processing. If there are, we abort.
2258   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2259   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2260     // We do need to process SATB buffers; we'll abort and restart
2261     // the marking task to do so
2262     return false;
2263   }
2264   return true;
2265 }
2266 
2267 void G1CMTask::recalculate_limits() {
2268   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2269   _words_scanned_limit      = _real_words_scanned_limit;
2270 
2271   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2272   _refs_reached_limit       = _real_refs_reached_limit;
2273 }
2274 
2275 void G1CMTask::decrease_limits() {
2276   // This is called when we believe that we're going to do an infrequent
2277   // operation which will increase the per byte scanned cost (i.e. move
2278   // entries to/from the global stack). It basically tries to decrease the
2279   // scanning limit so that the clock is called earlier.
2280 
2281   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2282   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
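       // Since each real limit equals the counter value at the last
       // recalculation plus a full period, pulling the limits back by three
       // quarters of a period leaves only a quarter of the usual work before
       // the clock is called again.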
2283 }
2284 
2285 void G1CMTask::move_entries_to_global_stack() {
2286   // Local array where we'll store the entries that will be popped
2287   // from the local queue.
2288   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2289 
2290   size_t n = 0;
2291   G1TaskQueueEntry task_entry;
2292   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2293     buffer[n] = task_entry;
2294     ++n;
2295   }
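       // Null-terminate the buffer if it is not completely full, so the
       // consumer (see get_entries_from_global_stack()) knows where the valid
       // entries end.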
2296   if (n < G1CMMarkStack::EntriesPerChunk) {
2297     buffer[n] = G1TaskQueueEntry();
2298   }
2299 
2300   if (n > 0) {
2301     if (!_cm->mark_stack_push(buffer)) {
2302       set_has_aborted();
2303     }
2304   }
2305 
2306   // This operation was quite expensive, so decrease the limits.
2307   decrease_limits();
2308 }
2309 
2310 bool G1CMTask::get_entries_from_global_stack() {
2311   // Local array where we'll store the entries that will be popped
2312   // from the global stack.
2313   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2314 
2315   if (!_cm->mark_stack_pop(buffer)) {
2316     return false;
2317   }
2318 
2319   // We did actually pop at least one entry.
2320   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2321     G1TaskQueueEntry task_entry = buffer[i];
2322     if (task_entry.is_null()) {
2323       break;
2324     }
2325     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2326     bool success = _task_queue->push(task_entry);
2327     // We only call this when the local queue is empty or under a
2328     // given target limit. So, we do not expect this push to fail.
2329     assert(success, "invariant");
2330   }
2331 
2332   // This operation was quite expensive, so decrease the limits
2333   decrease_limits();
2334   return true;
2335 }
2336 
2337 void G1CMTask::drain_local_queue(bool partially) {
2338   if (has_aborted()) {
2339     return;
2340   }
2341 
2342   // Decide what the target size is, depending on whether we're going to
2343   // drain it partially (so that other tasks can steal if they run out
2344   // of things to do) or totally (at the very end).
2345   size_t target_size;
2346   if (partially) {
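         // Drain the queue down to roughly a third of its capacity, capped at
         // GCDrainStackTargetSize entries, leaving the remainder available for
         // stealing by other tasks.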
2347     target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2348   } else {
2349     target_size = 0;
2350   }
2351 
2352   if (_task_queue->size() > target_size) {
2353     G1TaskQueueEntry entry;
2354     bool ret = _task_queue->pop_local(entry);
2355     while (ret) {
2356       scan_task_entry(entry);
2357       if (_task_queue->size() <= target_size || has_aborted()) {
2358         ret = false;
2359       } else {
2360         ret = _task_queue->pop_local(entry);
2361       }
2362     }
2363   }
2364 }
2365 
2366 void G1CMTask::drain_global_stack(bool partially) {
2367   if (has_aborted()) {
2368     return;
2369   }
2370 
2371   // We have a policy to drain the local queue before we attempt to
2372   // drain the global stack.
2373   assert(partially || _task_queue->size() == 0, "invariant");
2374 
2375   // Decide what the target size is, depending on whether we're going to
2376   // drain it partially (so that other tasks can steal if they run out
2377   // of things to do) or totally (at the very end).
2378   // Notice that when draining the global mark stack partially, due to the raciness
2379   // of the mark stack size update we might in fact drop below the target. But,
2380   // this is not a problem.
2381   // In case of total draining, we simply process until the global mark stack is
2382   // totally empty, disregarding the size counter.
2383   if (partially) {
2384     size_t const target_size = _cm->partial_mark_stack_size_target();
2385     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2386       if (get_entries_from_global_stack()) {
2387         drain_local_queue(partially);
2388       }
2389     }
2390   } else {
2391     while (!has_aborted() && get_entries_from_global_stack()) {
2392       drain_local_queue(partially);
2393     }
2394   }
2395 }
2396 
2397 // The SATB queue code makes several assumptions on whether to call the par or
2398 // non-par versions of the methods. This is why some of the code is
2399 // replicated. We should really get rid of the single-threaded version
2400 // of the code to simplify things.
2401 void G1CMTask::drain_satb_buffers() {
2402   if (has_aborted()) {
2403     return;
2404   }
2405 
2406   // We set this so that the regular clock knows that we're in the
2407   // middle of draining buffers and doesn't set the abort flag when it
2408   // notices that SATB buffers are available for draining. It'd be
2409   // very counterproductive if it did that. :-)
2410   _draining_satb_buffers = true;
2411 
2412   G1CMSATBBufferClosure satb_cl(this, _g1h);
2413   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2414 
2415   // This keeps claiming and applying the closure to completed buffers
2416   // until we run out of buffers or we need to abort.
2417   while (!has_aborted() &&
2418          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2419     abort_marking_if_regular_check_fail();
2420   }
2421 
2422   // Can't assert qset is empty here, even if not aborted.  If concurrent,
2423   // some other thread might be adding to the queue.  If not concurrent,
2424   // some other thread might have won the race for the last buffer, but
2425   // has not yet decremented the count.
2426 
2427   _draining_satb_buffers = false;
2428 
2429   // Again, this was a potentially expensive operation; decrease the
2430   // limits to get the regular clock call early.
2431   decrease_limits();
2432 }
2433 
2434 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2435   _mark_stats_cache.reset(region_idx);
2436 }
2437 
2438 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2439   return _mark_stats_cache.evict_all();
2440 }
2441 
2442 void G1CMTask::print_stats() {
2443   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2444   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2445                        _elapsed_time_ms, _termination_time_ms);
2446   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2447                        _step_times_ms.num(),
2448                        _step_times_ms.avg(),
2449                        _step_times_ms.sd(),
2450                        _step_times_ms.maximum(),
2451                        _step_times_ms.sum());
2452   size_t const hits = _mark_stats_cache.hits();
2453   size_t const misses = _mark_stats_cache.misses();
2454   log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2455                        hits, misses, percent_of(hits, hits + misses));
2456 }
2457 
2458 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2459   return _task_queues->steal(worker_id, task_entry);
2460 }
2461 
2462 /*****************************************************************************
2463 
2464     The do_marking_step(time_target_ms, ...) method is the building
2465     block of the parallel marking framework. It can be called in parallel
2466     with other invocations of do_marking_step() on different tasks
2467     (but only one per task, obviously) and concurrently with the
2468     mutator threads, or during remark, hence it eliminates the need
2469     for two versions of the code. When called during remark, it will
2470     pick up from where the task left off during the concurrent marking
2471     phase. Interestingly, tasks are also claimable during evacuation
2472     pauses, since do_marking_step() ensures that it aborts before
2473     it needs to yield.
2474 
2475     The data structures that it uses to do marking work are the
2476     following:
2477 
2478       (1) Marking Bitmap. If there are gray objects that appear only
2479       on the bitmap (this happens either when dealing with an overflow
2480       or when the initial marking phase has simply marked the roots
2481       and didn't push them on the stack), then tasks claim heap
2482       regions whose bitmap they then scan to find gray objects. A
2483       global finger indicates where the end of the last claimed region
2484       is. A local finger indicates how far into the region a task has
2485       scanned. The two fingers are used to determine how to gray an
2486       object (i.e. whether simply marking it is OK, as it will be
2487       visited by a task in the future, or whether it needs to be also
2488       pushed on a stack).
2489 
2490       (2) Local Queue. The task's local queue, which the task can
2491       access reasonably efficiently. Other tasks can steal from
2492       it when they run out of work. Throughout the marking phase, a
2493       task attempts to keep its local queue short but not totally
2494       empty, so that entries are available for stealing by other
2495       tasks. Only when there is no more work, a task will totally
2496       tasks. Only when there is no more work will a task totally
2497 
2498       (3) Global Mark Stack. This handles local queue overflow. During
2499       marking only sets of entries are moved between it and the local
2500       queues, as access to it requires a mutex and more fine-grain
2501       interaction with it which might cause contention. If it
2502       overflows, then the marking phase should restart and iterate
2503       over the bitmap to identify gray objects. Throughout the marking
2504       phase, tasks attempt to keep the global mark stack at a small
2505       length but not totally empty, so that entries are available for
2506       popping by other tasks. Only when there is no more work will
2507       tasks totally drain the global mark stack.
2508 
2509       (4) SATB Buffer Queue. This is where completed SATB buffers are
2510       made available. Buffers are regularly removed from this queue
2511       and scanned for roots, so that the queue doesn't get too
2512       long. During remark, all completed buffers are processed, as
2513       well as the filled-in parts of any uncompleted buffers.
2514 
2515     The do_marking_step() method tries to abort when the time target
2516     has been reached. There are a few other cases when the
2517     do_marking_step() method also aborts:
2518 
2519       (1) When the marking phase has been aborted (after a Full GC).
2520 
2521       (2) When a global overflow (on the global stack) has been
2522       triggered. Before the task aborts, it will actually sync up with
2523       the other tasks to ensure that all the marking data structures
2524       (local queues, stacks, fingers etc.)  are re-initialized so that
2525       when do_marking_step() completes, the marking phase can
2526       immediately restart.
2527 
2528       (3) When enough completed SATB buffers are available. The
2529       do_marking_step() method only tries to drain SATB buffers right
2530       at the beginning. So, if enough buffers are available, the
2531       marking step aborts and the SATB buffers are processed at
2532       the beginning of the next invocation.
2533 
2534       (4) To yield. When we have to yield, we abort and yield
2535       right at the end of do_marking_step(). This saves us from a lot
2536       of hassle as, by yielding, we might allow a Full GC. If this
2537       happens then objects will be compacted underneath our feet, the
2538       heap might shrink, etc. We save checking for this by just
2539       aborting and doing the yield right at the end.
2540 
2541     From the above it follows that the do_marking_step() method should
2542     be called in a loop (or, otherwise, regularly) until it completes.
2543 
2544     If a marking step completes without its has_aborted() flag being
2545     true, it means it has completed the current marking phase (and
2546     also all other marking tasks have done so and have all synced up).
2547 
2548     A method called regular_clock_call() is invoked "regularly" (in
2549     sub-ms intervals) throughout marking. It is this clock method that
2550     checks all the abort conditions which were mentioned above and
2551     decides when the task should abort. A work-based scheme is used to
2552     trigger this clock method: when the number of object words the
2553     marking phase has scanned or the number of references the marking
2554     phase has visited reach a given limit. Additional invocations of
2555     the clock method have been planted in a few other strategic places
2556     too. The initial reason for the clock method was to avoid calling
2557     vtime too regularly, as it is quite expensive. So, once it was in
2558     place, it was natural to piggy-back all the other conditions on it
2559     too and not constantly check them throughout the code.
2560 
2561     If do_termination is true then do_marking_step will enter its
2562     termination protocol.
2563 
2564     The value of is_serial must be true when do_marking_step is being
2565     called serially (i.e. by the VMThread) and do_marking_step should
2566     skip any synchronization in the termination and overflow code.
2567     Examples include the serial remark code and the serial reference
2568     processing closures.
2569 
2570     The value of is_serial must be false when do_marking_step is
2571     being called by any of the worker threads in a work gang.
2572     Examples include the concurrent marking code (CMMarkingTask),
2573     the MT remark code, and the MT reference processing closures.
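
    A typical invocation therefore wraps do_marking_step() in a retry
    loop; the remark and reference processing code in this file uses the
    following pattern (here task and cm stand for the G1CMTask and
    G1ConcurrentMark at hand):

      do {
        task->do_marking_step(1000000000.0,  // something very large
                              true,          // do_termination
                              is_serial);
      } while (task->has_aborted() && !cm->has_overflown());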
2574 
2575  *****************************************************************************/
2576 
2577 void G1CMTask::do_marking_step(double time_target_ms,
2578                                bool do_termination,
2579                                bool is_serial) {
2580   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2581 
2582   _start_time_ms = os::elapsedVTime() * 1000.0;
2583 
2584   // If do_stealing is true then do_marking_step will attempt to
2585   // steal work from the other G1CMTasks. It only makes sense to
2586   // enable stealing when the termination protocol is enabled
2587   // and do_marking_step() is not being called serially.
2588   bool do_stealing = do_termination && !is_serial;
2589 
2590   double diff_prediction_ms = _g1h->policy()->predictor().get_new_prediction(&_marking_step_diff_ms);
2591   _time_target_ms = time_target_ms - diff_prediction_ms;
2592 
2593   // set up the variables that are used in the work-based scheme to
2594   // call the regular clock method
2595   _words_scanned = 0;
2596   _refs_reached  = 0;
2597   recalculate_limits();
2598 
2599   // clear all flags
2600   clear_has_aborted();
2601   _has_timed_out = false;
2602   _draining_satb_buffers = false;
2603 
2604   ++_calls;
2605 
2606   // Set up the bitmap and oop closures. Anything that uses them is
2607   // eventually called from this method, so it is OK to allocate them
2608   // locally, on the stack.
2609   G1CMBitMapClosure bitmap_closure(this, _cm);
2610   G1CMOopClosure cm_oop_closure(_g1h, this);
2611   set_cm_oop_closure(&cm_oop_closure);
2612 
2613   if (_cm->has_overflown()) {
2614     // This can happen if the mark stack overflows during a GC pause
2615     // and this task, after a yield point, restarts. We have to abort
2616     // as we need to get into the overflow protocol which happens
2617     // right at the end of this task.
2618     set_has_aborted();
2619   }
2620 
2621   // First drain any available SATB buffers. After this, we will not
2622   // look at SATB buffers before the next invocation of this method.
2623   // If enough completed SATB buffers are queued up, the regular clock
2624   // will abort this task so that it restarts.
2625   drain_satb_buffers();
2626   // ...then partially drain the local queue and the global stack
2627   drain_local_queue(true);
2628   drain_global_stack(true);
2629 
2630   do {
2631     if (!has_aborted() && _curr_region != NULL) {
2632       // This means that we're already holding on to a region.
2633       assert(_finger != NULL, "if region is not NULL, then the finger "
2634              "should not be NULL either");
2635 
2636       // We might have restarted this task after an evacuation pause
2637       // which might have evacuated the region we're holding on to
2638       // underneath our feet. Let's read its limit again to make sure
2639       // that we do not iterate over a region of the heap that
2640       // contains garbage (update_region_limit() will also move
2641       // _finger to the start of the region if it is found empty).
2642       update_region_limit();
2643       // We will start from _finger not from the start of the region,
2644       // as we might be restarting this task after aborting half-way
2645       // through scanning this region. In this case, _finger points to
2646       // the address where we last found a marked object. If this is a
2647       // fresh region, _finger points to start().
2648       MemRegion mr = MemRegion(_finger, _region_limit);
2649 
2650       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2651              "humongous regions should go around loop once only");
2652 
2653       // Some special cases:
2654       // If the memory region is empty, we can just give up the region.
2655       // If the current region is humongous then we only need to check
2656       // the bitmap for the bit associated with the start of the object,
2657       // scan the object if it's live, and give up the region.
2658       // Otherwise, let's iterate over the bitmap of the part of the region
2659       // that is left.
2660       // If the iteration is successful, give up the region.
2661       if (mr.is_empty()) {
2662         giveup_current_region();
2663         abort_marking_if_regular_check_fail();
2664       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2665         if (_next_mark_bitmap->is_marked(mr.start())) {
2666           // The object is marked - apply the closure
2667           bitmap_closure.do_addr(mr.start());
2668         }
2669         // Even if this task aborted while scanning the humongous object
2670         // we can (and should) give up the current region.
2671         giveup_current_region();
2672         abort_marking_if_regular_check_fail();
2673       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2674         giveup_current_region();
2675         abort_marking_if_regular_check_fail();
2676       } else {
2677         assert(has_aborted(), "currently the only way to do so");
2678         // The only way to abort the bitmap iteration is to return
2679         // false from the do_addr() method. However, inside the
2680         // do_addr() method we move the _finger to point to the
2681         // object currently being looked at. So, if we bail out, we
2682         // have definitely set _finger to something non-null.
2683         assert(_finger != NULL, "invariant");
2684 
2685         // Region iteration was actually aborted. So now _finger
2686         // points to the address of the object we last scanned. If we
2687         // leave it there, when we restart this task, we will rescan
2688         // the object. It is easy to avoid this. We move the finger by
2689         // enough to point to the next possible object header.
2690         assert(_finger < _region_limit, "invariant");
2691         HeapWord* const new_finger = _finger + ((oop)_finger)->size();
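             // (sizes are in HeapWords, so new_finger lands on the first word
             // past the current object, i.e. the next possible object header)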
2692         // Check if bitmap iteration was aborted while scanning the last object
2693         if (new_finger >= _region_limit) {
2694           giveup_current_region();
2695         } else {
2696           move_finger_to(new_finger);
2697         }
2698       }
2699     }
2700     // At this point we have either completed iterating over the
2701     // region we were holding on to, or we have aborted.
2702 
    // We then partially drain the local queue and the global stack to
    // keep their sizes bounded. (It is not obvious that this drain is
    // strictly necessary.)
2705     drain_local_queue(true);
2706     drain_global_stack(true);
2707 
2708     // Read the note on the claim_region() method on why it might
2709     // return NULL with potentially more regions available for
2710     // claiming and why we have to check out_of_regions() to determine
2711     // whether we're done or not.
2712     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2713       // We are going to try to claim a new region. We should have
2714       // given up on the previous one.
2715       // Separated the asserts so that we know which one fires.
2716       assert(_curr_region  == NULL, "invariant");
2717       assert(_finger       == NULL, "invariant");
2718       assert(_region_limit == NULL, "invariant");
2719       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2720       if (claimed_region != NULL) {
2721         // Yes, we managed to claim one
2722         setup_for_region(claimed_region);
2723         assert(_curr_region == claimed_region, "invariant");
2724       }
2725       // It is important to call the regular clock here. It might take
2726       // a while to claim a region if, for example, we hit a large
2727       // block of empty regions. So we need to call the regular clock
2728       // method once round the loop to make sure it's called
2729       // frequently enough.
2730       abort_marking_if_regular_check_fail();
2731     }
2732 
2733     if (!has_aborted() && _curr_region == NULL) {
2734       assert(_cm->out_of_regions(),
2735              "at this point we should be out of regions");
2736     }
  } while (_curr_region != NULL && !has_aborted());
2738 
2739   if (!has_aborted()) {
2740     // We cannot check whether the global stack is empty, since other
2741     // tasks might be pushing objects to it concurrently.
2742     assert(_cm->out_of_regions(),
2743            "at this point we should be out of regions");
2744     // Try to reduce the number of available SATB buffers so that
2745     // remark has less work to do.
2746     drain_satb_buffers();
2747   }
2748 
2749   // Since we've done everything else, we can now totally drain the
2750   // local queue and global stack.
2751   drain_local_queue(false);
2752   drain_global_stack(false);
2753 
  // Attempt to steal work from other tasks' queues.
2755   if (do_stealing && !has_aborted()) {
2756     // We have not aborted. This means that we have finished all that
2757     // we could. Let's try to do some stealing...
2758 
2759     // We cannot check whether the global stack is empty, since other
2760     // tasks might be pushing objects to it concurrently.
2761     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2762            "only way to reach here");
2763     while (!has_aborted()) {
2764       G1TaskQueueEntry entry;
2765       if (_cm->try_stealing(_worker_id, entry)) {
2766         scan_task_entry(entry);
2767 
2768         // And since we're towards the end, let's totally drain the
2769         // local queue and global stack.
2770         drain_local_queue(false);
2771         drain_global_stack(false);
2772       } else {
2773         break;
2774       }
2775     }
2776   }
2777 
  // If we still have not aborted, try to enter the termination
  // protocol.
2780   if (do_termination && !has_aborted()) {
2781     // We cannot check whether the global stack is empty, since other
2782     // tasks might be concurrently pushing objects on it.
2783     // Separated the asserts so that we know which one fires.
2784     assert(_cm->out_of_regions(), "only way to reach here");
2785     assert(_task_queue->size() == 0, "only way to reach here");
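    // os::elapsedVTime() returns the current thread's consumed CPU
    // (virtual) time in seconds (where the platform supports it), so
    // multiply by 1000.0 to get ms.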
2786     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2787 
    // The G1CMTask class also extends the TerminatorTerminator class,
    // so its should_exit_termination() method is consulted when
    // deciding whether to exit the termination protocol.
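    // Broadly: offer_termination() returns true only once all tasks
    // have offered termination (all work is globally done), and false
    // when should_exit_termination() asks this task to leave the
    // protocol early, e.g. because new work may have appeared.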
2791     bool finished = (is_serial ||
2792                      _cm->terminator()->offer_termination(this));
2793     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2794     _termination_time_ms +=
2795       termination_end_time_ms - _termination_start_time_ms;
2796 
2797     if (finished) {
2798       // We're all done.
2799 
2800       // We can now guarantee that the global stack is empty, since
2801       // all other tasks have finished. We separated the guarantees so
2802       // that, if a condition is false, we can immediately find out
2803       // which one.
2804       guarantee(_cm->out_of_regions(), "only way to reach here");
2805       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2806       guarantee(_task_queue->size() == 0, "only way to reach here");
2807       guarantee(!_cm->has_overflown(), "only way to reach here");
2808       guarantee(!has_aborted(), "should never happen if termination has completed");
2809     } else {
      // Apparently there is more work to do. Abort this task; the
      // caller will restart it and we can hopefully find more things
      // to do.
2812       set_has_aborted();
2813     }
2814   }
2815 
  // Mainly for debugging purposes, to make sure that a pointer to the
  // closure which was allocated on this frame's stack doesn't escape
  // it by accident.
2819   set_cm_oop_closure(NULL);
2820   double end_time_ms = os::elapsedVTime() * 1000.0;
2821   double elapsed_time_ms = end_time_ms - _start_time_ms;
2822   // Update the step history.
2823   _step_times_ms.add(elapsed_time_ms);
2824 
2825   if (has_aborted()) {
2826     // The task was aborted for some reason.
2827     if (_has_timed_out) {
2828       double diff_ms = elapsed_time_ms - _time_target_ms;
2829       // Keep statistics of how well we did with respect to hitting
2830       // our target only if we actually timed out (if we aborted for
2831       // other reasons, then the results might get skewed).
2832       _marking_step_diff_ms.add(diff_ms);
2833     }
2834 
2835     if (_cm->has_overflown()) {
2836       // This is the interesting one. We aborted because a global
2837       // overflow was raised. This means we have to restart the
2838       // marking phase and start iterating over regions. However, in
2839       // order to do this we have to make sure that all tasks stop
2840       // what they are doing and re-initialize in a safe manner. We
2841       // will achieve this with the use of two barrier sync points.
2842 
2843       if (!is_serial) {
2844         // We only need to enter the sync barrier if being called
2845         // from a parallel context
2846         _cm->enter_first_sync_barrier(_worker_id);
2847 
2848         // When we exit this sync barrier we know that all tasks have
2849         // stopped doing marking work. So, it's now safe to
2850         // re-initialize our data structures.
2851       }
2852 
2853       clear_region_fields();
2854       flush_mark_stats_cache();
2855 
2856       if (!is_serial) {
2857         // If we're executing the concurrent phase of marking, reset the marking
2858         // state; otherwise the marking state is reset after reference processing,
2859         // during the remark pause.
2860         // If we reset here as a result of an overflow during the remark we will
2861         // see assertion failures from any subsequent set_concurrency_and_phase()
2862         // calls.
2863         if (_cm->concurrent() && _worker_id == 0) {
          // Worker 0 is responsible for clearing the global data structures
          // because of an overflow. During STW we should not clear the
          // overflow flag (in G1ConcurrentMark::reset_marking_for_restart())
          // since we rely on it being true when we exit this method to
          // abort the pause and restart concurrent marking.
2868           _cm->reset_marking_for_restart();
2869 
2870           log_info(gc, marking)("Concurrent Mark reset for overflow");
2871         }
2872 
2873         // ...and enter the second barrier.
2874         _cm->enter_second_sync_barrier(_worker_id);
2875       }
      // At this point, if we are in the concurrent phase of marking,
      // everything has been re-initialized and we are ready to
      // restart.
2879     }
2880   }
2881 }
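
// For orientation only: a hedged sketch of how do_marking_step() is
// typically driven from a concurrent marking worker (the exact call
// sites live elsewhere; "mark_step_duration_ms" is an assumed local,
// usually derived from the G1ConcMarkStepDurationMillis flag):
//
//   G1CMTask* task = _cm->task(worker_id);
//   do {
//     task->do_marking_step(mark_step_duration_ms,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//     ...
//   } while (!task->has_aborted() && !_cm->has_aborted());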
2882 
2883 G1CMTask::G1CMTask(uint worker_id,
2884                    G1ConcurrentMark* cm,
2885                    G1CMTaskQueue* task_queue,
2886                    G1RegionMarkStats* mark_stats,
2887                    uint max_regions) :
2888   _objArray_processor(this),
2889   _worker_id(worker_id),
2890   _g1h(G1CollectedHeap::heap()),
2891   _cm(cm),
2892   _next_mark_bitmap(NULL),
2893   _task_queue(task_queue),
2894   _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2895   _calls(0),
2896   _time_target_ms(0.0),
2897   _start_time_ms(0.0),
2898   _cm_oop_closure(NULL),
2899   _curr_region(NULL),
2900   _finger(NULL),
2901   _region_limit(NULL),
2902   _words_scanned(0),
2903   _words_scanned_limit(0),
2904   _real_words_scanned_limit(0),
2905   _refs_reached(0),
2906   _refs_reached_limit(0),
2907   _real_refs_reached_limit(0),
2908   _has_aborted(false),
2909   _has_timed_out(false),
2910   _draining_satb_buffers(false),
2911   _step_times_ms(),
2912   _elapsed_time_ms(0.0),
2913   _termination_time_ms(0.0),
2914   _termination_start_time_ms(0.0),
2915   _marking_step_diff_ms()
2916 {
2917   guarantee(task_queue != NULL, "invariant");
2918 
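  // Seed the step-time diff statistics so that the very first prediction
  // has a sample to work from; the 0.5 ms value is just a starting guess.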
2919   _marking_step_diff_ms.add(0.5);
2920 }
2921 
2922 // These are formatting macros that are used below to ensure
2923 // consistent formatting. The *_H_* versions are used to format the
2924 // header for a particular value and they should be kept consistent
2925 // with the corresponding macro. Also note that most of the macros add
2926 // the necessary white space (as a prefix) which makes them a bit
2927 // easier to compose.
2928 
2929 // All the output lines are prefixed with this string to be able to
2930 // identify them easily in a large log file.
2931 #define G1PPRL_LINE_PREFIX            "###"
2932 
2933 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
2934 #ifdef _LP64
2935 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
2936 #else // _LP64
2937 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
2938 #endif // _LP64
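// (The header widths mirror G1PPRL_ADDR_BASE_FORMAT: two "0x"-prefixed
// pointers of 16 (LP64) or 8 hex digits joined by '-', i.e.
// 2 * 18 + 1 = 37 or 2 * 10 + 1 = 21 characters.)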
2939 
2940 // For per-region info
2941 #define G1PPRL_TYPE_FORMAT            "   %-4s"
2942 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
2943 #define G1PPRL_STATE_FORMAT           "   %-5s"
2944 #define G1PPRL_STATE_H_FORMAT         "   %5s"
2945 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
2946 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
2947 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
2948 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
2949 
2950 // For summary info
2951 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
2952 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
2953 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
2954 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
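
// For illustration only (spacing and values are approximate), a
// per-region line built from these formats looks roughly like:
//
// ###    OLD   0x00000000c0000000-0x00000000c0200000    1048576 ...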
2955 
2956 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2957   _total_used_bytes(0), _total_capacity_bytes(0),
2958   _total_prev_live_bytes(0), _total_next_live_bytes(0),
2959   _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2960 {
2961   if (!log_is_enabled(Trace, gc, liveness)) {
2962     return;
2963   }
2964 
2965   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2966   MemRegion g1_reserved = g1h->g1_reserved();
2967   double now = os::elapsedTime();
2968 
2969   // Print the header of the output.
2970   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2971   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2972                           G1PPRL_SUM_ADDR_FORMAT("reserved")
2973                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
2974                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
2975                           HeapRegion::GrainBytes);
2976   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2977   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2978                           G1PPRL_TYPE_H_FORMAT
2979                           G1PPRL_ADDR_BASE_H_FORMAT
2980                           G1PPRL_BYTE_H_FORMAT
2981                           G1PPRL_BYTE_H_FORMAT
2982                           G1PPRL_BYTE_H_FORMAT
2983                           G1PPRL_DOUBLE_H_FORMAT
2984                           G1PPRL_BYTE_H_FORMAT
2985                           G1PPRL_STATE_H_FORMAT
2986                           G1PPRL_BYTE_H_FORMAT,
2987                           "type", "address-range",
2988                           "used", "prev-live", "next-live", "gc-eff",
2989                           "remset", "state", "code-roots");
2990   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2991                           G1PPRL_TYPE_H_FORMAT
2992                           G1PPRL_ADDR_BASE_H_FORMAT
2993                           G1PPRL_BYTE_H_FORMAT
2994                           G1PPRL_BYTE_H_FORMAT
2995                           G1PPRL_BYTE_H_FORMAT
2996                           G1PPRL_DOUBLE_H_FORMAT
2997                           G1PPRL_BYTE_H_FORMAT
2998                           G1PPRL_STATE_H_FORMAT
2999                           G1PPRL_BYTE_H_FORMAT,
3000                           "", "",
3001                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3002                           "(bytes)", "", "(bytes)");
3003 }
3004 
3005 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
3006   if (!log_is_enabled(Trace, gc, liveness)) {
3007     return false;
3008   }
3009 
3010   const char* type       = r->get_type_str();
3011   HeapWord* bottom       = r->bottom();
3012   HeapWord* end          = r->end();
3013   size_t capacity_bytes  = r->capacity();
3014   size_t used_bytes      = r->used();
3015   size_t prev_live_bytes = r->live_bytes();
3016   size_t next_live_bytes = r->next_live_bytes();
3017   double gc_eff          = r->gc_efficiency();
3018   size_t remset_bytes    = r->rem_set()->mem_size();
3019   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3020   const char* remset_type = r->rem_set()->get_short_state_str();
3021 
3022   _total_used_bytes      += used_bytes;
3023   _total_capacity_bytes  += capacity_bytes;
3024   _total_prev_live_bytes += prev_live_bytes;
3025   _total_next_live_bytes += next_live_bytes;
3026   _total_remset_bytes    += remset_bytes;
3027   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3028 
3029   // Print a line for this particular region.
3030   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3031                           G1PPRL_TYPE_FORMAT
3032                           G1PPRL_ADDR_BASE_FORMAT
3033                           G1PPRL_BYTE_FORMAT
3034                           G1PPRL_BYTE_FORMAT
3035                           G1PPRL_BYTE_FORMAT
3036                           G1PPRL_DOUBLE_FORMAT
3037                           G1PPRL_BYTE_FORMAT
3038                           G1PPRL_STATE_FORMAT
3039                           G1PPRL_BYTE_FORMAT,
3040                           type, p2i(bottom), p2i(end),
3041                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3042                           remset_bytes, remset_type, strong_code_roots_bytes);
3043 
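  // Returning false keeps the heap-region iteration going, so a line is
  // printed for every region.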
3044   return false;
3045 }
3046 
3047 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3048   if (!log_is_enabled(Trace, gc, liveness)) {
3049     return;
3050   }
3051 
  // Add the free-list and static memory usage to the remembered set sizes.
3053   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3054   // Print the footer of the output.
3055   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3056   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3057                          " SUMMARY"
3058                          G1PPRL_SUM_MB_FORMAT("capacity")
3059                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3060                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3061                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3062                          G1PPRL_SUM_MB_FORMAT("remset")
3063                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3064                          bytes_to_mb(_total_capacity_bytes),
3065                          bytes_to_mb(_total_used_bytes),
3066                          percent_of(_total_used_bytes, _total_capacity_bytes),
3067                          bytes_to_mb(_total_prev_live_bytes),
3068                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3069                          bytes_to_mb(_total_next_live_bytes),
3070                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
3071                          bytes_to_mb(_total_remset_bytes),
3072                          bytes_to_mb(_total_strong_code_roots_bytes));
3073 }