rev 58017 : [mq]: 8238854-remove-superfluous-alloc-checks

   1 /*
   2  * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorState.hpp"
  31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1DirtyCardQueue.hpp"
  34 #include "gc/g1/g1HeapVerifier.hpp"
  35 #include "gc/g1/g1OopClosures.inline.hpp"
  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/g1ThreadLocalData.hpp"
  40 #include "gc/g1/g1Trace.hpp"
  41 #include "gc/g1/heapRegion.inline.hpp"
  42 #include "gc/g1/heapRegionRemSet.hpp"
  43 #include "gc/g1/heapRegionSet.inline.hpp"
  44 #include "gc/shared/gcId.hpp"
  45 #include "gc/shared/gcTimer.hpp"
  46 #include "gc/shared/gcTraceTime.inline.hpp"
  47 #include "gc/shared/gcVMOperations.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/suspendibleThreadSet.hpp"
  52 #include "gc/shared/taskTerminator.hpp"
  53 #include "gc/shared/taskqueue.inline.hpp"
  54 #include "gc/shared/weakProcessor.inline.hpp"
  55 #include "gc/shared/workerPolicy.hpp"
  56 #include "include/jvm.h"
  57 #include "logging/log.hpp"
  58 #include "memory/allocation.hpp"
  59 #include "memory/iterator.hpp"
  60 #include "memory/resourceArea.hpp"
  61 #include "memory/universe.hpp"
  62 #include "oops/access.inline.hpp"
  63 #include "oops/oop.inline.hpp"
  64 #include "runtime/atomic.hpp"
  65 #include "runtime/handles.inline.hpp"
  66 #include "runtime/java.hpp"
  67 #include "runtime/orderAccess.hpp"
  68 #include "runtime/prefetch.inline.hpp"
  69 #include "services/memTracker.hpp"
  70 #include "utilities/align.hpp"
  71 #include "utilities/growableArray.hpp"
  72 
  73 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  74   assert(addr < _cm->finger(), "invariant");
  75   assert(addr >= _task->finger(), "invariant");
  76 
  77   // We move that task's local finger along.
  78   _task->move_finger_to(addr);
  79 
  80   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  81   // we only partially drain the local queue and global stack
  82   _task->drain_local_queue(true);
  83   _task->drain_global_stack(true);
  84 
  85   // if the has_aborted flag has been raised, we need to bail out of
  86   // the iteration
  87   return !_task->has_aborted();
  88 }
  89 
  90 G1CMMarkStack::G1CMMarkStack() :
  91   _max_chunk_capacity(0),
  92   _base(NULL),
  93   _chunk_capacity(0) {
  94   set_empty();
  95 }
  96 
  97 bool G1CMMarkStack::resize(size_t new_capacity) {
  98   assert(is_empty(), "Only resize when stack is empty.");
  99   assert(new_capacity <= _max_chunk_capacity,
 100          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
 101 
 102   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
 103 
 104   if (new_base == NULL) {
 105     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
 106     return false;
 107   }
 108   // Release old mapping.
 109   if (_base != NULL) {
 110     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 111   }
 112 
 113   _base = new_base;
 114   _chunk_capacity = new_capacity;
 115   set_empty();
 116 
 117   return true;
 118 }
 119 
 120 size_t G1CMMarkStack::capacity_alignment() {
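        // The alignment is expressed in number of G1TaskQueueEntry elements: the least common
        // multiple of the OS allocation granularity and the chunk size, converted to entries.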
 121   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 122 }
 123 
 124 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 125   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 126 
 127   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 128 
 129   _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 130   size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 131 
 132   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 133             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 134             _max_chunk_capacity,
 135             initial_chunk_capacity);
 136 
 137   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 138                 initial_chunk_capacity, _max_chunk_capacity);
 139 
 140   return resize(initial_chunk_capacity);
 141 }
 142 
 143 void G1CMMarkStack::expand() {
 144   if (_chunk_capacity == _max_chunk_capacity) {
 145     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 146     return;
 147   }
 148   size_t old_capacity = _chunk_capacity;
 149   // Double capacity if possible
 150   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 151 
 152   if (resize(new_capacity)) {
 153     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 154                   old_capacity, new_capacity);
 155   } else {
 156     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 157                     old_capacity, new_capacity);
 158   }
 159 }
 160 
 161 G1CMMarkStack::~G1CMMarkStack() {
 162   if (_base != NULL) {
 163     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 164   }
 165 }
 166 
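      // The list helpers below are not thread-safe on their own; callers serialize through
      // MarkStackChunkList_lock or MarkStackFreeList_lock, respectively.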
 167 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
 168   elem->next = *list;
 169   *list = elem;
 170 }
 171 
 172 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
 173   MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 174   add_chunk_to_list(&_chunk_list, elem);
 175   _chunks_in_chunk_list++;
 176 }
 177 
 178 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
 179   MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 180   add_chunk_to_list(&_free_list, elem);
 181 }
 182 
 183 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
 184   TaskQueueEntryChunk* result = *list;
 185   if (result != NULL) {
 186     *list = (*list)->next;
 187   }
 188   return result;
 189 }
 190 
 191 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 192   MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 193   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
 194   if (result != NULL) {
 195     _chunks_in_chunk_list--;
 196   }
 197   return result;
 198 }
 199 
 200 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 201   MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 202   return remove_chunk_from_list(&_free_list);
 203 }
 204 
 205 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
 206   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 207   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 208   // wraparound of _hwm.
 209   if (_hwm >= _chunk_capacity) {
 210     return NULL;
 211   }
 212 
 213   size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);
 214   if (cur_idx >= _chunk_capacity) {
 215     return NULL;
 216   }
 217 
 218   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
 219   result->next = NULL;
 220   return result;
 221 }
 222 
 223 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
 224   // Get a new chunk.
 225   TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
 226 
 227   if (new_chunk == NULL) {
 228     // Did not get a chunk from the free list. Allocate from backing memory.
 229     new_chunk = allocate_new_chunk();
 230 
 231     if (new_chunk == NULL) {
 232       return false;
 233     }
 234   }
 235 
 236   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 237 
 238   add_chunk_to_chunk_list(new_chunk);
 239 
 240   return true;
 241 }
 242 
 243 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
 244   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 245 
 246   if (cur == NULL) {
 247     return false;
 248   }
 249 
 250   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 251 
 252   add_chunk_to_free_list(cur);
 253   return true;
 254 }
 255 
 256 void G1CMMarkStack::set_empty() {
 257   _chunks_in_chunk_list = 0;
 258   _hwm = 0;
 259   _chunk_list = NULL;
 260   _free_list = NULL;
 261 }
 262 
 263 G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
 264     _root_regions(NULL),
 265     _max_regions(max_regions),
 266     _num_root_regions(0),
 267     _claimed_root_regions(0),
 268     _scan_in_progress(false),
 269     _should_abort(false) {
 270   _root_regions = new MemRegion[_max_regions];
 271   if (_root_regions == NULL) {
 272     vm_exit_during_initialization("Could not allocate root MemRegion set.");
 273   }
 274 }
 275 
 276 G1CMRootMemRegions::~G1CMRootMemRegions() {
 277   delete[] _root_regions;
 278 }
 279 
 280 void G1CMRootMemRegions::reset() {
 281   _num_root_regions = 0;
 282 }
 283 
 284 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
 285   assert_at_safepoint();
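        // Multiple workers may add root regions in parallel; fetch_and_add claims a unique slot.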
 286   size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
  287   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space for, maximum " SIZE_FORMAT, _max_regions);
  288   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less than or equal to "
  289          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
 290   _root_regions[idx].set_start(start);
 291   _root_regions[idx].set_end(end);
 292 }
 293 
 294 void G1CMRootMemRegions::prepare_for_scan() {
 295   assert(!scan_in_progress(), "pre-condition");
 296 
 297   _scan_in_progress = _num_root_regions > 0;
 298 
 299   _claimed_root_regions = 0;
 300   _should_abort = false;
 301 }
 302 
 303 const MemRegion* G1CMRootMemRegions::claim_next() {
 304   if (_should_abort) {
 305     // If someone has set the should_abort flag, we return NULL to
 306     // force the caller to bail out of their loop.
 307     return NULL;
 308   }
 309 
 310   if (_claimed_root_regions >= _num_root_regions) {
 311     return NULL;
 312   }
 313 
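        // Several threads may pass the check above concurrently; the atomic increment hands out
        // unique indices, and the bounds check below discards claims beyond the last root region.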
 314   size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u);
 315   if (claimed_index < _num_root_regions) {
 316     return &_root_regions[claimed_index];
 317   }
 318   return NULL;
 319 }
 320 
 321 uint G1CMRootMemRegions::num_root_regions() const {
 322   return (uint)_num_root_regions;
 323 }
 324 
 325 void G1CMRootMemRegions::notify_scan_done() {
 326   MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 327   _scan_in_progress = false;
 328   RootRegionScan_lock->notify_all();
 329 }
 330 
 331 void G1CMRootMemRegions::cancel_scan() {
 332   notify_scan_done();
 333 }
 334 
 335 void G1CMRootMemRegions::scan_finished() {
 336   assert(scan_in_progress(), "pre-condition");
 337 
 338   if (!_should_abort) {
 339     assert(_claimed_root_regions >= num_root_regions(),
 340            "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
 341            _claimed_root_regions, num_root_regions());
 342   }
 343 
 344   notify_scan_done();
 345 }
 346 
 347 bool G1CMRootMemRegions::wait_until_scan_finished() {
 348   if (!scan_in_progress()) {
 349     return false;
 350   }
 351 
 352   {
 353     MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 354     while (scan_in_progress()) {
 355       ml.wait();
 356     }
 357   }
 358   return true;
 359 }
 360 
 361 // Returns the maximum number of workers to be used in a concurrent
 362 // phase based on the number of GC workers being used in a STW
 363 // phase.
 364 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
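        // Use roughly one concurrent worker per four STW GC workers (rounded), but at least one.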
 365   return MAX2((num_gc_workers + 2) / 4, 1U);
 366 }
 367 
 368 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
 369                                    G1RegionToSpaceMapper* prev_bitmap_storage,
 370                                    G1RegionToSpaceMapper* next_bitmap_storage) :
 371   // _cm_thread set inside the constructor
 372   _g1h(g1h),
 373   _completed_initialization(false),
 374 
 375   _mark_bitmap_1(),
 376   _mark_bitmap_2(),
 377   _prev_mark_bitmap(&_mark_bitmap_1),
 378   _next_mark_bitmap(&_mark_bitmap_2),
 379 
 380   _heap(_g1h->reserved_region()),
 381 
 382   _root_regions(_g1h->max_regions()),
 383 
 384   _global_mark_stack(),
 385 
 386   // _finger set in set_non_marking_state
 387 
 388   _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
 389   _max_num_tasks(ParallelGCThreads),
 390   // _num_active_tasks set in set_non_marking_state()
 391   // _tasks set inside the constructor
 392 
 393   _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
 394   _terminator((int) _max_num_tasks, _task_queues),
 395 
 396   _first_overflow_barrier_sync(),
 397   _second_overflow_barrier_sync(),
 398 
 399   _has_overflown(false),
 400   _concurrent(false),
 401   _has_aborted(false),
 402   _restart_for_overflow(false),
 403   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 404   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 405 
 406   // _verbose_level set below
 407 
 408   _init_times(),
 409   _remark_times(),
 410   _remark_mark_times(),
 411   _remark_weak_ref_times(),
 412   _cleanup_times(),
 413   _total_cleanup_time(0.0),
 414 
 415   _accum_task_vtime(NULL),
 416 
 417   _concurrent_workers(NULL),
 418   _num_concurrent_workers(0),
 419   _max_concurrent_workers(0),
 420 
 421   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 422   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 423 {
 424   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 425   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 426 
 427   // Create & start ConcurrentMark thread.
 428   _cm_thread = new G1ConcurrentMarkThread(this);
 429   if (_cm_thread->osthread() == NULL) {
 430     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 431   }
 432 
 433   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 434 
 435   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 436     // Calculate the number of concurrent worker threads by scaling
 437     // the number of parallel GC threads.
 438     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 439     FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
 440   }
 441 
 442   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 443   if (ConcGCThreads > ParallelGCThreads) {
 444     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 445                     ConcGCThreads, ParallelGCThreads);
 446     return;
 447   }
 448 
 449   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 450   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 451 
 452   _num_concurrent_workers = ConcGCThreads;
 453   _max_concurrent_workers = _num_concurrent_workers;
 454 
 455   _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
 456   _concurrent_workers->initialize_workers();
 457 
 458   if (FLAG_IS_DEFAULT(MarkStackSize)) {
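          // Scale the mark stack with the number of concurrent workers (one full task queue per
          // worker), bounded below by the default MarkStackSize and above by MarkStackSizeMax.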
 459     size_t mark_stack_size =
 460       MIN2(MarkStackSizeMax,
 461           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
 462     // Verify that the calculated value for MarkStackSize is in range.
 463     // It would be nice to use the private utility routine from Arguments.
 464     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 465       log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 466                       "must be between 1 and " SIZE_FORMAT,
 467                       mark_stack_size, MarkStackSizeMax);
 468       return;
 469     }
 470     FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
 471   } else {
 472     // Verify MarkStackSize is in range.
 473     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 474       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 475         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 476           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 477                           "must be between 1 and " SIZE_FORMAT,
 478                           MarkStackSize, MarkStackSizeMax);
 479           return;
 480         }
 481       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 482         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 483           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 484                           " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 485                           MarkStackSize, MarkStackSizeMax);
 486           return;
 487         }
 488       }
 489     }
 490   }
 491 
 492   if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
 493     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
 494   }
 495 
 496   _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
 497   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
 498 
 499   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 500   _num_active_tasks = _max_num_tasks;
 501 
 502   for (uint i = 0; i < _max_num_tasks; ++i) {
 503     G1CMTaskQueue* task_queue = new G1CMTaskQueue();
 504     task_queue->initialize();
 505     _task_queues->register_queue(i, task_queue);
 506 
 507     _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
 508 
 509     _accum_task_vtime[i] = 0.0;
 510   }
 511 
 512   reset_at_marking_complete();
 513   _completed_initialization = true;
 514 }
 515 
 516 void G1ConcurrentMark::reset() {
 517   _has_aborted = false;
 518 
 519   reset_marking_for_restart();
 520 
  521   // Reset all tasks, since different phases will use a different number of active
  522   // threads. So, it's easiest to have all of them ready.
 523   for (uint i = 0; i < _max_num_tasks; ++i) {
 524     _tasks[i]->reset(_next_mark_bitmap);
 525   }
 526 
 527   uint max_regions = _g1h->max_regions();
 528   for (uint i = 0; i < max_regions; i++) {
 529     _top_at_rebuild_starts[i] = NULL;
 530     _region_mark_stats[i].clear();
 531   }
 532 }
 533 
 534 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
 535   for (uint j = 0; j < _max_num_tasks; ++j) {
 536     _tasks[j]->clear_mark_stats_cache(region_idx);
 537   }
 538   _top_at_rebuild_starts[region_idx] = NULL;
 539   _region_mark_stats[region_idx].clear();
 540 }
 541 
 542 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
 543   uint const region_idx = r->hrm_index();
 544   if (r->is_humongous()) {
 545     assert(r->is_starts_humongous(), "Got humongous continues region here");
 546     uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
 547     for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
 548       clear_statistics_in_region(j);
 549     }
 550   } else {
 551     clear_statistics_in_region(region_idx);
 552   }
 553 }
 554 
 555 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
 556   if (bitmap->is_marked(addr)) {
 557     bitmap->clear(addr);
 558   }
 559 }
 560 
 561 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
 562   assert_at_safepoint_on_vm_thread();
 563 
 564   // Need to clear all mark bits of the humongous object.
 565   clear_mark_if_set(_prev_mark_bitmap, r->bottom());
 566   clear_mark_if_set(_next_mark_bitmap, r->bottom());
 567 
 568   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
 569     return;
 570   }
 571 
 572   // Clear any statistics about the region gathered so far.
 573   clear_statistics(r);
 574 }
 575 
 576 void G1ConcurrentMark::reset_marking_for_restart() {
 577   _global_mark_stack.set_empty();
 578 
 579   // Expand the marking stack, if we have to and if we can.
 580   if (has_overflown()) {
 581     _global_mark_stack.expand();
 582 
 583     uint max_regions = _g1h->max_regions();
 584     for (uint i = 0; i < max_regions; i++) {
 585       _region_mark_stats[i].clear_during_overflow();
 586     }
 587   }
 588 
 589   clear_has_overflown();
 590   _finger = _heap.start();
 591 
 592   for (uint i = 0; i < _max_num_tasks; ++i) {
 593     G1CMTaskQueue* queue = _task_queues->queue(i);
 594     queue->set_empty();
 595   }
 596 }
 597 
 598 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
 599   assert(active_tasks <= _max_num_tasks, "we should not have more");
 600 
 601   _num_active_tasks = active_tasks;
 602   // Need to update the three data structures below according to the
 603   // number of active threads for this phase.
 604   _terminator.reset_for_reuse(active_tasks);
 605   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 606   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 607 }
 608 
 609 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 610   set_concurrency(active_tasks);
 611 
 612   _concurrent = concurrent;
 613 
 614   if (!concurrent) {
 615     // At this point we should be in a STW phase, and completed marking.
 616     assert_at_safepoint_on_vm_thread();
 617     assert(out_of_regions(),
 618            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 619            p2i(_finger), p2i(_heap.end()));
 620   }
 621 }
 622 
 623 void G1ConcurrentMark::reset_at_marking_complete() {
 624   // We set the global marking state to some default values when we're
 625   // not doing marking.
 626   reset_marking_for_restart();
 627   _num_active_tasks = 0;
 628 }
 629 
 630 G1ConcurrentMark::~G1ConcurrentMark() {
 631   FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
 632   FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
 633   // The G1ConcurrentMark instance is never freed.
 634   ShouldNotReachHere();
 635 }
 636 
 637 class G1ClearBitMapTask : public AbstractGangTask {
 638 public:
 639   static size_t chunk_size() { return M; }
 640 
 641 private:
 642   // Heap region closure used for clearing the given mark bitmap.
 643   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 644   private:
 645     G1CMBitMap* _bitmap;
 646     G1ConcurrentMark* _cm;
 647   public:
 648     G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
 649     }
 650 
 651     virtual bool do_heap_region(HeapRegion* r) {
 652       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 653 
 654       HeapWord* cur = r->bottom();
 655       HeapWord* const end = r->end();
 656 
 657       while (cur < end) {
 658         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 659         _bitmap->clear_range(mr);
 660 
 661         cur += chunk_size_in_words;
 662 
 663         // Abort iteration if after yielding the marking has been aborted.
 664         if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
 665           return true;
 666         }
 667         // Repeat the asserts from before the start of the closure. We will do them
 668         // as asserts here to minimize their overhead on the product. However, we
 669         // will have them as guarantees at the beginning / end of the bitmap
 670         // clearing to get some checking in the product.
 671         assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
 672         assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 673       }
 674       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 675 
 676       return false;
 677     }
 678   };
 679 
 680   G1ClearBitmapHRClosure _cl;
 681   HeapRegionClaimer _hr_claimer;
 682   bool _suspendible; // If the task is suspendible, workers must join the STS.
 683 
 684 public:
 685   G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 686     AbstractGangTask("G1 Clear Bitmap"),
 687     _cl(bitmap, suspendible ? cm : NULL),
 688     _hr_claimer(n_workers),
 689     _suspendible(suspendible)
 690   { }
 691 
 692   void work(uint worker_id) {
 693     SuspendibleThreadSetJoiner sts_join(_suspendible);
 694     G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
 695   }
 696 
 697   bool is_complete() {
 698     return _cl.is_complete();
 699   }
 700 };
 701 
 702 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 703   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 704 
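        // Estimate the number of chunk_size() work units covering the bitmap; never start more
        // workers than there are work units.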
 705   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 706   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 707 
 708   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 709 
 710   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 711 
 712   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 713   workers->run_task(&cl, num_workers);
 714   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 715 }
 716 
 717 void G1ConcurrentMark::cleanup_for_next_mark() {
  718   // Make sure that the concurrent mark thread still appears to be
  719   // within the current cycle.
 720   guarantee(cm_thread()->during_cycle(), "invariant");
 721 
 722   // We are finishing up the current cycle by clearing the next
 723   // marking bitmap and getting it ready for the next cycle. During
 724   // this time no other cycle can start. So, let's make sure that this
 725   // is the case.
 726   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 727 
 728   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 729 
 730   // Repeat the asserts from above.
 731   guarantee(cm_thread()->during_cycle(), "invariant");
 732   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 733 }
 734 
 735 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 736   assert_at_safepoint_on_vm_thread();
 737   clear_bitmap(_prev_mark_bitmap, workers, false);
 738 }
 739 
 740 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 741 public:
 742   bool do_heap_region(HeapRegion* r) {
 743     r->note_start_of_marking();
 744     return false;
 745   }
 746 };
 747 
 748 void G1ConcurrentMark::pre_initial_mark() {
 749   assert_at_safepoint_on_vm_thread();
 750 
 751   // Reset marking state.
 752   reset();
 753 
 754   // For each region note start of marking.
 755   NoteStartOfMarkHRClosure startcl;
 756   _g1h->heap_region_iterate(&startcl);
 757 
 758   _root_regions.reset();
 759 }
 760 
 761 
 762 void G1ConcurrentMark::post_initial_mark() {
 763   // Start Concurrent Marking weak-reference discovery.
 764   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 765   // enable ("weak") refs discovery
 766   rp->enable_discovery();
 767   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 768 
 769   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  770   // This is the start of the marking cycle; we expect all
  771   // threads to have SATB queues with active set to false.
 772   satb_mq_set.set_active_all_threads(true, /* new active value */
 773                                      false /* expected_active */);
 774 
 775   _root_regions.prepare_for_scan();
 776 
 777   // update_g1_committed() will be called at the end of an evac pause
 778   // when marking is on. So, it's also called at the end of the
 779   // initial-mark pause to update the heap end, if the heap expands
 780   // during it. No need to call it here.
 781 }
 782 
 783 /*
 784  * Notice that in the next two methods, we actually leave the STS
 785  * during the barrier sync and join it immediately afterwards. If we
  786  * do not do this, the following deadlock can occur: one thread could
  787  * be in the barrier sync code, waiting for the other threads to also
  788  * sync up, while another thread is trying to yield and is itself
  789  * waiting for the other threads to sync up too.
 790  *
 791  * Note, however, that this code is also used during remark and in
 792  * this case we should not attempt to leave / enter the STS, otherwise
 793  * we'll either hit an assert (debug / fastdebug) or deadlock
 794  * (product). So we should only leave / enter the STS if we are
 795  * operating concurrently.
 796  *
  797  * Because the thread that does the sync barrier has left the STS, it
  798  * is possible for it to be suspended while a Full GC or an evacuation
  799  * pause occurs. This is actually safe, since entering the sync
  800  * barrier is one of the last things do_marking_step() does, and it
  801  * doesn't manipulate any data structures afterwards.
 802  */
 803 
 804 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 805   bool barrier_aborted;
 806   {
 807     SuspendibleThreadSetLeaver sts_leave(concurrent());
 808     barrier_aborted = !_first_overflow_barrier_sync.enter();
 809   }
 810 
 811   // at this point everyone should have synced up and not be doing any
 812   // more work
 813 
 814   if (barrier_aborted) {
 815     // If the barrier aborted we ignore the overflow condition and
 816     // just abort the whole marking phase as quickly as possible.
 817     return;
 818   }
 819 }
 820 
 821 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 822   SuspendibleThreadSetLeaver sts_leave(concurrent());
 823   _second_overflow_barrier_sync.enter();
 824 
 825   // at this point everything should be re-initialized and ready to go
 826 }
 827 
 828 class G1CMConcurrentMarkingTask : public AbstractGangTask {
 829   G1ConcurrentMark*     _cm;
 830 
 831 public:
 832   void work(uint worker_id) {
 833     assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
 834     ResourceMark rm;
 835 
 836     double start_vtime = os::elapsedVTime();
 837 
 838     {
 839       SuspendibleThreadSetJoiner sts_join;
 840 
 841       assert(worker_id < _cm->active_tasks(), "invariant");
 842 
 843       G1CMTask* task = _cm->task(worker_id);
 844       task->record_start_time();
 845       if (!_cm->has_aborted()) {
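              // Perform time-bounded marking steps, yielding between steps, until this task
              // completes or the whole marking cycle has been aborted.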
 846         do {
 847           task->do_marking_step(G1ConcMarkStepDurationMillis,
 848                                 true  /* do_termination */,
 849                                 false /* is_serial*/);
 850 
 851           _cm->do_yield_check();
 852         } while (!_cm->has_aborted() && task->has_aborted());
 853       }
 854       task->record_end_time();
 855       guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
 856     }
 857 
 858     double end_vtime = os::elapsedVTime();
 859     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 860   }
 861 
 862   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
 863       AbstractGangTask("Concurrent Mark"), _cm(cm) { }
 864 
 865   ~G1CMConcurrentMarkingTask() { }
 866 };
 867 
 868 uint G1ConcurrentMark::calc_active_marking_workers() {
 869   uint result = 0;
 870   if (!UseDynamicNumberOfGCThreads ||
 871       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 872        !ForceDynamicNumberOfGCThreads)) {
 873     result = _max_concurrent_workers;
 874   } else {
 875     result =
 876       WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
 877                                                 1, /* Minimum workers */
 878                                                 _num_concurrent_workers,
 879                                                 Threads::number_of_non_daemon_threads());
 880     // Don't scale the result down by scale_concurrent_workers() because
 881     // that scaling has already gone into "_max_concurrent_workers".
 882   }
 883   assert(result > 0 && result <= _max_concurrent_workers,
 884          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 885          _max_concurrent_workers, result);
 886   return result;
 887 }
 888 
 889 void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
 890 #ifdef ASSERT
 891   HeapWord* last = region->last();
 892   HeapRegion* hr = _g1h->heap_region_containing(last);
 893   assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
 894          "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
 895   assert(hr->next_top_at_mark_start() == region->start(),
 896          "MemRegion start should be equal to nTAMS");
 897 #endif
 898 
 899   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 900 
 901   const uintx interval = PrefetchScanIntervalInBytes;
 902   HeapWord* curr = region->start();
 903   const HeapWord* end = region->end();
 904   while (curr < end) {
 905     Prefetch::read(curr, interval);
 906     oop obj = oop(curr);
 907     int size = obj->oop_iterate_size(&cl);
 908     assert(size == obj->size(), "sanity");
 909     curr += size;
 910   }
 911 }
 912 
 913 class G1CMRootRegionScanTask : public AbstractGangTask {
 914   G1ConcurrentMark* _cm;
 915 public:
 916   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 917     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
 918 
 919   void work(uint worker_id) {
 920     assert(Thread::current()->is_ConcurrentGC_thread(),
 921            "this should only be done by a conc GC thread");
 922 
 923     G1CMRootMemRegions* root_regions = _cm->root_regions();
 924     const MemRegion* region = root_regions->claim_next();
 925     while (region != NULL) {
 926       _cm->scan_root_region(region, worker_id);
 927       region = root_regions->claim_next();
 928     }
 929   }
 930 };
 931 
 932 void G1ConcurrentMark::scan_root_regions() {
 933   // scan_in_progress() will have been set to true only if there was
 934   // at least one root region to scan. So, if it's false, we
 935   // should not attempt to do any further work.
 936   if (root_regions()->scan_in_progress()) {
 937     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 938 
 939     _num_concurrent_workers = MIN2(calc_active_marking_workers(),
 940                                    // We distribute work on a per-region basis, so starting
 941                                    // more threads than that is useless.
 942                                    root_regions()->num_root_regions());
 943     assert(_num_concurrent_workers <= _max_concurrent_workers,
 944            "Maximum number of marking threads exceeded");
 945 
 946     G1CMRootRegionScanTask task(this);
 947     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
 948                         task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
 949     _concurrent_workers->run_task(&task, _num_concurrent_workers);
 950 
 951     // It's possible that has_aborted() is true here without actually
 952     // aborting the survivor scan earlier. This is OK as it's
 953     // mainly used for sanity checking.
 954     root_regions()->scan_finished();
 955   }
 956 }
 957 
 958 void G1ConcurrentMark::concurrent_cycle_start() {
 959   _gc_timer_cm->register_gc_start();
 960 
 961   _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
 962 
 963   _g1h->trace_heap_before_gc(_gc_tracer_cm);
 964 }
 965 
 966 void G1ConcurrentMark::concurrent_cycle_end() {
 967   _g1h->collector_state()->set_clearing_next_bitmap(false);
 968 
 969   _g1h->trace_heap_after_gc(_gc_tracer_cm);
 970 
 971   if (has_aborted()) {
 972     log_info(gc, marking)("Concurrent Mark Abort");
 973     _gc_tracer_cm->report_concurrent_mode_failure();
 974   }
 975 
 976   _gc_timer_cm->register_gc_end();
 977 
 978   _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 979 }
 980 
 981 void G1ConcurrentMark::mark_from_roots() {
 982   _restart_for_overflow = false;
 983 
 984   _num_concurrent_workers = calc_active_marking_workers();
 985 
 986   uint active_workers = MAX2(1U, _num_concurrent_workers);
 987 
 988   // Setting active workers is not guaranteed since fewer
 989   // worker threads may currently exist and more may not be
 990   // available.
 991   active_workers = _concurrent_workers->update_active_workers(active_workers);
 992   log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
 993 
 994   // Parallel task terminator is set in "set_concurrency_and_phase()"
 995   set_concurrency_and_phase(active_workers, true /* concurrent */);
 996 
 997   G1CMConcurrentMarkingTask marking_task(this);
 998   _concurrent_workers->run_task(&marking_task);
 999   print_stats();
1000 }
1001 
1002 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
1003   G1HeapVerifier* verifier = _g1h->verifier();
1004 
1005   verifier->verify_region_sets_optional();
1006 
1007   if (VerifyDuringGC) {
1008     GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);
1009 
1010     size_t const BufLen = 512;
1011     char buffer[BufLen];
1012 
1013     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1014     verifier->verify(type, vo, buffer);
1015   }
1016 
1017   verifier->check_bitmaps(caller);
1018 }
1019 
1020 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
1021   G1CollectedHeap* _g1h;
1022   G1ConcurrentMark* _cm;
1023   HeapRegionClaimer _hrclaimer;
1024   uint volatile _total_selected_for_rebuild;
1025 
1026   G1PrintRegionLivenessInfoClosure _cl;
1027 
1028   class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1029     G1CollectedHeap* _g1h;
1030     G1ConcurrentMark* _cm;
1031 
1032     G1PrintRegionLivenessInfoClosure* _cl;
1033 
1034     uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1035 
1036     void update_remset_before_rebuild(HeapRegion* hr) {
1037       G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();
1038 
1039       bool selected_for_rebuild;
1040       if (hr->is_humongous()) {
1041         bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
1042         selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1043       } else {
1044         size_t const live_bytes = _cm->liveness(hr->hrm_index());
1045         selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1046       }
1047       if (selected_for_rebuild) {
1048         _num_regions_selected_for_rebuild++;
1049       }
1050       _cm->update_top_at_rebuild_start(hr);
1051     }
1052 
1053     // Distribute the given words across the humongous object starting with hr and
1054     // note end of marking.
1055     void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1056       uint const region_idx = hr->hrm_index();
1057       size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
1058       uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
1059 
1060       // "Distributing" zero words means that we only note end of marking for these
1061       // regions.
1062       assert(marked_words == 0 || obj_size_in_words == marked_words,
1063              "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
1064              obj_size_in_words, marked_words);
1065 
1066       for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1067         HeapRegion* const r = _g1h->region_at(i);
1068         size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1069 
1070         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
1071                                words_to_add, i, r->get_type_str());
1072         add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
1073         marked_words -= words_to_add;
1074       }
1075       assert(marked_words == 0,
1076              SIZE_FORMAT " words left after distributing space across %u regions",
1077              marked_words, num_regions_in_humongous);
1078     }
1079 
1080     void update_marked_bytes(HeapRegion* hr) {
1081       uint const region_idx = hr->hrm_index();
1082       size_t const marked_words = _cm->liveness(region_idx);
1083       // The marking attributes the object's size completely to the humongous starts
1084       // region. We need to distribute this value across the entire set of regions a
1085       // humongous object spans.
1086       if (hr->is_humongous()) {
1087         assert(hr->is_starts_humongous() || marked_words == 0,
1088                "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1089                marked_words, region_idx, hr->get_type_str());
1090         if (hr->is_starts_humongous()) {
1091           distribute_marked_bytes(hr, marked_words);
1092         }
1093       } else {
1094         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1095         add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
1096       }
1097     }
1098 
1099     void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
1100       hr->add_to_marked_bytes(marked_bytes);
1101       _cl->do_heap_region(hr);
1102       hr->note_end_of_marking();
1103     }
1104 
1105   public:
1106     G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
1107       _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
1108 
1109     virtual bool do_heap_region(HeapRegion* r) {
1110       update_remset_before_rebuild(r);
1111       update_marked_bytes(r);
1112 
1113       return false;
1114     }
1115 
1116     uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1117   };
1118 
1119 public:
1120   G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
1121     AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
1122     _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1123 
1124   virtual void work(uint worker_id) {
1125     G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1126     _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1127     Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
1128   }
1129 
1130   uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1131 
1132   // Number of regions for which roughly one thread should be spawned for this work.
1133   static const uint RegionsPerThread = 384;
1134 };
1135 
1136 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1137   G1CollectedHeap* _g1h;
1138 public:
1139   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1140 
1141   virtual bool do_heap_region(HeapRegion* r) {
1142     _g1h->policy()->remset_tracker()->update_after_rebuild(r);
1143     return false;
1144   }
1145 };
1146 
1147 void G1ConcurrentMark::remark() {
1148   assert_at_safepoint_on_vm_thread();
1149 
 1150   // If a full collection has happened, we should not continue. However, we might
 1151   // have ended up here because the Remark VM operation had already been scheduled.
1152   if (has_aborted()) {
1153     return;
1154   }
1155 
1156   G1Policy* policy = _g1h->policy();
1157   policy->record_concurrent_mark_remark_start();
1158 
1159   double start = os::elapsedTime();
1160 
1161   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1162 
1163   {
1164     GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1165     finalize_marking();
1166   }
1167 
1168   double mark_work_end = os::elapsedTime();
1169 
1170   bool const mark_finished = !has_overflown();
1171   if (mark_finished) {
1172     weak_refs_work(false /* clear_all_soft_refs */);
1173 
1174     SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1175     // We're done with marking.
 1176     // This is the end of the marking cycle; we expect all
 1177     // threads to have SATB queues with active set to true.
1178     satb_mq_set.set_active_all_threads(false, /* new active value */
1179                                        true /* expected_active */);
1180 
1181     {
1182       GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1183       flush_all_task_caches();
1184     }
1185 
1186     // Install newly created mark bitmap as "prev".
1187     swap_mark_bitmaps();
1188     {
1189       GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1190 
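            // Aim for one worker per RegionsPerThread regions (rounded up), further capped by the
            // number of currently active workers below.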
1191       uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1192                                        G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1193       uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1194 
1195       G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1196       log_debug(gc,ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1197       _g1h->workers()->run_task(&cl, num_workers);
1198 
1199       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1200                                       _g1h->num_regions(), cl.total_selected_for_rebuild());
1201     }
1202     {
1203       GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1204       reclaim_empty_regions();
1205     }
1206 
1207     // Clean out dead classes
1208     if (ClassUnloadingWithConcurrentMark) {
1209       GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1210       ClassLoaderDataGraph::purge();
1211     }
1212 
1213     _g1h->resize_heap_if_necessary();
1214 
1215     compute_new_sizes();
1216 
1217     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1218 
1219     assert(!restart_for_overflow(), "sanity");
1220     // Completely reset the marking state since marking completed
1221     reset_at_marking_complete();
1222   } else {
1223     // We overflowed.  Restart concurrent marking.
1224     _restart_for_overflow = true;
1225 
1226     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1227 
1228     // Clear the marking state because we will be restarting
1229     // marking due to overflowing the global mark stack.
1230     reset_marking_for_restart();
1231   }
1232 
1233   {
1234     GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1235     report_object_count(mark_finished);
1236   }
1237 
1238   // Statistics
1239   double now = os::elapsedTime();
1240   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1241   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1242   _remark_times.add((now - start) * 1000.0);
1243 
1244   policy->record_concurrent_mark_remark_end();
1245 }
1246 
1247 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1248   // Per-region work during the Cleanup pause.
1249   class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1250     G1CollectedHeap* _g1h;
1251     size_t _freed_bytes;
1252     FreeRegionList* _local_cleanup_list;
1253     uint _old_regions_removed;
1254     uint _humongous_regions_removed;
1255 
1256   public:
1257     G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
1258                                  FreeRegionList* local_cleanup_list) :
1259       _g1h(g1h),
1260       _freed_bytes(0),
1261       _local_cleanup_list(local_cleanup_list),
1262       _old_regions_removed(0),
1263       _humongous_regions_removed(0) { }
1264 
1265     size_t freed_bytes() { return _freed_bytes; }
1266     const uint old_regions_removed() { return _old_regions_removed; }
1267     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1268 
1269     bool do_heap_region(HeapRegion *hr) {
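            // A region is reclaimed if it is in use but marking found no live data in it,
            // and it is neither young nor archive.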
1270       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1271         _freed_bytes += hr->used();
1272         hr->set_containing_set(NULL);
1273         if (hr->is_humongous()) {
1274           _humongous_regions_removed++;
1275           _g1h->free_humongous_region(hr, _local_cleanup_list);
1276         } else {
1277           _old_regions_removed++;
1278           _g1h->free_region(hr, _local_cleanup_list);
1279         }
1280         hr->clear_cardtable();
1281         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1282         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1283       }
1284 
1285       return false;
1286     }
1287   };
1288 
1289   G1CollectedHeap* _g1h;
1290   FreeRegionList* _cleanup_list;
1291   HeapRegionClaimer _hrclaimer;
1292 
1293 public:
1294   G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1295     AbstractGangTask("G1 Cleanup"),
1296     _g1h(g1h),
1297     _cleanup_list(cleanup_list),
1298     _hrclaimer(n_workers) {
1299   }
1300 
1301   void work(uint worker_id) {
1302     FreeRegionList local_cleanup_list("Local Cleanup List");
1303     G1ReclaimEmptyRegionsClosure cl(_g1h, &local_cleanup_list);
1304     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1305     assert(cl.is_complete(), "Shouldn't have aborted!");
1306 
1307     // Now update the old/humongous region sets
1308     _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1309     {
1310       MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1311       _g1h->decrement_summary_bytes(cl.freed_bytes());
1312 
1313       _cleanup_list->add_ordered(&local_cleanup_list);
1314       assert(local_cleanup_list.is_empty(), "post-condition");
1315     }
1316   }
1317 };
1318 
1319 void G1ConcurrentMark::reclaim_empty_regions() {
1320   WorkGang* workers = _g1h->workers();
1321   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1322 
1323   G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1324   workers->run_task(&cl);
1325 
1326   if (!empty_regions_list.is_empty()) {
1327     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1328     // Now print the empty regions list.
1329     G1HRPrinter* hrp = _g1h->hr_printer();
1330     if (hrp->is_active()) {
1331       FreeRegionListIterator iter(&empty_regions_list);
1332       while (iter.more_available()) {
1333         HeapRegion* hr = iter.get_next();
1334         hrp->cleanup(hr);
1335       }
1336     }
1337     // And actually make them available.
1338     _g1h->prepend_to_freelist(&empty_regions_list);
1339   }
1340 }
1341 
1342 void G1ConcurrentMark::compute_new_sizes() {
1343   MetaspaceGC::compute_new_size();
1344 
1345   // Cleanup will have freed any regions completely full of garbage.
1346   // Update the soft reference policy with the new heap occupancy.
1347   Universe::update_heap_info_at_gc();
1348 
1349   // We reclaimed old regions so we should calculate the sizes to make
1350   // sure we update the old gen/space data.
1351   _g1h->g1mm()->update_sizes();
1352 }
1353 
1354 void G1ConcurrentMark::cleanup() {
1355   assert_at_safepoint_on_vm_thread();
1356 
1357   // If a full collection has happened, we shouldn't do this.
1358   if (has_aborted()) {
1359     return;
1360   }
1361 
1362   G1Policy* policy = _g1h->policy();
1363   policy->record_concurrent_mark_cleanup_start();
1364 
1365   double start = os::elapsedTime();
1366 
1367   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1368 
1369   {
1370     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1371     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1372     _g1h->heap_region_iterate(&cl);
1373   }
1374 
1375   if (log_is_enabled(Trace, gc, liveness)) {
1376     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1377     _g1h->heap_region_iterate(&cl);
1378   }
1379 
1380   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1381 
 1382   // We need to make this a "collection" so that any collection pause that
1383   // races with it goes around and waits for Cleanup to finish.
1384   _g1h->increment_total_collections();
1385 
1386   // Local statistics
1387   double recent_cleanup_time = (os::elapsedTime() - start);
1388   _total_cleanup_time += recent_cleanup_time;
1389   _cleanup_times.add(recent_cleanup_time);
1390 
1391   {
1392     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1393     policy->record_concurrent_mark_cleanup_end();
1394   }
1395 }
1396 
 1397 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1398 // Uses the G1CMTask associated with a worker thread (for serial reference
1399 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1400 // trace referent objects.
1401 //
1402 // Using the G1CMTask and embedded local queues avoids having the worker
1403 // threads operating on the global mark stack. This reduces the risk
1404 // of overflowing the stack - which we would rather avoid at this late
 1405 // state. Also using the tasks' local queues removes the potential
 1406 // for the workers to interfere with each other, which could occur if
 1407 // operating on the global stack.
1408 
1409 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1410   G1ConcurrentMark* _cm;
1411   G1CMTask*         _task;
1412   uint              _ref_counter_limit;
1413   uint              _ref_counter;
1414   bool              _is_serial;
1415 public:
1416   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1417     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1418     _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
1419     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1420   }
1421 
1422   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1423   virtual void do_oop(      oop* p) { do_oop_work(p); }
1424 
1425   template <class T> void do_oop_work(T* p) {
1426     if (_cm->has_overflown()) {
1427       return;
1428     }
1429     if (!_task->deal_with_reference(p)) {
1430       // We did not add anything to the mark bitmap (or mark stack), so there is
1431       // no point trying to drain it.
1432       return;
1433     }
1434     _ref_counter--;
1435 
1436     if (_ref_counter == 0) {
1437       // We have dealt with _ref_counter_limit references, pushing them
1438       // and objects reachable from them on to the local stack (and
1439       // possibly the global stack). Call G1CMTask::do_marking_step() to
1440       // process these entries.
1441       //
1442       // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1443       // there's nothing more to do (i.e. we're done with the entries that
1444       // were pushed as a result of the G1CMTask::deal_with_reference() calls
1445       // above) or we overflow.
1446       //
1447       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1448       // flag while there may still be some work to do. (See the comment at
1449       // the beginning of G1CMTask::do_marking_step() for those conditions -
1450       // one of which is reaching the specified time target.) It is only
1451       // when G1CMTask::do_marking_step() returns without setting the
1452       // has_aborted() flag that the marking step has completed.
1453       do {
1454         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1455         _task->do_marking_step(mark_step_duration_ms,
1456                                false      /* do_termination */,
1457                                _is_serial);
1458       } while (_task->has_aborted() && !_cm->has_overflown());
1459       _ref_counter = _ref_counter_limit;
1460     }
1461   }
1462 };
1463 
1464 // 'Drain' oop closure used by both serial and parallel reference processing.
1465 // Uses the G1CMTask associated with a given worker thread (for serial
1466 // reference processing the G1CMTask for worker 0 is used). Calls the
1467 // do_marking_step routine, with an unbelievably large timeout value,
1468 // to drain the marking data structures of the remaining entries
1469 // added by the 'keep alive' oop closure above.
1470 
1471 class G1CMDrainMarkingStackClosure : public VoidClosure {
1472   G1ConcurrentMark* _cm;
1473   G1CMTask*         _task;
1474   bool              _is_serial;
1475  public:
1476   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1477     _cm(cm), _task(task), _is_serial(is_serial) {
1478     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1479   }
1480 
1481   void do_void() {
1482     do {
1483       // We call G1CMTask::do_marking_step() to completely drain the local
1484       // and global marking stacks of entries pushed by the 'keep alive'
1485       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1486       //
1487       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1488       // if there's nothing more to do (i.e. we've completely drained the
1489       // entries that were pushed as a result of applying the 'keep alive'
1490       // closure to the entries on the discovered ref lists) or we overflow
1491       // the global marking stack.
1492       //
1493       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1494       // flag while there may still be some work to do. (See the comment at
1495       // the beginning of G1CMTask::do_marking_step() for those conditions -
1496       // one of which is reaching the specified time target.) It is only
1497       // when G1CMTask::do_marking_step() returns without setting the
1498       // has_aborted() flag that the marking step has completed.
1499 
1500       _task->do_marking_step(1000000000.0 /* something very large */,
1501                              true         /* do_termination */,
1502                              _is_serial);
1503     } while (_task->has_aborted() && !_cm->has_overflown());
1504   }
1505 };
1506 
1507 // Implementation of AbstractRefProcTaskExecutor for parallel
1508 // reference processing at the end of G1 concurrent marking
1509 
1510 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1511 private:
1512   G1CollectedHeap*  _g1h;
1513   G1ConcurrentMark* _cm;
1514   WorkGang*         _workers;
1515   uint              _active_workers;
1516 
1517 public:
1518   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1519                           G1ConcurrentMark* cm,
1520                           WorkGang* workers,
1521                           uint n_workers) :
1522     _g1h(g1h), _cm(cm),
1523     _workers(workers), _active_workers(n_workers) { }
1524 
1525   virtual void execute(ProcessTask& task, uint ergo_workers);
1526 };
1527 
1528 class G1CMRefProcTaskProxy : public AbstractGangTask {
1529   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1530   ProcessTask&      _proc_task;
1531   G1CollectedHeap*  _g1h;
1532   G1ConcurrentMark* _cm;
1533 
1534 public:
1535   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1536                        G1CollectedHeap* g1h,
1537                        G1ConcurrentMark* cm) :
1538     AbstractGangTask("Process reference objects in parallel"),
1539     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1540     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1541     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1542   }
1543 
1544   virtual void work(uint worker_id) {
1545     ResourceMark rm;
1546     HandleMark hm;
1547     G1CMTask* task = _cm->task(worker_id);
1548     G1CMIsAliveClosure g1_is_alive(_g1h);
1549     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1550     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1551 
1552     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1553   }
1554 };
1555 
1556 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
1557   assert(_workers != NULL, "Need parallel worker threads.");
1558   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1559   assert(_workers->active_workers() >= ergo_workers,
1560          "Ergonomically chosen workers(%u) should be less than or equal to active workers(%u)",
1561          ergo_workers, _workers->active_workers());
1562 
1563   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1564 
1565   // We need to reset the concurrency level before each
1566   // proxy task execution, so that the termination protocol
1567   // and overflow handling in G1CMTask::do_marking_step() knows
1568   // how many workers to wait for.
1569   _cm->set_concurrency(ergo_workers);
1570   _workers->run_task(&proc_task_proxy, ergo_workers);
1571 }
1572 
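     // Reference processing, weak root processing and class unloading (or, if
     // class unloading is disabled, string deduplication cleanup), performed
     // during the Remark pause once marking has finished.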
1573 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1574   ResourceMark rm;
1575   HandleMark   hm;
1576 
1577   // Is alive closure.
1578   G1CMIsAliveClosure g1_is_alive(_g1h);
1579 
1580   // Inner scope to exclude the cleaning of the string table
1581   // from the displayed time.
1582   {
1583     GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1584 
1585     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1586 
1587     // See the comment in G1CollectedHeap::ref_processing_init()
1588     // about how reference processing currently works in G1.
1589 
1590     // Set the soft reference policy
1591     rp->setup_policy(clear_all_soft_refs);
1592     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1593 
1594     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1595     // in serial reference processing. Note these closures are also
1596     // used for serially processing (by the current thread) the
1597     // JNI references during parallel reference processing.
1598     //
1599     // These closures do not need to synchronize with the worker
1600     // threads involved in parallel reference processing as these
1601     // instances are executed serially by the current thread (e.g.
1602     // reference processing is not multi-threaded and is thus
1603     // performed by the current thread instead of a gang worker).
1604     //
1605     // The gang tasks involved in parallel reference processing create
1606     // their own instances of these closures, which do their own
1607     // synchronization among themselves.
1608     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1609     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1610 
1611     // We need at least one active thread. If reference processing
1612     // is not multi-threaded, we use the current (VMThread) thread;
1613     // otherwise we use the work gang from the G1CollectedHeap and
1614     // we utilize all the worker threads we can.
1615     bool processing_is_mt = rp->processing_is_mt();
1616     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1617     active_workers = clamp(active_workers, 1u, _max_num_tasks);
1618 
1619     // Parallel processing task executor.
1620     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1621                                               _g1h->workers(), active_workers);
1622     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1623 
1624     // Set the concurrency level. The phase was already set prior to
1625     // executing the remark task.
1626     set_concurrency(active_workers);
1627 
1628     // Set the degree of MT processing here.  If the discovery was done MT,
1629     // the number of threads involved during discovery could differ from
1630     // the number of active workers.  This is OK as long as the discovered
1631     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1632     rp->set_active_mt_degree(active_workers);
1633 
1634     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
1635 
1636     // Process the weak references.
1637     const ReferenceProcessorStats& stats =
1638         rp->process_discovered_references(&g1_is_alive,
1639                                           &g1_keep_alive,
1640                                           &g1_drain_mark_stack,
1641                                           executor,
1642                                           &pt);
1643     _gc_tracer_cm->report_gc_reference_stats(stats);
1644     pt.print_all_references();
1645 
1646     // The do_oop work routines of the keep_alive and drain_marking_stack
1647     // oop closures will set the has_overflown flag if we overflow the
1648     // global marking stack.
1649 
1650     assert(has_overflown() || _global_mark_stack.is_empty(),
1651            "Mark stack should be empty (unless it has overflown)");
1652 
1653     assert(rp->num_queues() == active_workers, "why not");
1654 
1655     rp->verify_no_references_recorded();
1656     assert(!rp->discovery_enabled(), "Post condition");
1657   }
1658 
1659   if (has_overflown()) {
1660     // We cannot trust g1_is_alive and the contents of the heap if the marking stack
1661     // overflowed while processing references. Exit the VM.
1662     fatal("Overflow during reference processing, can not continue. Please "
1663           "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
1664           "restart.", MarkStackSizeMax);
1665     return;
1666   }
1667 
1668   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1669 
1670   {
1671     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1672     WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1);
1673   }
1674 
1675   // Unload Klasses, String, Code Cache, etc.
1676   if (ClassUnloadingWithConcurrentMark) {
1677     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1678     bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm);
1679     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1680   } else if (StringDedup::is_enabled()) {
1681     GCTraceTime(Debug, gc, phases) debug("String Deduplication", _gc_timer_cm);
1682     _g1h->string_dedup_cleaning(&g1_is_alive, NULL);
1683   }
1684 }
1685 
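     // Yield closure used during reference precleaning: precleaning stops once
     // marking has been aborted; the fine-grain check additionally yields to
     // the suspendible thread set when requested.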
1686 class G1PrecleanYieldClosure : public YieldClosure {
1687   G1ConcurrentMark* _cm;
1688 
1689 public:
1690   G1PrecleanYieldClosure(G1ConcurrentMark* cm) : _cm(cm) { }
1691 
1692   virtual bool should_return() {
1693     return _cm->has_aborted();
1694   }
1695 
1696   virtual bool should_return_fine_grain() {
1697     _cm->do_yield_check();
1698     return _cm->has_aborted();
1699   }
1700 };
1701 
1702 void G1ConcurrentMark::preclean() {
1703   assert(G1UseReferencePrecleaning, "Precleaning must be enabled.");
1704 
1705   SuspendibleThreadSetJoiner joiner;
1706 
1707   G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
1708   G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);
1709 
1710   set_concurrency_and_phase(1, true);
1711 
1712   G1PrecleanYieldClosure yield_cl(this);
1713 
1714   ReferenceProcessor* rp = _g1h->ref_processor_cm();
1715   // Precleaning is single threaded. Temporarily disable MT discovery.
1716   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
1717   rp->preclean_discovered_references(rp->is_alive_non_header(),
1718                                      &keep_alive,
1719                                      &drain_mark_stack,
1720                                      &yield_cl,
1721                                      _gc_timer_cm);
1722 }
1723 
1724 // When sampling object counts, we have already swapped the mark bitmaps, so we need to use
1725 // the prev bitmap to determine liveness.
1726 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1727   G1CollectedHeap* _g1h;
1728 public:
1729   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1730 
1731   bool do_object_b(oop obj) {
1732     return obj != NULL &&
1733            (!_g1h->is_in_g1_reserved(obj) || !_g1h->is_obj_dead(obj));
1734   }
1735 };
1736 
1737 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1738   // Depending on whether marking has completed, liveness needs to be determined
1739   // using either the next or the prev bitmap.
1740   if (mark_completed) {
1741     G1ObjectCountIsAliveClosure is_alive(_g1h);
1742     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1743   } else {
1744     G1CMIsAliveClosure is_alive(_g1h);
1745     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1746   }
1747 }
1748 
1749 
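     // After marking has completed, the "next" bitmap contains the full
     // liveness information and becomes the new "prev" bitmap; the old "prev"
     // bitmap takes over the "next" role and will be cleared concurrently
     // before the next cycle (hence set_clearing_next_bitmap(true) below).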
1750 void G1ConcurrentMark::swap_mark_bitmaps() {
1751   G1CMBitMap* temp = _prev_mark_bitmap;
1752   _prev_mark_bitmap = _next_mark_bitmap;
1753   _next_mark_bitmap = temp;
1754   _g1h->collector_state()->set_clearing_next_bitmap(true);
1755 }
1756 
1757 // Closure for marking entries in SATB buffers.
1758 class G1CMSATBBufferClosure : public SATBBufferClosure {
1759 private:
1760   G1CMTask* _task;
1761   G1CollectedHeap* _g1h;
1762 
1763   // This is very similar to G1CMTask::deal_with_reference, but with
1764   // more relaxed requirements for the argument, so this must be more
1765   // circumspect about treating the argument as an object.
1766   void do_entry(void* entry) const {
1767     _task->increment_refs_reached();
1768     oop const obj = static_cast<oop>(entry);
1769     _task->make_reference_grey(obj);
1770   }
1771 
1772 public:
1773   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1774     : _task(task), _g1h(g1h) { }
1775 
1776   virtual void do_buffer(void** buffer, size_t size) {
1777     for (size_t i = 0; i < size; ++i) {
1778       do_entry(buffer[i]);
1779     }
1780   }
1781 };
1782 
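     // Per-thread remark work: processes and empties each thread's local SATB
     // buffer and, for Java threads, marks objects reachable from the nmethods
     // the thread is currently executing (see the comment in do_thread()).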
1783 class G1RemarkThreadsClosure : public ThreadClosure {
1784   G1CMSATBBufferClosure _cm_satb_cl;
1785   G1CMOopClosure _cm_cl;
1786   MarkingCodeBlobClosure _code_cl;
1787   uintx _claim_token;
1788 
1789  public:
1790   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1791     _cm_satb_cl(task, g1h),
1792     _cm_cl(g1h, task),
1793     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1794     _claim_token(Threads::thread_claim_token()) {}
1795 
1796   void do_thread(Thread* thread) {
1797     if (thread->claim_threads_do(true, _claim_token)) {
1798       SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
1799       queue.apply_closure_and_empty(&_cm_satb_cl);
1800       if (thread->is_Java_thread()) {
1801         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1802         // however, oops reachable from nmethods have very complex lifecycles:
1803         // * Alive if on the stack of an executing method
1804         // * Weakly reachable otherwise
1805         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1806         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1807         JavaThread* jt = (JavaThread*)thread;
1808         jt->nmethods_do(&_code_cl);
1809       }
1810     }
1811   }
1812 };
1813 
1814 class G1CMRemarkTask : public AbstractGangTask {
1815   G1ConcurrentMark* _cm;
1816 public:
1817   void work(uint worker_id) {
1818     G1CMTask* task = _cm->task(worker_id);
1819     task->record_start_time();
1820     {
1821       ResourceMark rm;
1822       HandleMark hm;
1823 
1824       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1825       Threads::threads_do(&threads_f);
1826     }
1827 
1828     do {
1829       task->do_marking_step(1000000000.0 /* something very large */,
1830                             true         /* do_termination       */,
1831                             false        /* is_serial            */);
1832     } while (task->has_aborted() && !_cm->has_overflown());
1833     // If we overflow, then we do not want to restart. We instead
1834     // want to abort remark and do concurrent marking again.
1835     task->record_end_time();
1836   }
1837 
1838   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1839     AbstractGangTask("Par Remark"), _cm(cm) {
1840     _cm->terminator()->reset_for_reuse(active_workers);
1841   }
1842 };
1843 
1844 void G1ConcurrentMark::finalize_marking() {
1845   ResourceMark rm;
1846   HandleMark   hm;
1847 
1848   _g1h->ensure_parsability(false);
1849 
1850   // this is remark, so we'll use up all active threads
1851   uint active_workers = _g1h->workers()->active_workers();
1852   set_concurrency_and_phase(active_workers, false /* concurrent */);
1853   // Leave _parallel_marking_threads at its
1854   // value originally calculated in the G1ConcurrentMark
1855   // constructor and pass values of the active workers
1856   // through the gang in the task.
1857 
1858   {
1859     StrongRootsScope srs(active_workers);
1860 
1861     G1CMRemarkTask remarkTask(this, active_workers);
1862     // We will start all available threads, even if we decide that the
1863     // active_workers will be fewer. The extra ones will just bail out
1864     // immediately.
1865     _g1h->workers()->run_task(&remarkTask);
1866   }
1867 
1868   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1869   guarantee(has_overflown() ||
1870             satb_mq_set.completed_buffers_num() == 0,
1871             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1872             BOOL_TO_STR(has_overflown()),
1873             satb_mq_set.completed_buffers_num());
1874 
1875   print_stats();
1876 }
1877 
1878 void G1ConcurrentMark::flush_all_task_caches() {
1879   size_t hits = 0;
1880   size_t misses = 0;
1881   for (uint i = 0; i < _max_num_tasks; i++) {
1882     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1883     hits += stats.first;
1884     misses += stats.second;
1885   }
1886   size_t sum = hits + misses;
1887   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1888                        hits, misses, percent_of(hits, sum));
1889 }
1890 
1891 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1892   _prev_mark_bitmap->clear_range(mr);
1893 }
1894 
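     // Claims the next region to scan by atomically advancing the global
     // finger past it. Returns the claimed region if there is anything to scan
     // in it (NTAMS above bottom), or NULL - either because the claimed region
     // is empty (the caller should simply retry) or because the finger has
     // reached the end of the heap.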
1895 HeapRegion*
1896 G1ConcurrentMark::claim_region(uint worker_id) {
1897   // "checkpoint" the finger
1898   HeapWord* finger = _finger;
1899 
1900   while (finger < _heap.end()) {
1901     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1902 
1903     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1904     // Make sure that the reads below do not float before loading curr_region.
1905     OrderAccess::loadload();
1906     // Above heap_region_containing may return NULL as we always scan and claim
1907     // until the end of the heap. In this case, just jump to the next region.
1908     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1909 
1910     // Is the gap between reading the finger and doing the CAS too long?
1911     HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
1912     if (res == finger && curr_region != NULL) {
1913       // we succeeded
1914       HeapWord*   bottom        = curr_region->bottom();
1915       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1916 
1917       // Notice that _finger == end cannot be guaranteed here since
1918       // someone else might have moved the finger even further.
1919       assert(_finger >= end, "the finger should have moved forward");
1920 
1921       if (limit > bottom) {
1922         return curr_region;
1923       } else {
1924         assert(limit == bottom,
1925                "the region limit should be at bottom");
1926         // we return NULL and the caller should try calling
1927         // claim_region() again.
1928         return NULL;
1929       }
1930     } else {
1931       assert(_finger > finger, "the finger should have moved forward");
1932       // read it again
1933       finger = _finger;
1934     }
1935   }
1936 
1937   return NULL;
1938 }
1939 
1940 #ifndef PRODUCT
1941 class VerifyNoCSetOops {
1942   G1CollectedHeap* _g1h;
1943   const char* _phase;
1944   int _info;
1945 
1946 public:
1947   VerifyNoCSetOops(const char* phase, int info = -1) :
1948     _g1h(G1CollectedHeap::heap()),
1949     _phase(phase),
1950     _info(info)
1951   { }
1952 
1953   void operator()(G1TaskQueueEntry task_entry) const {
1954     if (task_entry.is_array_slice()) {
1955       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1956       return;
1957     }
1958     guarantee(oopDesc::is_oop(task_entry.obj()),
1959               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1960               p2i(task_entry.obj()), _phase, _info);
1961     HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
1962     guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
1963               "obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
1964               p2i(task_entry.obj()), _phase, _info, r->hrm_index());
1965   }
1966 };
1967 
1968 void G1ConcurrentMark::verify_no_collection_set_oops() {
1969   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1970   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1971     return;
1972   }
1973 
1974   // Verify entries on the global mark stack
1975   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1976 
1977   // Verify entries on the task queues
1978   for (uint i = 0; i < _max_num_tasks; ++i) {
1979     G1CMTaskQueue* queue = _task_queues->queue(i);
1980     queue->iterate(VerifyNoCSetOops("Queue", i));
1981   }
1982 
1983   // Verify the global finger
1984   HeapWord* global_finger = finger();
1985   if (global_finger != NULL && global_finger < _heap.end()) {
1986     // Since we always iterate over all regions, we might get a NULL HeapRegion
1987     // here.
1988     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1989     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1990               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1991               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1992   }
1993 
1994   // Verify the task fingers
1995   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1996   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1997     G1CMTask* task = _tasks[i];
1998     HeapWord* task_finger = task->finger();
1999     if (task_finger != NULL && task_finger < _heap.end()) {
2000       // See above note on the global finger verification.
2001       HeapRegion* r = _g1h->heap_region_containing(task_finger);
2002       guarantee(r == NULL || task_finger == r->bottom() ||
2003                 !r->in_collection_set() || !r->has_index_in_opt_cset(),
2004                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2005                 p2i(task_finger), HR_FORMAT_PARAMS(r));
2006     }
2007   }
2008 }
2009 #endif // PRODUCT
2010 
2011 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
2012   _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
2013 }
2014 
2015 void G1ConcurrentMark::print_stats() {
2016   if (!log_is_enabled(Debug, gc, stats)) {
2017     return;
2018   }
2019   log_debug(gc, stats)("---------------------------------------------------------------------");
2020   for (size_t i = 0; i < _num_active_tasks; ++i) {
2021     _tasks[i]->print_stats();
2022     log_debug(gc, stats)("---------------------------------------------------------------------");
2023   }
2024 }
2025 
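     // Aborts an in-progress concurrent cycle, typically because a Full GC has
     // been triggered: clears the next bitmap, resets the marking state of all
     // tasks and abandons any partially completed SATB buffers.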
2026 void G1ConcurrentMark::concurrent_cycle_abort() {
2027   if (!cm_thread()->during_cycle() || _has_aborted) {
2028     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2029     return;
2030   }
2031 
2032   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2033   // concurrent bitmap clearing.
2034   {
2035     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2036     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2037   }
2038   // Note we cannot clear the previous marking bitmap here
2039   // since VerifyDuringGC verifies the objects marked during
2040   // a full GC against the previous bitmap.
2041 
2042   // Empty mark stack
2043   reset_marking_for_restart();
2044   for (uint i = 0; i < _max_num_tasks; ++i) {
2045     _tasks[i]->clear_region_fields();
2046   }
2047   _first_overflow_barrier_sync.abort();
2048   _second_overflow_barrier_sync.abort();
2049   _has_aborted = true;
2050 
2051   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2052   satb_mq_set.abandon_partial_marking();
2053   // This can be called either during or outside marking; we'll read
2054   // the expected_active value from the SATB queue set.
2055   satb_mq_set.set_active_all_threads(
2056                                  false, /* new active value */
2057                                  satb_mq_set.is_active() /* expected_active */);
2058 }
2059 
2060 static void print_ms_time_info(const char* prefix, const char* name,
2061                                NumberSeq& ns) {
2062   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2063                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2064   if (ns.num() > 0) {
2065     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2066                            prefix, ns.sd(), ns.maximum());
2067   }
2068 }
2069 
2070 void G1ConcurrentMark::print_summary_info() {
2071   Log(gc, marking) log;
2072   if (!log.is_trace()) {
2073     return;
2074   }
2075 
2076   log.trace(" Concurrent marking:");
2077   print_ms_time_info("  ", "init marks", _init_times);
2078   print_ms_time_info("  ", "remarks", _remark_times);
2079   {
2080     print_ms_time_info("     ", "final marks", _remark_mark_times);
2081     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2082 
2083   }
2084   print_ms_time_info("  ", "cleanups", _cleanup_times);
2085   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2086             _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2087   log.trace("  Total stop_world time = %8.2f s.",
2088             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2089   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2090             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2091 }
2092 
2093 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2094   _concurrent_workers->print_worker_threads_on(st);
2095 }
2096 
2097 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2098   _concurrent_workers->threads_do(tc);
2099 }
2100 
2101 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2102   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2103                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2104   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2105   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2106 }
2107 
2108 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2109   ReferenceProcessor* result = g1h->ref_processor_cm();
2110   assert(result != NULL, "CM reference processor should not be NULL");
2111   return result;
2112 }
2113 
2114 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2115                                G1CMTask* task)
2116   : MetadataVisitingOopIterateClosure(get_cm_oop_closure_ref_processor(g1h)),
2117     _g1h(g1h), _task(task)
2118 { }
2119 
2120 void G1CMTask::setup_for_region(HeapRegion* hr) {
2121   assert(hr != NULL,
2122         "claim_region() should have filtered out NULL regions");
2123   _curr_region  = hr;
2124   _finger       = hr->bottom();
2125   update_region_limit();
2126 }
2127 
2128 void G1CMTask::update_region_limit() {
2129   HeapRegion* hr            = _curr_region;
2130   HeapWord* bottom          = hr->bottom();
2131   HeapWord* limit           = hr->next_top_at_mark_start();
2132 
2133   if (limit == bottom) {
2134     // The region was collected underneath our feet.
2135     // We set the finger to bottom to ensure that the bitmap
2136     // iteration that will follow this will not do anything.
2137     // (this is not a condition that holds when we set the region up,
2138     // as the region is not supposed to be empty in the first place)
2139     _finger = bottom;
2140   } else if (limit >= _region_limit) {
2141     assert(limit >= _finger, "peace of mind");
2142   } else {
2143     assert(limit < _region_limit, "only way to get here");
2144     // This can happen under some pretty unusual circumstances.  An
2145     // evacuation pause empties the region underneath our feet (NTAMS
2146     // at bottom). We then do some allocation in the region (NTAMS
2147     // stays at bottom), followed by the region being used as a GC
2148     // alloc region (NTAMS will move to top() and the objects
2149     // originally below it will be grayed). All objects now marked in
2150     // the region are explicitly grayed, if below the global finger,
2151     // and we in fact do not need to scan anything else. So, we simply
2152     // set _finger to be limit to ensure that the bitmap iteration
2153     // doesn't do anything.
2154     _finger = limit;
2155   }
2156 
2157   _region_limit = limit;
2158 }
2159 
2160 void G1CMTask::giveup_current_region() {
2161   assert(_curr_region != NULL, "invariant");
2162   clear_region_fields();
2163 }
2164 
2165 void G1CMTask::clear_region_fields() {
2166   // Values for these three fields that indicate that we're not
2167   // holding on to a region.
2168   _curr_region   = NULL;
2169   _finger        = NULL;
2170   _region_limit  = NULL;
2171 }
2172 
2173 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2174   if (cm_oop_closure == NULL) {
2175     assert(_cm_oop_closure != NULL, "invariant");
2176   } else {
2177     assert(_cm_oop_closure == NULL, "invariant");
2178   }
2179   _cm_oop_closure = cm_oop_closure;
2180 }
2181 
2182 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2183   guarantee(next_mark_bitmap != NULL, "invariant");
2184   _next_mark_bitmap              = next_mark_bitmap;
2185   clear_region_fields();
2186 
2187   _calls                         = 0;
2188   _elapsed_time_ms               = 0.0;
2189   _termination_time_ms           = 0.0;
2190   _termination_start_time_ms     = 0.0;
2191 
2192   _mark_stats_cache.reset();
2193 }
2194 
2195 bool G1CMTask::should_exit_termination() {
2196   if (!regular_clock_call()) {
2197     return true;
2198   }
2199 
2200   // This is called when we are in the termination protocol. We should
2201   // quit if, for some reason, this task wants to abort or the global
2202   // stack is not empty (this means that we can get work from it).
2203   return !_cm->mark_stack_empty() || has_aborted();
2204 }
2205 
2206 void G1CMTask::reached_limit() {
2207   assert(_words_scanned >= _words_scanned_limit ||
2208          _refs_reached >= _refs_reached_limit,
2209          "shouldn't have been called otherwise");
2210   abort_marking_if_regular_check_fail();
2211 }
2212 
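     // Returns false when this task should abort the current marking step:
     // on mark stack overflow, marking abort (e.g. for a Full GC), a pending
     // yield request, an exceeded time quota, or when completed SATB buffers
     // are waiting to be processed.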
2213 bool G1CMTask::regular_clock_call() {
2214   if (has_aborted()) {
2215     return false;
2216   }
2217 
2218   // First, we need to recalculate the words scanned and refs reached
2219   // limits for the next clock call.
2220   recalculate_limits();
2221 
2222   // During the regular clock call we do the following
2223 
2224   // (1) If an overflow has been flagged, then we abort.
2225   if (_cm->has_overflown()) {
2226     return false;
2227   }
2228 
2229   // If we are not concurrent (i.e. we're doing remark) we don't need
2230   // to check anything else. The other steps are only needed during
2231   // the concurrent marking phase.
2232   if (!_cm->concurrent()) {
2233     return true;
2234   }
2235 
2236   // (2) If marking has been aborted for Full GC, then we also abort.
2237   if (_cm->has_aborted()) {
2238     return false;
2239   }
2240 
2241   double curr_time_ms = os::elapsedVTime() * 1000.0;
2242 
2243   // (4) We check whether we should yield. If we have to, then we abort.
2244   if (SuspendibleThreadSet::should_yield()) {
2245     // We should yield. To do this we abort the task. The caller is
2246     // responsible for yielding.
2247     return false;
2248   }
2249 
2250   // (5) We check whether we've reached our time quota. If we have,
2251   // then we abort.
2252   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2253   if (elapsed_time_ms > _time_target_ms) {
2254     _has_timed_out = true;
2255     return false;
2256   }
2257 
2258   // (6) Finally, we check whether there are enough completed SATB
2259   // buffers available for processing. If there are, we abort.
2260   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2261   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2262     // We do need to process SATB buffers; we'll abort and restart
2263     // the marking task to do so
2264     return false;
2265   }
2266   return true;
2267 }
2268 
2269 void G1CMTask::recalculate_limits() {
2270   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2271   _words_scanned_limit      = _real_words_scanned_limit;
2272 
2273   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2274   _refs_reached_limit       = _real_refs_reached_limit;
2275 }
2276 
2277 void G1CMTask::decrease_limits() {
2278   // This is called when we believe that we're going to do an infrequent
2279   // operation which will increase the per byte scanned cost (i.e. move
2280   // entries to/from the global stack). It basically tries to decrease the
2281   // scanning limit so that the clock is called earlier.
2282 
2283   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2284   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2285 }
2286 
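     // Entries move between the local queue and the global mark stack in
     // chunks of G1CMMarkStack::EntriesPerChunk entries; a partially filled
     // chunk is terminated by a null G1TaskQueueEntry.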
2287 void G1CMTask::move_entries_to_global_stack() {
2288   // Local array where we'll store the entries that will be popped
2289   // from the local queue.
2290   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2291 
2292   size_t n = 0;
2293   G1TaskQueueEntry task_entry;
2294   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2295     buffer[n] = task_entry;
2296     ++n;
2297   }
2298   if (n < G1CMMarkStack::EntriesPerChunk) {
2299     buffer[n] = G1TaskQueueEntry();
2300   }
2301 
2302   if (n > 0) {
2303     if (!_cm->mark_stack_push(buffer)) {
2304       set_has_aborted();
2305     }
2306   }
2307 
2308   // This operation was quite expensive, so decrease the limits.
2309   decrease_limits();
2310 }
2311 
2312 bool G1CMTask::get_entries_from_global_stack() {
2313   // Local array where we'll store the entries that will be popped
2314   // from the global stack.
2315   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2316 
2317   if (!_cm->mark_stack_pop(buffer)) {
2318     return false;
2319   }
2320 
2321   // We did actually pop at least one entry.
2322   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2323     G1TaskQueueEntry task_entry = buffer[i];
2324     if (task_entry.is_null()) {
2325       break;
2326     }
2327     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2328     bool success = _task_queue->push(task_entry);
2329     // We only call this when the local queue is empty or under a
2330     // given target limit. So, we do not expect this push to fail.
2331     assert(success, "invariant");
2332   }
2333 
2334   // This operation was quite expensive, so decrease the limits
2335   decrease_limits();
2336   return true;
2337 }
2338 
2339 void G1CMTask::drain_local_queue(bool partially) {
2340   if (has_aborted()) {
2341     return;
2342   }
2343 
2344   // Decide what the target size is, depending whether we're going to
2345   // drain it partially (so that other tasks can steal if they run out
2346   // of things to do) or totally (at the very end).
2347   size_t target_size;
2348   if (partially) {
2349     target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
2350   } else {
2351     target_size = 0;
2352   }
2353 
2354   if (_task_queue->size() > target_size) {
2355     G1TaskQueueEntry entry;
2356     bool ret = _task_queue->pop_local(entry);
2357     while (ret) {
2358       scan_task_entry(entry);
2359       if (_task_queue->size() <= target_size || has_aborted()) {
2360         ret = false;
2361       } else {
2362         ret = _task_queue->pop_local(entry);
2363       }
2364     }
2365   }
2366 }
2367 
2368 void G1CMTask::drain_global_stack(bool partially) {
2369   if (has_aborted()) {
2370     return;
2371   }
2372 
2373   // We have a policy to drain the local queue before we attempt to
2374   // drain the global stack.
2375   assert(partially || _task_queue->size() == 0, "invariant");
2376 
2377   // Decide what the target size is, depending whether we're going to
2378   // drain it partially (so that other tasks can steal if they run out
2379   // of things to do) or totally (at the very end).
2380   // Notice that when draining the global mark stack partially, due to the raciness
2381   // of the mark stack size update we might in fact drop below the target. But,
2382   // this is not a problem.
2383   // In case of total draining, we simply process until the global mark stack is
2384   // totally empty, disregarding the size counter.
2385   if (partially) {
2386     size_t const target_size = _cm->partial_mark_stack_size_target();
2387     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2388       if (get_entries_from_global_stack()) {
2389         drain_local_queue(partially);
2390       }
2391     }
2392   } else {
2393     while (!has_aborted() && get_entries_from_global_stack()) {
2394       drain_local_queue(partially);
2395     }
2396   }
2397 }
2398 
2399 // SATB Queue has several assumptions on whether to call the par or
2400 // non-par versions of the methods. This is why some of the code is
2401 // replicated. We should really get rid of the single-threaded version
2402 // of the code to simplify things.
2403 void G1CMTask::drain_satb_buffers() {
2404   if (has_aborted()) {
2405     return;
2406   }
2407 
2408   // We set this so that the regular clock knows that we're in the
2409   // middle of draining buffers and doesn't set the abort flag when it
2410   // notices that SATB buffers are available for draining. It'd be
2411   // very counterproductive if it did that. :-)
2412   _draining_satb_buffers = true;
2413 
2414   G1CMSATBBufferClosure satb_cl(this, _g1h);
2415   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2416 
2417   // This keeps claiming and applying the closure to completed buffers
2418   // until we run out of buffers or we need to abort.
2419   while (!has_aborted() &&
2420          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2421     abort_marking_if_regular_check_fail();
2422   }
2423 
2424   // Can't assert qset is empty here, even if not aborted.  If concurrent,
2425   // some other thread might be adding to the queue.  If not concurrent,
2426   // some other thread might have won the race for the last buffer, but
2427   // has not yet decremented the count.
2428 
2429   _draining_satb_buffers = false;
2430 
2431   // Again, this was a potentially expensive operation; decrease the
2432   // limits to get the regular clock call early.
2433   decrease_limits();
2434 }
2435 
2436 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2437   _mark_stats_cache.reset(region_idx);
2438 }
2439 
2440 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2441   return _mark_stats_cache.evict_all();
2442 }
2443 
2444 void G1CMTask::print_stats() {
2445   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2446   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2447                        _elapsed_time_ms, _termination_time_ms);
2448   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2449                        _step_times_ms.num(),
2450                        _step_times_ms.avg(),
2451                        _step_times_ms.sd(),
2452                        _step_times_ms.maximum(),
2453                        _step_times_ms.sum());
2454   size_t const hits = _mark_stats_cache.hits();
2455   size_t const misses = _mark_stats_cache.misses();
2456   log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2457                        hits, misses, percent_of(hits, hits + misses));
2458 }
2459 
2460 bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
2461   return _task_queues->steal(worker_id, task_entry);
2462 }
2463 
2464 /*****************************************************************************
2465 
2466     The do_marking_step(time_target_ms, ...) method is the building
2467     block of the parallel marking framework. It can be called in parallel
2468     with other invocations of do_marking_step() on different tasks
2469     (but only one per task, obviously) and concurrently with the
2470     mutator threads, or during remark, hence it eliminates the need
2471     for two versions of the code. When called during remark, it will
2472     pick up from where the task left off during the concurrent marking
2473     phase. Interestingly, tasks are also claimable during evacuation
2474     pauses, since do_marking_step() ensures that it aborts before
2475     it needs to yield.
2476 
2477     The data structures that it uses to do marking work are the
2478     following:
2479 
2480       (1) Marking Bitmap. If there are gray objects that appear only
2481       on the bitmap (this happens either when dealing with an overflow
2482       or when the initial marking phase has simply marked the roots
2483       and didn't push them on the stack), then tasks claim heap
2484       regions whose bitmap they then scan to find gray objects. A
2485       global finger indicates where the end of the last claimed region
2486       is. A local finger indicates how far into the region a task has
2487       scanned. The two fingers are used to determine how to gray an
2488       object (i.e. whether simply marking it is OK, as it will be
2489       visited by a task in the future, or whether it needs to be also
2490       pushed on a stack).
2491 
2492       (2) Local Queue. The local queue of the task which is accessed
2493       reasonably efficiently by the task. Other tasks can steal from
2494       it when they run out of work. Throughout the marking phase, a
2495       task attempts to keep its local queue short but not totally
2496       empty, so that entries are available for stealing by other
2497       tasks. Only when there is no more work will a task totally
2498       drain its local queue.
2499 
2500       (3) Global Mark Stack. This handles local queue overflow. During
2501       marking only sets of entries are moved between it and the local
2502       queues, as access to it requires a mutex and more fine-grained
2503       interaction with it which might cause contention. If it
2504       overflows, then the marking phase should restart and iterate
2505       over the bitmap to identify gray objects. Throughout the marking
2506       phase, tasks attempt to keep the global mark stack at a small
2507       length but not totally empty, so that entries are available for
2508       popping by other tasks. Only when there is no more work will tasks
2509       totally drain the global mark stack.
2510 
2511       (4) SATB Buffer Queue. This is where completed SATB buffers are
2512       made available. Buffers are regularly removed from this queue
2513       and scanned for roots, so that the queue doesn't get too
2514       long. During remark, all completed buffers are processed, as
2515       well as the filled in parts of any uncompleted buffers.
2516 
2517     The do_marking_step() method tries to abort when the time target
2518     has been reached. There are a few other cases when the
2519     do_marking_step() method also aborts:
2520 
2521       (1) When the marking phase has been aborted (after a Full GC).
2522 
2523       (2) When a global overflow (on the global stack) has been
2524       triggered. Before the task aborts, it will actually sync up with
2525       the other tasks to ensure that all the marking data structures
2526       (local queues, stacks, fingers etc.)  are re-initialized so that
2527       when do_marking_step() completes, the marking phase can
2528       immediately restart.
2529 
2530       (3) When enough completed SATB buffers are available. The
2531       do_marking_step() method only tries to drain SATB buffers right
2532       at the beginning. So, if enough buffers are available, the
2533       marking step aborts and the SATB buffers are processed at
2534       the beginning of the next invocation.
2535 
2536       (4) To yield. When we have to yield, we abort and yield
2537       right at the end of do_marking_step(). This saves us from a lot
2538       of hassle as, by yielding, we might allow a Full GC. If this
2539       happens then objects will be compacted underneath our feet, the
2540       heap might shrink, etc. We save checking for this by just
2541       aborting and doing the yield right at the end.
2542 
2543     From the above it follows that the do_marking_step() method should
2544     be called in a loop (or, otherwise, regularly) until it completes.
2545 
2546     If a marking step completes without its has_aborted() flag being
2547     true, it means it has completed the current marking phase (and
2548     also all other marking tasks have done so and have all synced up).
2549 
2550     A method called regular_clock_call() is invoked "regularly" (in
2551     sub-ms intervals) throughout marking. It is this clock method that
2552     checks all the abort conditions which were mentioned above and
2553     decides when the task should abort. A work-based scheme is used to
2554     trigger this clock method: when the number of object words the
2555     marking phase has scanned or the number of references the marking
2556     phase has visited reach a given limit. Additional invocations to
2557     the clock method have been planted in a few other strategic places
2558     too. The initial reason for the clock method was to avoid calling
2559     vtime too regularly, as it is quite expensive. So, once it was in
2560     place, it was natural to piggy-back all the other conditions on it
2561     too and not constantly check them throughout the code.
2562 
2563     If do_termination is true then do_marking_step will enter its
2564     termination protocol.
2565 
2566     The value of is_serial must be true when do_marking_step is being
2567     called serially (i.e. by the VMThread) and do_marking_step should
2568     skip any synchronization in the termination and overflow code.
2569     Examples include the serial remark code and the serial reference
2570     processing closures.
2571 
2572     The value of is_serial must be false when do_marking_step is
2573     being called by any of the worker threads in a work gang.
2574     Examples include the concurrent marking code (CMMarkingTask),
2575     the MT remark code, and the MT reference processing closures.
2576 
2577  *****************************************************************************/
2578 
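     // A sketch of the typical calling pattern (see G1CMRemarkTask::work() and
     // the reference processing closures above for the real call sites):
     //
     //   do {
     //     task->do_marking_step(target_ms, do_termination, is_serial);
     //   } while (task->has_aborted() && !cm->has_overflown());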
2579 void G1CMTask::do_marking_step(double time_target_ms,
2580                                bool do_termination,
2581                                bool is_serial) {
2582   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2583 
2584   _start_time_ms = os::elapsedVTime() * 1000.0;
2585 
2586   // If do_stealing is true then do_marking_step will attempt to
2587   // steal work from the other G1CMTasks. It only makes sense to
2588   // enable stealing when the termination protocol is enabled
2589   // and do_marking_step() is not being called serially.
2590   bool do_stealing = do_termination && !is_serial;
2591 
2592   G1Predictions const& predictor = _g1h->policy()->predictor();
2593   double diff_prediction_ms = predictor.predict_zero_bounded(&_marking_step_diff_ms);
2594   _time_target_ms = time_target_ms - diff_prediction_ms;
2595 
2596   // set up the variables that are used in the work-based scheme to
2597   // call the regular clock method
2598   _words_scanned = 0;
2599   _refs_reached  = 0;
2600   recalculate_limits();
2601 
2602   // clear all flags
2603   clear_has_aborted();
2604   _has_timed_out = false;
2605   _draining_satb_buffers = false;
2606 
2607   ++_calls;
2608 
2609   // Set up the bitmap and oop closures. Anything that uses them is
2610   // eventually called from this method, so it is OK to allocate these
2611   // statically.
2612   G1CMBitMapClosure bitmap_closure(this, _cm);
2613   G1CMOopClosure cm_oop_closure(_g1h, this);
2614   set_cm_oop_closure(&cm_oop_closure);
2615 
2616   if (_cm->has_overflown()) {
2617     // This can happen if the mark stack overflows during a GC pause
2618     // and this task, after a yield point, restarts. We have to abort
2619     // as we need to get into the overflow protocol which happens
2620     // right at the end of this task.
2621     set_has_aborted();
2622   }
2623 
2624   // First drain any available SATB buffers. After this, we will not
2625   // look at SATB buffers before the next invocation of this method.
2626   // If enough completed SATB buffers are queued up, the regular clock
2627   // will abort this task so that it restarts.
2628   drain_satb_buffers();
2629   // ...then partially drain the local queue and the global stack
2630   drain_local_queue(true);
2631   drain_global_stack(true);
2632 
2633   do {
2634     if (!has_aborted() && _curr_region != NULL) {
2635       // This means that we're already holding on to a region.
2636       assert(_finger != NULL, "if region is not NULL, then the finger "
2637              "should not be NULL either");
2638 
2639       // We might have restarted this task after an evacuation pause
2640       // which might have evacuated the region we're holding on to
2641       // underneath our feet. Let's read its limit again to make sure
2642       // that we do not iterate over a region of the heap that
2643       // contains garbage (update_region_limit() will also move
2644       // _finger to the start of the region if it is found empty).
2645       update_region_limit();
2646       // We will start from _finger not from the start of the region,
2647       // as we might be restarting this task after aborting half-way
2648       // through scanning this region. In this case, _finger points to
2649       // the address where we last found a marked object. If this is a
2650       // fresh region, _finger points to start().
2651       MemRegion mr = MemRegion(_finger, _region_limit);
2652 
2653       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2654              "humongous regions should go around loop once only");
2655 
2656       // Some special cases:
2657       // If the memory region is empty, we can just give up the region.
2658       // If the current region is humongous then we only need to check
2659       // the bitmap for the bit associated with the start of the object,
2660       // scan the object if it's live, and give up the region.
2661       // Otherwise, let's iterate over the bitmap of the part of the region
2662       // that is left.
2663       // If the iteration is successful, give up the region.
2664       if (mr.is_empty()) {
2665         giveup_current_region();
2666         abort_marking_if_regular_check_fail();
2667       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2668         if (_next_mark_bitmap->is_marked(mr.start())) {
2669           // The object is marked - apply the closure
2670           bitmap_closure.do_addr(mr.start());
2671         }
2672         // Even if this task aborted while scanning the humongous object
2673         // we can (and should) give up the current region.
2674         giveup_current_region();
2675         abort_marking_if_regular_check_fail();
2676       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2677         giveup_current_region();
2678         abort_marking_if_regular_check_fail();
2679       } else {
2680         assert(has_aborted(), "currently the only way to do so");
2681         // The only way to abort the bitmap iteration is to return
2682         // false from the do_bit() method. However, inside the
2683         // do_bit() method we move the _finger to point to the
2684         // object currently being looked at. So, if we bail out, we
2685         // have definitely set _finger to something non-null.
2686         assert(_finger != NULL, "invariant");
2687 
2688         // Region iteration was actually aborted. So now _finger
2689         // points to the address of the object we last scanned. If we
2690         // leave it there, when we restart this task, we will rescan
2691         // the object. It is easy to avoid this. We move the finger by
2692         // enough to point to the next possible object header.
2693         assert(_finger < _region_limit, "invariant");
2694         HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2695         // Check if bitmap iteration was aborted while scanning the last object
2696         if (new_finger >= _region_limit) {
2697           giveup_current_region();
2698         } else {
2699           move_finger_to(new_finger);
2700         }
2701       }
2702     }
2703     // At this point we have either completed iterating over the
2704     // region we were holding on to, or we have aborted.
2705 
2706     // We then partially drain the local queue and the global stack.
2707     // (Do we really need this?)
2708     drain_local_queue(true);
2709     drain_global_stack(true);
2710 
2711     // Read the note on the claim_region() method on why it might
2712     // return NULL with potentially more regions available for
2713     // claiming and why we have to check out_of_regions() to determine
2714     // whether we're done or not.
2715     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2716       // We are going to try to claim a new region. We should have
2717       // given up on the previous one.
2718       // Separated the asserts so that we know which one fires.
2719       assert(_curr_region  == NULL, "invariant");
2720       assert(_finger       == NULL, "invariant");
2721       assert(_region_limit == NULL, "invariant");
2722       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2723       if (claimed_region != NULL) {
2724         // Yes, we managed to claim one
2725         setup_for_region(claimed_region);
2726         assert(_curr_region == claimed_region, "invariant");
2727       }
2728       // It is important to call the regular clock here. It might take
2729       // a while to claim a region if, for example, we hit a large
2730       // block of empty regions. So we need to call the regular clock
2731       // method once round the loop to make sure it's called
2732       // frequently enough.
2733       abort_marking_if_regular_check_fail();
2734     }
2735 
2736     if (!has_aborted() && _curr_region == NULL) {
2737       assert(_cm->out_of_regions(),
2738              "at this point we should be out of regions");
2739     }
2740   } while (_curr_region != NULL && !has_aborted());
2741 
2742   if (!has_aborted()) {
2743     // We cannot check whether the global stack is empty, since other
2744     // tasks might be pushing objects to it concurrently.
2745     assert(_cm->out_of_regions(),
2746            "at this point we should be out of regions");
2747     // Try to reduce the number of available SATB buffers so that
2748     // remark has less work to do.
2749     drain_satb_buffers();
2750   }
2751 
2752   // Since we've done everything else, we can now totally drain the
2753   // local queue and global stack.
2754   drain_local_queue(false);
2755   drain_global_stack(false);
2756 
2757   // Attempt at work stealing from other task's queues.
2758   if (do_stealing && !has_aborted()) {
2759     // We have not aborted. This means that we have finished all that
2760     // we could. Let's try to do some stealing...
2761 
2762     // We cannot check whether the global stack is empty, since other
2763     // tasks might be pushing objects to it concurrently.
2764     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2765            "only way to reach here");
2766     while (!has_aborted()) {
2767       G1TaskQueueEntry entry;
2768       if (_cm->try_stealing(_worker_id, entry)) {
2769         scan_task_entry(entry);
2770 
2771         // And since we're towards the end, let's totally drain the
2772         // local queue and global stack.
2773         drain_local_queue(false);
2774         drain_global_stack(false);
2775       } else {
2776         break;
2777       }
2778     }
2779   }
2780 
2781   // If we still haven't aborted, try to enter the termination
2782   // protocol.
2783   if (do_termination && !has_aborted()) {
2784     // We cannot check whether the global stack is empty, since other
2785     // tasks might be concurrently pushing objects on it.
2786     // Separated the asserts so that we know which one fires.
2787     assert(_cm->out_of_regions(), "only way to reach here");
2788     assert(_task_queue->size() == 0, "only way to reach here");
2789     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2790 
2791     // The G1CMTask class also extends the TerminatorTerminator class,
2792     // hence its should_exit_termination() method will also decide
2793     // whether to exit the termination protocol or not.
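         // Note: offer_termination() returns true only once every task has
         // offered termination, i.e. marking is globally complete; a false
         // return means more work may have appeared, so we abort and restart.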
2794     bool finished = (is_serial ||
2795                      _cm->terminator()->offer_termination(this));
2796     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2797     _termination_time_ms +=
2798       termination_end_time_ms - _termination_start_time_ms;
2799 
2800     if (finished) {
2801       // We're all done.
2802 
2803       // We can now guarantee that the global stack is empty, since
2804       // all other tasks have finished. We separated the guarantees so
2805       // that, if a condition is false, we can immediately find out
2806       // which one.
2807       guarantee(_cm->out_of_regions(), "only way to reach here");
2808       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2809       guarantee(_task_queue->size() == 0, "only way to reach here");
2810       guarantee(!_cm->has_overflown(), "only way to reach here");
2811       guarantee(!has_aborted(), "should never happen if termination has completed");
2812     } else {
2813       // Apparently there's more work to do. Let's abort this task so it
2814       // can be restarted; hopefully we will then find more things to do.
2815       set_has_aborted();
2816     }
2817   }
2818 
2819   // Mainly for debugging purposes to make sure that a pointer to the
2820   // closure which was statically allocated in this frame doesn't
2821   // escape it by accident.
2822   set_cm_oop_closure(NULL);
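       // os::elapsedVTime() reports the current thread's CPU ("virtual") time
       // in seconds, hence the conversions to milliseconds here and above.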
2823   double end_time_ms = os::elapsedVTime() * 1000.0;
2824   double elapsed_time_ms = end_time_ms - _start_time_ms;
2825   // Update the step history.
2826   _step_times_ms.add(elapsed_time_ms);
2827 
2828   if (has_aborted()) {
2829     // The task was aborted for some reason.
2830     if (_has_timed_out) {
2831       double diff_ms = elapsed_time_ms - _time_target_ms;
2832       // Keep statistics of how well we did with respect to hitting
2833       // our target only if we actually timed out (if we aborted for
2834       // other reasons, then the results might get skewed).
2835       _marking_step_diff_ms.add(diff_ms);
2836     }
2837 
2838     if (_cm->has_overflown()) {
2839       // This is the interesting one. We aborted because a global
2840       // overflow was raised. This means we have to restart the
2841       // marking phase and start iterating over regions. However, in
2842       // order to do this we have to make sure that all tasks stop
2843       // what they are doing and re-initialize in a safe manner. We
2844       // will achieve this with the use of two barrier sync points.
2845 
2846       if (!is_serial) {
2847         // We only need to enter the sync barrier if we are being
2848         // called from a parallel context.
2849         _cm->enter_first_sync_barrier(_worker_id);
2850 
2851         // When we exit this sync barrier we know that all tasks have
2852         // stopped doing marking work. So, it's now safe to
2853         // re-initialize our data structures.
2854       }
2855 
2856       clear_region_fields();
2857       flush_mark_stats_cache();
2858 
2859       if (!is_serial) {
2860         // If we're executing the concurrent phase of marking, reset the marking
2861         // state; otherwise the marking state is reset after reference processing,
2862         // during the remark pause.
2863         // If we reset here as a result of an overflow during the remark we will
2864         // see assertion failures from any subsequent set_concurrency_and_phase()
2865         // calls.
2866         if (_cm->concurrent() && _worker_id == 0) {
2867           // Worker 0 is responsible for clearing the global data structures because
2868           // of an overflow. During STW we should not clear the overflow flag (in
2869           // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit
2870           // this method to abort the pause and restart concurrent marking.
2871           _cm->reset_marking_for_restart();
2872 
2873           log_info(gc, marking)("Concurrent Mark reset for overflow");
2874         }
2875 
2876         // ...and enter the second barrier.
2877         _cm->enter_second_sync_barrier(_worker_id);
2878       }
2879       // At this point, if we're in the concurrent phase of
2880       // marking, everything has been re-initialized and we're
2881       // ready to restart.
2882     }
2883   }
2884 }
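
     // A sketch of how callers typically drive a marking step; the parameter
     // values below are illustrative (see the concurrent marking and remark
     // tasks for the real call sites):
     //
     //   task->do_marking_step(G1ConcMarkStepDurationMillis,
     //                         true  /* do_termination */,
     //                         false /* is_serial */);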
2885 
2886 G1CMTask::G1CMTask(uint worker_id,
2887                    G1ConcurrentMark* cm,
2888                    G1CMTaskQueue* task_queue,
2889                    G1RegionMarkStats* mark_stats,
2890                    uint max_regions) :
2891   _objArray_processor(this),
2892   _worker_id(worker_id),
2893   _g1h(G1CollectedHeap::heap()),
2894   _cm(cm),
2895   _next_mark_bitmap(NULL),
2896   _task_queue(task_queue),
2897   _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2898   _calls(0),
2899   _time_target_ms(0.0),
2900   _start_time_ms(0.0),
2901   _cm_oop_closure(NULL),
2902   _curr_region(NULL),
2903   _finger(NULL),
2904   _region_limit(NULL),
2905   _words_scanned(0),
2906   _words_scanned_limit(0),
2907   _real_words_scanned_limit(0),
2908   _refs_reached(0),
2909   _refs_reached_limit(0),
2910   _real_refs_reached_limit(0),
2911   _has_aborted(false),
2912   _has_timed_out(false),
2913   _draining_satb_buffers(false),
2914   _step_times_ms(),
2915   _elapsed_time_ms(0.0),
2916   _termination_time_ms(0.0),
2917   _termination_start_time_ms(0.0),
2918   _marking_step_diff_ms()
2919 {
2920   guarantee(task_queue != NULL, "invariant");
2921 
2922   _marking_step_diff_ms.add(0.5);
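       // Seed the step-time diff statistics with a small sample so the very
       // first prediction is non-zero (the exact value is a heuristic).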
2923 }
2924 
2925 // These are formatting macros that are used below to ensure
2926 // consistent formatting. The *_H_* versions are used to format the
2927 // header for a particular value and they should be kept consistent
2928 // with the corresponding macro. Also note that most of the macros add
2929 // the necessary white space (as a prefix) which makes them a bit
2930 // easier to compose.
2931 
2932 // All the output lines are prefixed with this string to be able to
2933 // identify them easily in a large log file.
2934 #define G1PPRL_LINE_PREFIX            "###"
2935 
2936 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
2937 #ifdef _LP64
2938 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
2939 #else // _LP64
2940 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
2941 #endif // _LP64
2942 
2943 // For per-region info
2944 #define G1PPRL_TYPE_FORMAT            "   %-4s"
2945 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
2946 #define G1PPRL_STATE_FORMAT           "   %-5s"
2947 #define G1PPRL_STATE_H_FORMAT         "   %5s"
2948 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
2949 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
2950 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
2951 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
2952 
2953 // For summary info
2954 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
2955 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
2956 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
2957 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
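     // Composed, these macros produce log lines of the form (values
     // purely illustrative):
     //   ### HEAP  reserved: 0x00000000c0000000-0x0000000100000000  region-size: 1048576
     //   ###  OLD 0x00000000c0000000-0x00000000c0100000    1048576 ...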
2958 
2959 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2960   _total_used_bytes(0), _total_capacity_bytes(0),
2961   _total_prev_live_bytes(0), _total_next_live_bytes(0),
2962   _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2963 {
2964   if (!log_is_enabled(Trace, gc, liveness)) {
2965     return;
2966   }
2967 
2968   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2969   MemRegion g1_reserved = g1h->g1_reserved();
2970   double now = os::elapsedTime();
2971 
2972   // Print the header of the output.
2973   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2974   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2975                           G1PPRL_SUM_ADDR_FORMAT("reserved")
2976                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
2977                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
2978                           HeapRegion::GrainBytes);
2979   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2980   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2981                           G1PPRL_TYPE_H_FORMAT
2982                           G1PPRL_ADDR_BASE_H_FORMAT
2983                           G1PPRL_BYTE_H_FORMAT
2984                           G1PPRL_BYTE_H_FORMAT
2985                           G1PPRL_BYTE_H_FORMAT
2986                           G1PPRL_DOUBLE_H_FORMAT
2987                           G1PPRL_BYTE_H_FORMAT
2988                           G1PPRL_STATE_H_FORMAT
2989                           G1PPRL_BYTE_H_FORMAT,
2990                           "type", "address-range",
2991                           "used", "prev-live", "next-live", "gc-eff",
2992                           "remset", "state", "code-roots");
2993   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2994                           G1PPRL_TYPE_H_FORMAT
2995                           G1PPRL_ADDR_BASE_H_FORMAT
2996                           G1PPRL_BYTE_H_FORMAT
2997                           G1PPRL_BYTE_H_FORMAT
2998                           G1PPRL_BYTE_H_FORMAT
2999                           G1PPRL_DOUBLE_H_FORMAT
3000                           G1PPRL_BYTE_H_FORMAT
3001                           G1PPRL_STATE_H_FORMAT
3002                           G1PPRL_BYTE_H_FORMAT,
3003                           "", "",
3004                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3005                           "(bytes)", "", "(bytes)");
3006 }
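
     // Typical use is as a HeapRegionClosure driven over the whole heap,
     // roughly:
     //
     //   G1PrintRegionLivenessInfoClosure cl("Post-Marking");
     //   G1CollectedHeap::heap()->heap_region_iterate(&cl);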
3007 
3008 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
3009   if (!log_is_enabled(Trace, gc, liveness)) {
3010     return false;
3011   }
3012 
3013   const char* type       = r->get_type_str();
3014   HeapWord* bottom       = r->bottom();
3015   HeapWord* end          = r->end();
3016   size_t capacity_bytes  = r->capacity();
3017   size_t used_bytes      = r->used();
3018   size_t prev_live_bytes = r->live_bytes();
3019   size_t next_live_bytes = r->next_live_bytes();
3020   double gc_eff          = r->gc_efficiency();
3021   size_t remset_bytes    = r->rem_set()->mem_size();
3022   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3023   const char* remset_type = r->rem_set()->get_short_state_str();
3024 
3025   _total_used_bytes      += used_bytes;
3026   _total_capacity_bytes  += capacity_bytes;
3027   _total_prev_live_bytes += prev_live_bytes;
3028   _total_next_live_bytes += next_live_bytes;
3029   _total_remset_bytes    += remset_bytes;
3030   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3031 
3032   // Print a line for this particular region.
3033   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3034                           G1PPRL_TYPE_FORMAT
3035                           G1PPRL_ADDR_BASE_FORMAT
3036                           G1PPRL_BYTE_FORMAT
3037                           G1PPRL_BYTE_FORMAT
3038                           G1PPRL_BYTE_FORMAT
3039                           G1PPRL_DOUBLE_FORMAT
3040                           G1PPRL_BYTE_FORMAT
3041                           G1PPRL_STATE_FORMAT
3042                           G1PPRL_BYTE_FORMAT,
3043                           type, p2i(bottom), p2i(end),
3044                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3045                           remset_bytes, remset_type, strong_code_roots_bytes);
3046 
3047   return false;
3048 }
3049 
3050 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3051   if (!log_is_enabled(Trace, gc, liveness)) {
3052     return;
3053   }
3054 
3055   // Add static memory usage to the remembered set sizes.
3056   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3057   // Print the footer of the output.
3058   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3059   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3060                          " SUMMARY"
3061                          G1PPRL_SUM_MB_FORMAT("capacity")
3062                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3063                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3064                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3065                          G1PPRL_SUM_MB_FORMAT("remset")
3066                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3067                          bytes_to_mb(_total_capacity_bytes),
3068                          bytes_to_mb(_total_used_bytes),
3069                          percent_of(_total_used_bytes, _total_capacity_bytes),
3070                          bytes_to_mb(_total_prev_live_bytes),
3071                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3072                          bytes_to_mb(_total_next_live_bytes),
3073                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
3074                          bytes_to_mb(_total_remset_bytes),
3075                          bytes_to_mb(_total_strong_code_roots_bytes));
3076 }
--- EOF ---