1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/g1BarrierSet.hpp"
  30 #include "gc/g1/g1CollectedHeap.inline.hpp"
  31 #include "gc/g1/g1CollectorState.hpp"
  32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  33 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  34 #include "gc/g1/g1HeapVerifier.hpp"
  35 #include "gc/g1/g1OopClosures.inline.hpp"
  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/g1ThreadLocalData.hpp"
  40 #include "gc/g1/heapRegion.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/shared/adaptiveSizePolicy.hpp"
  44 #include "gc/shared/gcId.hpp"
  45 #include "gc/shared/gcTimer.hpp"
  46 #include "gc/shared/gcTrace.hpp"
  47 #include "gc/shared/gcTraceTime.inline.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/suspendibleThreadSet.hpp"
  52 #include "gc/shared/taskqueue.inline.hpp"
  53 #include "gc/shared/vmGCOperations.hpp"
  54 #include "gc/shared/weakProcessor.hpp"
  55 #include "include/jvm.h"
  56 #include "logging/log.hpp"
  57 #include "memory/allocation.hpp"
  58 #include "memory/resourceArea.hpp"
  59 #include "oops/access.inline.hpp"
  60 #include "oops/oop.inline.hpp"
  61 #include "runtime/atomic.hpp"
  62 #include "runtime/handles.inline.hpp"
  63 #include "runtime/java.hpp"
  64 #include "runtime/prefetch.inline.hpp"
  65 #include "services/memTracker.hpp"
  66 #include "utilities/align.hpp"
  67 #include "utilities/growableArray.hpp"
  68 
  69 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  70   assert(addr < _cm->finger(), "invariant");
  71   assert(addr >= _task->finger(), "invariant");
  72 
  73   // We move that task's local finger along.
  74   _task->move_finger_to(addr);
  75 
  76   _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  77   // we only partially drain the local queue and global stack
  78   _task->drain_local_queue(true);
  79   _task->drain_global_stack(true);
  80 
  81   // if the has_aborted flag has been raised, we need to bail out of
  82   // the iteration
  83   return !_task->has_aborted();
  84 }
  85 
  86 G1CMMarkStack::G1CMMarkStack() :
  87   _max_chunk_capacity(0),
  88   _base(NULL),
  89   _chunk_capacity(0) {
  90   set_empty();
  91 }
  92 
  93 bool G1CMMarkStack::resize(size_t new_capacity) {
  94   assert(is_empty(), "Only resize when stack is empty.");
  95   assert(new_capacity <= _max_chunk_capacity,
  96          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
  97 
  98   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
  99 
 100   if (new_base == NULL) {
 101     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
 102     return false;
 103   }
 104   // Release old mapping.
 105   if (_base != NULL) {
 106     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 107   }
 108 
 109   _base = new_base;
 110   _chunk_capacity = new_capacity;
 111   set_empty();
 112 
 113   return true;
 114 }
 115 
 116 size_t G1CMMarkStack::capacity_alignment() {
 117   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 118 }
 119 
 120 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 121   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 122 
 123   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 124 
 125   _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 126   size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 127 
 128   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 129             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 130             _max_chunk_capacity,
 131             initial_chunk_capacity);
 132 
 133   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 134                 initial_chunk_capacity, _max_chunk_capacity);
 135 
 136   return resize(initial_chunk_capacity);
 137 }
 138 
 139 void G1CMMarkStack::expand() {
 140   if (_chunk_capacity == _max_chunk_capacity) {
 141     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 142     return;
 143   }
 144   size_t old_capacity = _chunk_capacity;
 145   // Double capacity if possible
 146   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 147 
 148   if (resize(new_capacity)) {
 149     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 150                   old_capacity, new_capacity);
 151   } else {
 152     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 153                     old_capacity, new_capacity);
 154   }
 155 }
 156 
 157 G1CMMarkStack::~G1CMMarkStack() {
 158   if (_base != NULL) {
 159     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 160   }
 161 }
 162 
 163 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
 164   elem->next = *list;
 165   *list = elem;
 166 }
 167 
 168 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
 169   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 170   add_chunk_to_list(&_chunk_list, elem);
 171   _chunks_in_chunk_list++;
 172 }
 173 
 174 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
 175   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 176   add_chunk_to_list(&_free_list, elem);
 177 }
 178 
 179 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
 180   TaskQueueEntryChunk* result = *list;
 181   if (result != NULL) {
 182     *list = (*list)->next;
 183   }
 184   return result;
 185 }
 186 
 187 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 188   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 189   TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
 190   if (result != NULL) {
 191     _chunks_in_chunk_list--;
 192   }
 193   return result;
 194 }
 195 
 196 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 197   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 198   return remove_chunk_from_list(&_free_list);
 199 }
 200 
 201 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
 202   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 203   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 204   // wraparound of _hwm.
 205   if (_hwm >= _chunk_capacity) {
 206     return NULL;
 207   }
 208 
 209   size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
 210   if (cur_idx >= _chunk_capacity) {
 211     return NULL;
 212   }
 213 
 214   TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
 215   result->next = NULL;
 216   return result;
 217 }
 218 
 219 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
 220   // Get a new chunk.
 221   TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
 222 
 223   if (new_chunk == NULL) {
 224     // Did not get a chunk from the free list. Allocate from backing memory.
 225     new_chunk = allocate_new_chunk();
 226 
 227     if (new_chunk == NULL) {
 228       return false;
 229     }
 230   }
 231 
 232   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 233 
 234   add_chunk_to_chunk_list(new_chunk);
 235 
 236   return true;
 237 }
 238 
 239 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
 240   TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
 241 
 242   if (cur == NULL) {
 243     return false;
 244   }
 245 
 246   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
 247 
 248   add_chunk_to_free_list(cur);
 249   return true;
 250 }
 251 
 252 void G1CMMarkStack::set_empty() {
 253   _chunks_in_chunk_list = 0;
 254   _hwm = 0;
 255   _chunk_list = NULL;
 256   _free_list = NULL;
 257 }
 258 
 259 G1CMRootRegions::G1CMRootRegions() :
 260   _survivors(NULL), _cm(NULL), _scan_in_progress(false),
 261   _should_abort(false), _claimed_survivor_index(0) { }
 262 
 263 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
 264   _survivors = survivors;
 265   _cm = cm;
 266 }
 267 
 268 void G1CMRootRegions::prepare_for_scan() {
 269   assert(!scan_in_progress(), "pre-condition");
 270 
 271   // Currently, only survivors can be root regions.
 272   _claimed_survivor_index = 0;
 273   _scan_in_progress = _survivors->regions()->is_nonempty();
 274   _should_abort = false;
 275 }
 276 
 277 HeapRegion* G1CMRootRegions::claim_next() {
 278   if (_should_abort) {
 279     // If someone has set the should_abort flag, we return NULL to
 280     // force the caller to bail out of their loop.
 281     return NULL;
 282   }
 283 
 284   // Currently, only survivors can be root regions.
 285   const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();
 286 
 287   int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
 288   if (claimed_index < survivor_regions->length()) {
 289     return survivor_regions->at(claimed_index);
 290   }
 291   return NULL;
 292 }
 293 
 294 uint G1CMRootRegions::num_root_regions() const {
 295   return (uint)_survivors->regions()->length();
 296 }
 297 
 298 void G1CMRootRegions::notify_scan_done() {
 299   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 300   _scan_in_progress = false;
 301   RootRegionScan_lock->notify_all();
 302 }
 303 
 304 void G1CMRootRegions::cancel_scan() {
 305   notify_scan_done();
 306 }
 307 
 308 void G1CMRootRegions::scan_finished() {
 309   assert(scan_in_progress(), "pre-condition");
 310 
 311   // Currently, only survivors can be root regions.
 312   if (!_should_abort) {
 313     assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
 314     assert((uint)_claimed_survivor_index >= _survivors->length(),
 315            "we should have claimed all survivors, claimed index = %u, length = %u",
 316            (uint)_claimed_survivor_index, _survivors->length());
 317   }
 318 
 319   notify_scan_done();
 320 }
 321 
 322 bool G1CMRootRegions::wait_until_scan_finished() {
 323   if (!scan_in_progress()) {
 324     return false;
 325   }
 326 
 327   {
 328     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 329     while (scan_in_progress()) {
 330       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 331     }
 332   }
 333   return true;
 334 }
 335 
 336 // Returns the maximum number of workers to be used in a concurrent
 337 // phase based on the number of GC workers being used in a STW
 338 // phase.
 339 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
 340   return MAX2((num_gc_workers + 2) / 4, 1U);
 341 }
 342 
 343 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
 344                                    G1RegionToSpaceMapper* prev_bitmap_storage,
 345                                    G1RegionToSpaceMapper* next_bitmap_storage) :
 346   // _cm_thread set inside the constructor
 347   _g1h(g1h),
 348   _completed_initialization(false),
 349 
 350   _mark_bitmap_1(),
 351   _mark_bitmap_2(),
 352   _prev_mark_bitmap(&_mark_bitmap_1),
 353   _next_mark_bitmap(&_mark_bitmap_2),
 354 
 355   _heap(_g1h->reserved_region()),
 356 
 357   _root_regions(),
 358 
 359   _global_mark_stack(),
 360 
 361   // _finger set in set_non_marking_state
 362 
 363   _worker_id_offset(DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
 364   _max_num_tasks(ParallelGCThreads),
 365   // _num_active_tasks set in set_non_marking_state()
 366   // _tasks set inside the constructor
 367 
 368   _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
 369   _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),
 370 
 371   _first_overflow_barrier_sync(),
 372   _second_overflow_barrier_sync(),
 373 
 374   _has_overflown(false),
 375   _concurrent(false),
 376   _has_aborted(false),
 377   _restart_for_overflow(false),
 378   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 379   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 380 
 381   // _verbose_level set below
 382 
 383   _init_times(),
 384   _remark_times(),
 385   _remark_mark_times(),
 386   _remark_weak_ref_times(),
 387   _cleanup_times(),
 388   _total_cleanup_time(0.0),
 389 
 390   _accum_task_vtime(NULL),
 391 
 392   _concurrent_workers(NULL),
 393   _num_concurrent_workers(0),
 394   _max_concurrent_workers(0),
 395 
 396   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 397   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 398 {
 399   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 400   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 401 
 402   // Create & start ConcurrentMark thread.
 403   _cm_thread = new G1ConcurrentMarkThread(this);
 404   if (_cm_thread->osthread() == NULL) {
 405     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 406   }
 407 
 408   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 409 
 410   SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
 411   satb_qs.set_buffer_size(G1SATBBufferSize);
 412 
 413   _root_regions.init(_g1h->survivor(), this);
 414 
 415   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 416     // Calculate the number of concurrent worker threads by scaling
 417     // the number of parallel GC threads.
 418     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 419     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 420   }
 421 
 422   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 423   if (ConcGCThreads > ParallelGCThreads) {
 424     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 425                     ConcGCThreads, ParallelGCThreads);
 426     return;
 427   }
 428 
 429   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 430   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 431 
 432   _num_concurrent_workers = ConcGCThreads;
 433   _max_concurrent_workers = _num_concurrent_workers;
 434 
 435   _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
 436   _concurrent_workers->initialize_workers();
 437 
 438   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 439     size_t mark_stack_size =
 440       MIN2(MarkStackSizeMax,
 441           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
 442     // Verify that the calculated value for MarkStackSize is in range.
 443     // It would be nice to use the private utility routine from Arguments.
 444     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 445       log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 446                       "must be between 1 and " SIZE_FORMAT,
 447                       mark_stack_size, MarkStackSizeMax);
 448       return;
 449     }
 450     FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
 451   } else {
 452     // Verify MarkStackSize is in range.
 453     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 454       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 455         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 456           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 457                           "must be between 1 and " SIZE_FORMAT,
 458                           MarkStackSize, MarkStackSizeMax);
 459           return;
 460         }
 461       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 462         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 463           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 464                           " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 465                           MarkStackSize, MarkStackSizeMax);
 466           return;
 467         }
 468       }
 469     }
 470   }
 471 
 472   if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
 473     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
 474   }
 475 
 476   _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
 477   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);
 478 
 479   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 480   _num_active_tasks = _max_num_tasks;
 481 
 482   for (uint i = 0; i < _max_num_tasks; ++i) {
 483     G1CMTaskQueue* task_queue = new G1CMTaskQueue();
 484     task_queue->initialize();
 485     _task_queues->register_queue(i, task_queue);
 486 
 487     _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());
 488 
 489     _accum_task_vtime[i] = 0.0;
 490   }
 491 
 492   reset_at_marking_complete();
 493   _completed_initialization = true;
 494 }
 495 
 496 void G1ConcurrentMark::reset() {
 497   _has_aborted = false;
 498 
 499   reset_marking_for_restart();
 500 
 501   // Reset all tasks, since different phases will use different number of active
 502   // threads. So, it's easiest to have all of them ready.
 503   for (uint i = 0; i < _max_num_tasks; ++i) {
 504     _tasks[i]->reset(_next_mark_bitmap);
 505   }
 506 
 507   uint max_regions = _g1h->max_regions();
 508   for (uint i = 0; i < max_regions; i++) {
 509     _top_at_rebuild_starts[i] = NULL;
 510     _region_mark_stats[i].clear();
 511   }
 512 }
 513 
 514 void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
 515   for (uint j = 0; j < _max_num_tasks; ++j) {
 516     _tasks[j]->clear_mark_stats_cache(region_idx);
 517   }
 518   _top_at_rebuild_starts[region_idx] = NULL;
 519   _region_mark_stats[region_idx].clear();
 520 }
 521 
 522 void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
 523   uint const region_idx = r->hrm_index();
 524   if (r->is_humongous()) {
 525     assert(r->is_starts_humongous(), "Got humongous continues region here");
 526     uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
 527     for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
 528       clear_statistics_in_region(j);
 529     }
 530   } else {
 531     clear_statistics_in_region(region_idx);
 532   }
 533 }
 534 
 535 static void clear_mark_if_set(G1CMBitMap* bitmap, HeapWord* addr) {
 536   if (bitmap->is_marked(addr)) {
 537     bitmap->clear(addr);
 538   }
 539 }
 540 
 541 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
 542   assert_at_safepoint_on_vm_thread();
 543 
 544   // Need to clear all mark bits of the humongous object.
 545   clear_mark_if_set(_prev_mark_bitmap, r->bottom());
 546   clear_mark_if_set(_next_mark_bitmap, r->bottom());
 547 
 548   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
 549     return;
 550   }
 551 
 552   // Clear any statistics about the region gathered so far.
 553   clear_statistics(r);
 554 }
 555 
 556 void G1ConcurrentMark::reset_marking_for_restart() {
 557   _global_mark_stack.set_empty();
 558 
 559   // Expand the marking stack, if we have to and if we can.
 560   if (has_overflown()) {
 561     _global_mark_stack.expand();
 562 
 563     uint max_regions = _g1h->max_regions();
 564     for (uint i = 0; i < max_regions; i++) {
 565       _region_mark_stats[i].clear_during_overflow();
 566     }
 567   }
 568 
 569   clear_has_overflown();
 570   _finger = _heap.start();
 571 
 572   for (uint i = 0; i < _max_num_tasks; ++i) {
 573     G1CMTaskQueue* queue = _task_queues->queue(i);
 574     queue->set_empty();
 575   }
 576 }
 577 
 578 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
 579   assert(active_tasks <= _max_num_tasks, "we should not have more");
 580 
 581   _num_active_tasks = active_tasks;
 582   // Need to update the three data structures below according to the
 583   // number of active threads for this phase.
 584   _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
 585   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 586   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 587 }
 588 
 589 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 590   set_concurrency(active_tasks);
 591 
 592   _concurrent = concurrent;
 593 
 594   if (!concurrent) {
 595     // At this point we should be in a STW phase, and completed marking.
 596     assert_at_safepoint_on_vm_thread();
 597     assert(out_of_regions(),
 598            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 599            p2i(_finger), p2i(_heap.end()));
 600   }
 601 }
 602 
 603 void G1ConcurrentMark::reset_at_marking_complete() {
 604   // We set the global marking state to some default values when we're
 605   // not doing marking.
 606   reset_marking_for_restart();
 607   _num_active_tasks = 0;
 608 }
 609 
 610 G1ConcurrentMark::~G1ConcurrentMark() {
 611   FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
 612   FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
 613   // The G1ConcurrentMark instance is never freed.
 614   ShouldNotReachHere();
 615 }
 616 
 617 class G1ClearBitMapTask : public AbstractGangTask {
 618 public:
 619   static size_t chunk_size() { return M; }
 620 
 621 private:
 622   // Heap region closure used for clearing the given mark bitmap.
 623   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 624   private:
 625     G1CMBitMap* _bitmap;
 626     G1ConcurrentMark* _cm;
 627   public:
 628     G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
 629     }
 630 
 631     virtual bool do_heap_region(HeapRegion* r) {
 632       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 633 
 634       HeapWord* cur = r->bottom();
 635       HeapWord* const end = r->end();
 636 
 637       while (cur < end) {
 638         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 639         _bitmap->clear_range(mr);
 640 
 641         cur += chunk_size_in_words;
 642 
 643         // Abort iteration if after yielding the marking has been aborted.
 644         if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
 645           return true;
 646         }
 647         // Repeat the asserts from before the start of the closure. We will do them
 648         // as asserts here to minimize their overhead on the product. However, we
 649         // will have them as guarantees at the beginning / end of the bitmap
 650         // clearing to get some checking in the product.
 651         assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
 652         assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 653       }
 654       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 655 
 656       return false;
 657     }
 658   };
 659 
 660   G1ClearBitmapHRClosure _cl;
 661   HeapRegionClaimer _hr_claimer;
 662   bool _suspendible; // If the task is suspendible, workers must join the STS.
 663 
 664 public:
 665   G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 666     AbstractGangTask("G1 Clear Bitmap"),
 667     _cl(bitmap, suspendible ? cm : NULL),
 668     _hr_claimer(n_workers),
 669     _suspendible(suspendible)
 670   { }
 671 
 672   void work(uint worker_id) {
 673     SuspendibleThreadSetJoiner sts_join(_suspendible);
 674     G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
 675   }
 676 
 677   bool is_complete() {
 678     return _cl.is_complete();
 679   }
 680 };
 681 
 682 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 683   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 684 
 685   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 686   size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 687 
 688   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 689 
 690   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 691 
 692   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 693   workers->run_task(&cl, num_workers);
 694   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 695 }
 696 
 697 void G1ConcurrentMark::cleanup_for_next_mark() {
 698   // Make sure that the concurrent mark thread looks to still be in
 699   // the current cycle.
 700   guarantee(cm_thread()->during_cycle(), "invariant");
 701 
 702   // We are finishing up the current cycle by clearing the next
 703   // marking bitmap and getting it ready for the next cycle. During
 704   // this time no other cycle can start. So, let's make sure that this
 705   // is the case.
 706   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 707 
 708   clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
 709 
 710   // Repeat the asserts from above.
 711   guarantee(cm_thread()->during_cycle(), "invariant");
 712   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 713 }
 714 
 715 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 716   assert_at_safepoint_on_vm_thread();
 717   clear_bitmap(_prev_mark_bitmap, workers, false);
 718 }
 719 
 720 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 721   G1CMBitMap* _bitmap;
 722  public:
 723   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
 724   }
 725 
 726   virtual bool do_heap_region(HeapRegion* r) {
 727     // This closure can be called concurrently to the mutator, so we must make sure
 728     // that the result of the getNextMarkedWordAddress() call is compared to the
 729     // value passed to it as limit to detect any found bits.
 730     // end never changes in G1.
 731     HeapWord* end = r->end();
 732     return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
 733   }
 734 };
 735 
 736 bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
 737   CheckBitmapClearHRClosure cl(_next_mark_bitmap);
 738   _g1h->heap_region_iterate(&cl);
 739   return cl.is_complete();
 740 }
 741 
 742 class NoteStartOfMarkHRClosure : public HeapRegionClosure {
 743 public:
 744   bool do_heap_region(HeapRegion* r) {
 745     r->note_start_of_marking();
 746     return false;
 747   }
 748 };
 749 
 750 void G1ConcurrentMark::pre_initial_mark() {
 751   // Initialize marking structures. This has to be done in a STW phase.
 752   reset();
 753 
 754   // For each region note start of marking.
 755   NoteStartOfMarkHRClosure startcl;
 756   _g1h->heap_region_iterate(&startcl);
 757 }
 758 
 759 
 760 void G1ConcurrentMark::post_initial_mark() {
 761   // Start Concurrent Marking weak-reference discovery.
 762   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 763   // enable ("weak") refs discovery
 764   rp->enable_discovery();
 765   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 766 
 767   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
 768   // This is the start of  the marking cycle, we're expected all
 769   // threads to have SATB queues with active set to false.
 770   satb_mq_set.set_active_all_threads(true, /* new active value */
 771                                      false /* expected_active */);
 772 
 773   _root_regions.prepare_for_scan();
 774 
 775   // update_g1_committed() will be called at the end of an evac pause
 776   // when marking is on. So, it's also called at the end of the
 777   // initial-mark pause to update the heap end, if the heap expands
 778   // during it. No need to call it here.
 779 }
 780 
 781 /*
 782  * Notice that in the next two methods, we actually leave the STS
 783  * during the barrier sync and join it immediately afterwards. If we
 784  * do not do this, the following deadlock can occur: one thread could
 785  * be in the barrier sync code, waiting for the other thread to also
 786  * sync up, whereas another one could be trying to yield, while also
 787  * waiting for the other threads to sync up too.
 788  *
 789  * Note, however, that this code is also used during remark and in
 790  * this case we should not attempt to leave / enter the STS, otherwise
 791  * we'll either hit an assert (debug / fastdebug) or deadlock
 792  * (product). So we should only leave / enter the STS if we are
 793  * operating concurrently.
 794  *
 795  * Because the thread that does the sync barrier has left the STS, it
 796  * is possible to be suspended for a Full GC or an evacuation pause
 797  * could occur. This is actually safe, since the entering the sync
 798  * barrier is one of the last things do_marking_step() does, and it
 799  * doesn't manipulate any data structures afterwards.
 800  */
 801 
 802 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 803   bool barrier_aborted;
 804   {
 805     SuspendibleThreadSetLeaver sts_leave(concurrent());
 806     barrier_aborted = !_first_overflow_barrier_sync.enter();
 807   }
 808 
 809   // at this point everyone should have synced up and not be doing any
 810   // more work
 811 
 812   if (barrier_aborted) {
 813     // If the barrier aborted we ignore the overflow condition and
 814     // just abort the whole marking phase as quickly as possible.
 815     return;
 816   }
 817 }
 818 
 819 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 820   SuspendibleThreadSetLeaver sts_leave(concurrent());
 821   _second_overflow_barrier_sync.enter();
 822 
 823   // at this point everything should be re-initialized and ready to go
 824 }
 825 
 826 class G1CMConcurrentMarkingTask : public AbstractGangTask {
 827   G1ConcurrentMark*     _cm;
 828 
 829 public:
 830   void work(uint worker_id) {
 831     assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
 832     ResourceMark rm;
 833 
 834     double start_vtime = os::elapsedVTime();
 835 
 836     {
 837       SuspendibleThreadSetJoiner sts_join;
 838 
 839       assert(worker_id < _cm->active_tasks(), "invariant");
 840 
 841       G1CMTask* task = _cm->task(worker_id);
 842       task->record_start_time();
 843       if (!_cm->has_aborted()) {
 844         do {
 845           task->do_marking_step(G1ConcMarkStepDurationMillis,
 846                                 true  /* do_termination */,
 847                                 false /* is_serial*/);
 848 
 849           _cm->do_yield_check();
 850         } while (!_cm->has_aborted() && task->has_aborted());
 851       }
 852       task->record_end_time();
 853       guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
 854     }
 855 
 856     double end_vtime = os::elapsedVTime();
 857     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 858   }
 859 
 860   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
 861       AbstractGangTask("Concurrent Mark"), _cm(cm) { }
 862 
 863   ~G1CMConcurrentMarkingTask() { }
 864 };
 865 
 866 uint G1ConcurrentMark::calc_active_marking_workers() {
 867   uint result = 0;
 868   if (!UseDynamicNumberOfGCThreads ||
 869       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 870        !ForceDynamicNumberOfGCThreads)) {
 871     result = _max_concurrent_workers;
 872   } else {
 873     result =
 874       AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
 875                                                       1, /* Minimum workers */
 876                                                       _num_concurrent_workers,
 877                                                       Threads::number_of_non_daemon_threads());
 878     // Don't scale the result down by scale_concurrent_workers() because
 879     // that scaling has already gone into "_max_concurrent_workers".
 880   }
 881   assert(result > 0 && result <= _max_concurrent_workers,
 882          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 883          _max_concurrent_workers, result);
 884   return result;
 885 }
 886 
 887 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
 888   // Currently, only survivors can be root regions.
 889   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
 890   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 891 
 892   const uintx interval = PrefetchScanIntervalInBytes;
 893   HeapWord* curr = hr->bottom();
 894   const HeapWord* end = hr->top();
 895   while (curr < end) {
 896     Prefetch::read(curr, interval);
 897     oop obj = oop(curr);
 898     int size = obj->oop_iterate_size(&cl);
 899     assert(size == obj->size(), "sanity");
 900     curr += size;
 901   }
 902 }
 903 
 904 class G1CMRootRegionScanTask : public AbstractGangTask {
 905   G1ConcurrentMark* _cm;
 906 public:
 907   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
 908     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
 909 
 910   void work(uint worker_id) {
 911     assert(Thread::current()->is_ConcurrentGC_thread(),
 912            "this should only be done by a conc GC thread");
 913 
 914     G1CMRootRegions* root_regions = _cm->root_regions();
 915     HeapRegion* hr = root_regions->claim_next();
 916     while (hr != NULL) {
 917       _cm->scan_root_region(hr, worker_id);
 918       hr = root_regions->claim_next();
 919     }
 920   }
 921 };
 922 
 923 void G1ConcurrentMark::scan_root_regions() {
 924   // scan_in_progress() will have been set to true only if there was
 925   // at least one root region to scan. So, if it's false, we
 926   // should not attempt to do any further work.
 927   if (root_regions()->scan_in_progress()) {
 928     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
 929 
 930     _num_concurrent_workers = MIN2(calc_active_marking_workers(),
 931                                    // We distribute work on a per-region basis, so starting
 932                                    // more threads than that is useless.
 933                                    root_regions()->num_root_regions());
 934     assert(_num_concurrent_workers <= _max_concurrent_workers,
 935            "Maximum number of marking threads exceeded");
 936 
 937     G1CMRootRegionScanTask task(this);
 938     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
 939                         task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
 940     _concurrent_workers->run_task(&task, _num_concurrent_workers);
 941 
 942     // It's possible that has_aborted() is true here without actually
 943     // aborting the survivor scan earlier. This is OK as it's
 944     // mainly used for sanity checking.
 945     root_regions()->scan_finished();
 946   }
 947 }
 948 
 949 void G1ConcurrentMark::concurrent_cycle_start() {
 950   _gc_timer_cm->register_gc_start();
 951 
 952   _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
 953 
 954   _g1h->trace_heap_before_gc(_gc_tracer_cm);
 955 }
 956 
 957 void G1ConcurrentMark::concurrent_cycle_end() {
 958   _g1h->collector_state()->set_clearing_next_bitmap(false);
 959 
 960   _g1h->trace_heap_after_gc(_gc_tracer_cm);
 961 
 962   if (has_aborted()) {
 963     log_info(gc, marking)("Concurrent Mark Abort");
 964     _gc_tracer_cm->report_concurrent_mode_failure();
 965   }
 966 
 967   _gc_timer_cm->register_gc_end();
 968 
 969   _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 970 }
 971 
 972 void G1ConcurrentMark::mark_from_roots() {
 973   _restart_for_overflow = false;
 974 
 975   _num_concurrent_workers = calc_active_marking_workers();
 976 
 977   uint active_workers = MAX2(1U, _num_concurrent_workers);
 978 
 979   // Setting active workers is not guaranteed since fewer
 980   // worker threads may currently exist and more may not be
 981   // available.
 982   active_workers = _concurrent_workers->update_active_workers(active_workers);
 983   log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
 984 
 985   // Parallel task terminator is set in "set_concurrency_and_phase()"
 986   set_concurrency_and_phase(active_workers, true /* concurrent */);
 987 
 988   G1CMConcurrentMarkingTask marking_task(this);
 989   _concurrent_workers->run_task(&marking_task);
 990   print_stats();
 991 }
 992 
 993 void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
 994   G1HeapVerifier* verifier = _g1h->verifier();
 995 
 996   verifier->verify_region_sets_optional();
 997 
 998   if (VerifyDuringGC) {
 999     GCTraceTime(Debug, gc, phases) debug(caller, _gc_timer_cm);
1000 
1001     size_t const BufLen = 512;
1002     char buffer[BufLen];
1003 
1004     jio_snprintf(buffer, BufLen, "During GC (%s)", caller);
1005     verifier->verify(type, vo, buffer);
1006   }
1007 
1008   verifier->check_bitmaps(caller);
1009 }
1010 
1011 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
1012   G1CollectedHeap* _g1h;
1013   G1ConcurrentMark* _cm;
1014   HeapRegionClaimer _hrclaimer;
1015   uint volatile _total_selected_for_rebuild;
1016 
1017   G1PrintRegionLivenessInfoClosure _cl;
1018 
1019   class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1020     G1CollectedHeap* _g1h;
1021     G1ConcurrentMark* _cm;
1022 
1023     G1PrintRegionLivenessInfoClosure* _cl;
1024 
1025     uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1026 
1027     void update_remset_before_rebuild(HeapRegion * hr) {
1028       G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1029 
1030       size_t const live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
1031       bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1032       if (selected_for_rebuild) {
1033         _num_regions_selected_for_rebuild++;
1034       }
1035       _cm->update_top_at_rebuild_start(hr);
1036     }
1037 
1038     // Distribute the given words across the humongous object starting with hr and
1039     // note end of marking.
1040     void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1041       uint const region_idx = hr->hrm_index();
1042       size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
1043       uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
1044 
1045       // "Distributing" zero words means that we only note end of marking for these
1046       // regions.
1047       assert(marked_words == 0 || obj_size_in_words == marked_words,
1048              "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
1049              obj_size_in_words, marked_words);
1050 
1051       for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
1052         HeapRegion* const r = _g1h->region_at(i);
1053         size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
1054 
1055         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
1056                                words_to_add, i, r->get_type_str());
1057         add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
1058         marked_words -= words_to_add;
1059       }
1060       assert(marked_words == 0,
1061              SIZE_FORMAT " words left after distributing space across %u regions",
1062              marked_words, num_regions_in_humongous);
1063     }
1064 
1065     void update_marked_bytes(HeapRegion* hr) {
1066       uint const region_idx = hr->hrm_index();
1067       size_t const marked_words = _cm->liveness(region_idx);
1068       // The marking attributes the object's size completely to the humongous starts
1069       // region. We need to distribute this value across the entire set of regions a
1070       // humongous object spans.
1071       if (hr->is_humongous()) {
1072         assert(hr->is_starts_humongous() || marked_words == 0,
1073                "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
1074                marked_words, region_idx, hr->get_type_str());
1075         if (hr->is_starts_humongous()) {
1076           distribute_marked_bytes(hr, marked_words);
1077         }
1078       } else {
1079         log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
1080         add_marked_bytes_and_note_end(hr, marked_words * HeapWordSize);
1081       }
1082     }
1083 
1084     void add_marked_bytes_and_note_end(HeapRegion* hr, size_t marked_bytes) {
1085       hr->add_to_marked_bytes(marked_bytes);
1086       _cl->do_heap_region(hr);
1087       hr->note_end_of_marking();
1088     }
1089 
1090   public:
1091     G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
1092       _g1h(g1h), _cm(cm), _num_regions_selected_for_rebuild(0), _cl(cl) { }
1093 
1094     virtual bool do_heap_region(HeapRegion* r) {
1095       update_remset_before_rebuild(r);
1096       update_marked_bytes(r);
1097 
1098       return false;
1099     }
1100 
1101     uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
1102   };
1103 
1104 public:
1105   G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
1106     AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
1107     _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1108 
1109   virtual void work(uint worker_id) {
1110     G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1111     _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1112     Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
1113   }
1114 
1115   uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1116 
1117   // Number of regions for which roughly one thread should be spawned for this work.
1118   static const uint RegionsPerThread = 384;
1119 };
1120 
1121 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1122   G1CollectedHeap* _g1h;
1123 public:
1124   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1125 
1126   virtual bool do_heap_region(HeapRegion* r) {
1127     _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1128     return false;
1129   }
1130 };
1131 
1132 void G1ConcurrentMark::remark() {
1133   assert_at_safepoint_on_vm_thread();
1134 
1135   // If a full collection has happened, we should not continue. However we might
1136   // have ended up here as the Remark VM operation has been scheduled already.
1137   if (has_aborted()) {
1138     return;
1139   }
1140 
1141   G1Policy* g1p = _g1h->g1_policy();
1142   g1p->record_concurrent_mark_remark_start();
1143 
1144   double start = os::elapsedTime();
1145 
1146   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1147 
1148   {
1149     GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1150     finalize_marking();
1151   }
1152 
1153   double mark_work_end = os::elapsedTime();
1154 
1155   bool const mark_finished = !has_overflown();
1156   if (mark_finished) {
1157     weak_refs_work(false /* clear_all_soft_refs */);
1158 
1159     SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1160     // We're done with marking.
1161     // This is the end of the marking cycle, we're expected all
1162     // threads to have SATB queues with active set to true.
1163     satb_mq_set.set_active_all_threads(false, /* new active value */
1164                                        true /* expected_active */);
1165 
1166     {
1167       GCTraceTime(Debug, gc, phases) debug("Flush Task Caches", _gc_timer_cm);
1168       flush_all_task_caches();
1169     }
1170 
1171     // Install newly created mark bitmap as "prev".
1172     swap_mark_bitmaps();
1173     {
1174       GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
1175 
1176       uint const workers_by_capacity = (_g1h->num_regions() + G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread - 1) /
1177                                        G1UpdateRemSetTrackingBeforeRebuildTask::RegionsPerThread;
1178       uint const num_workers = MIN2(_g1h->workers()->active_workers(), workers_by_capacity);
1179 
1180       G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
1181       log_debug(gc,ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
1182       _g1h->workers()->run_task(&cl, num_workers);
1183 
1184       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1185                                       _g1h->num_regions(), cl.total_selected_for_rebuild());
1186     }
1187     {
1188       GCTraceTime(Debug, gc, phases) debug("Reclaim Empty Regions", _gc_timer_cm);
1189       reclaim_empty_regions();
1190     }
1191 
1192     // Clean out dead classes
1193     if (ClassUnloadingWithConcurrentMark) {
1194       GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm);
1195       ClassLoaderDataGraph::purge();
1196     }
1197 
1198     compute_new_sizes();
1199 
1200     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
1201 
1202     assert(!restart_for_overflow(), "sanity");
1203     // Completely reset the marking state since marking completed
1204     reset_at_marking_complete();
1205   } else {
1206     // We overflowed.  Restart concurrent marking.
1207     _restart_for_overflow = true;
1208 
1209     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
1210 
1211     // Clear the marking state because we will be restarting
1212     // marking due to overflowing the global mark stack.
1213     reset_marking_for_restart();
1214   }
1215 
1216   {
1217     GCTraceTime(Debug, gc, phases) debug("Report Object Count", _gc_timer_cm);
1218     report_object_count(mark_finished);
1219   }
1220 
1221   // Statistics
1222   double now = os::elapsedTime();
1223   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1224   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1225   _remark_times.add((now - start) * 1000.0);
1226 
1227   g1p->record_concurrent_mark_remark_end();
1228 }
1229 
1230 class G1ReclaimEmptyRegionsTask : public AbstractGangTask {
1231   // Per-region work during the Cleanup pause.
1232   class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
1233     G1CollectedHeap* _g1h;
1234     size_t _freed_bytes;
1235     FreeRegionList* _local_cleanup_list;
1236     uint _old_regions_removed;
1237     uint _humongous_regions_removed;
1238     HRRSCleanupTask* _hrrs_cleanup_task;
1239 
1240   public:
1241     G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
1242                                  FreeRegionList* local_cleanup_list,
1243                                  HRRSCleanupTask* hrrs_cleanup_task) :
1244       _g1h(g1h),
1245       _freed_bytes(0),
1246       _local_cleanup_list(local_cleanup_list),
1247       _old_regions_removed(0),
1248       _humongous_regions_removed(0),
1249       _hrrs_cleanup_task(hrrs_cleanup_task) { }
1250 
1251     size_t freed_bytes() { return _freed_bytes; }
1252     const uint old_regions_removed() { return _old_regions_removed; }
1253     const uint humongous_regions_removed() { return _humongous_regions_removed; }
1254 
1255     bool do_heap_region(HeapRegion *hr) {
1256       if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
1257         _freed_bytes += hr->used();
1258         hr->set_containing_set(NULL);
1259         if (hr->is_humongous()) {
1260           _humongous_regions_removed++;
1261           _g1h->free_humongous_region(hr, _local_cleanup_list);
1262         } else {
1263           _old_regions_removed++;
1264           _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
1265         }
1266         hr->clear_cardtable();
1267         _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
1268         log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
1269       } else {
1270         hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1271       }
1272 
1273       return false;
1274     }
1275   };
1276 
1277   G1CollectedHeap* _g1h;
1278   FreeRegionList* _cleanup_list;
1279   HeapRegionClaimer _hrclaimer;
1280 
1281 public:
1282   G1ReclaimEmptyRegionsTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1283     AbstractGangTask("G1 Cleanup"),
1284     _g1h(g1h),
1285     _cleanup_list(cleanup_list),
1286     _hrclaimer(n_workers) {
1287 
1288     HeapRegionRemSet::reset_for_cleanup_tasks();
1289   }
1290 
1291   void work(uint worker_id) {
1292     FreeRegionList local_cleanup_list("Local Cleanup List");
1293     HRRSCleanupTask hrrs_cleanup_task;
1294     G1ReclaimEmptyRegionsClosure cl(_g1h,
1295                                     &local_cleanup_list,
1296                                     &hrrs_cleanup_task);
1297     _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
1298     assert(cl.is_complete(), "Shouldn't have aborted!");
1299 
1300     // Now update the old/humongous region sets
1301     _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
1302     {
1303       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1304       _g1h->decrement_summary_bytes(cl.freed_bytes());
1305 
1306       _cleanup_list->add_ordered(&local_cleanup_list);
1307       assert(local_cleanup_list.is_empty(), "post-condition");
1308 
1309       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1310     }
1311   }
1312 };
1313 
1314 void G1ConcurrentMark::reclaim_empty_regions() {
1315   WorkGang* workers = _g1h->workers();
1316   FreeRegionList empty_regions_list("Empty Regions After Mark List");
1317 
1318   G1ReclaimEmptyRegionsTask cl(_g1h, &empty_regions_list, workers->active_workers());
1319   workers->run_task(&cl);
1320 
1321   if (!empty_regions_list.is_empty()) {
1322     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
1323     // Now print the empty regions list.
1324     G1HRPrinter* hrp = _g1h->hr_printer();
1325     if (hrp->is_active()) {
1326       FreeRegionListIterator iter(&empty_regions_list);
1327       while (iter.more_available()) {
1328         HeapRegion* hr = iter.get_next();
1329         hrp->cleanup(hr);
1330       }
1331     }
1332     // And actually make them available.
1333     _g1h->prepend_to_freelist(&empty_regions_list);
1334   }
1335 }
1336 
1337 void G1ConcurrentMark::compute_new_sizes() {
1338   MetaspaceGC::compute_new_size();
1339 
1340   // Cleanup will have freed any regions completely full of garbage.
1341   // Update the soft reference policy with the new heap occupancy.
1342   Universe::update_heap_info_at_gc();
1343 
1344   // We reclaimed old regions so we should calculate the sizes to make
1345   // sure we update the old gen/space data.
1346   _g1h->g1mm()->update_sizes();
1347 }
1348 
1349 void G1ConcurrentMark::cleanup() {
1350   assert_at_safepoint_on_vm_thread();
1351 
1352   // If a full collection has happened, we shouldn't do this.
1353   if (has_aborted()) {
1354     return;
1355   }
1356 
1357   G1Policy* g1p = _g1h->g1_policy();
1358   g1p->record_concurrent_mark_cleanup_start();
1359 
1360   double start = os::elapsedTime();
1361 
1362   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1363 
1364   {
1365     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1366     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1367     _g1h->heap_region_iterate(&cl);
1368   }
1369 
1370   if (log_is_enabled(Trace, gc, liveness)) {
1371     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1372     _g1h->heap_region_iterate(&cl);
1373   }
1374 
1375   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1376 
1377   // We need to make this be a "collection" so any collection pause that
1378   // races with it goes around and waits for Cleanup to finish.
1379   _g1h->increment_total_collections();
1380 
1381   // Local statistics
1382   double recent_cleanup_time = (os::elapsedTime() - start);
1383   _total_cleanup_time += recent_cleanup_time;
1384   _cleanup_times.add(recent_cleanup_time);
1385 
1386   {
1387     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1388     _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1389   }
1390 }
1391 
1392 // 'Keep Alive' oop closure used by both serial parallel reference processing.
1393 // Uses the G1CMTask associated with a worker thread (for serial reference
1394 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1395 // trace referent objects.
1396 //
1397 // Using the G1CMTask and embedded local queues avoids having the worker
1398 // threads operating on the global mark stack. This reduces the risk
1399 // of overflowing the stack - which we would rather avoid at this late
1400 // state. Also using the tasks' local queues removes the potential
1401 // of the workers interfering with each other that could occur if
1402 // operating on the global stack.
1403 
1404 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1405   G1ConcurrentMark* _cm;
1406   G1CMTask*         _task;
1407   uint              _ref_counter_limit;
1408   uint              _ref_counter;
1409   bool              _is_serial;
1410 public:
1411   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1412     _cm(cm), _task(task), _is_serial(is_serial),
1413     _ref_counter_limit(G1RefProcDrainInterval) {
1414     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1415     _ref_counter = _ref_counter_limit;
1416   }
1417 
1418   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1419   virtual void do_oop(      oop* p) { do_oop_work(p); }
1420 
1421   template <class T> void do_oop_work(T* p) {
1422     if (_cm->has_overflown()) {
1423       return;
1424     }
1425     if (!_task->deal_with_reference(p)) {
1426       // We did not add anything to the mark bitmap (or mark stack), so there is
1427       // no point trying to drain it.
1428       return;
1429     }
1430     _ref_counter--;
1431 
1432     if (_ref_counter == 0) {
1433       // We have dealt with _ref_counter_limit references, pushing them
1434       // and objects reachable from them on to the local stack (and
1435       // possibly the global stack). Call G1CMTask::do_marking_step() to
1436       // process these entries.
1437       //
1438       // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1439       // there's nothing more to do (i.e. we're done with the entries that
1440       // were pushed as a result of the G1CMTask::deal_with_reference() calls
1441       // above) or we overflow.
1442       //
1443       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1444       // flag while there may still be some work to do. (See the comment at
1445       // the beginning of G1CMTask::do_marking_step() for those conditions -
1446       // one of which is reaching the specified time target.) It is only
1447       // when G1CMTask::do_marking_step() returns without setting the
1448       // has_aborted() flag that the marking step has completed.
1449       do {
1450         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1451         _task->do_marking_step(mark_step_duration_ms,
1452                                false      /* do_termination */,
1453                                _is_serial);
1454       } while (_task->has_aborted() && !_cm->has_overflown());
1455       _ref_counter = _ref_counter_limit;
1456     }
1457   }
1458 };
1459 
1460 // 'Drain' oop closure used by both serial and parallel reference processing.
1461 // Uses the G1CMTask associated with a given worker thread (for serial
1462 // reference processing the G1CMtask for worker 0 is used). Calls the
1463 // do_marking_step routine, with an unbelievably large timeout value,
1464 // to drain the marking data structures of the remaining entries
1465 // added by the 'keep alive' oop closure above.
1466 
1467 class G1CMDrainMarkingStackClosure : public VoidClosure {
1468   G1ConcurrentMark* _cm;
1469   G1CMTask*         _task;
1470   bool              _is_serial;
1471  public:
1472   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1473     _cm(cm), _task(task), _is_serial(is_serial) {
1474     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1475   }
1476 
1477   void do_void() {
1478     do {
1479       // We call G1CMTask::do_marking_step() to completely drain the local
1480       // and global marking stacks of entries pushed by the 'keep alive'
1481       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1482       //
1483       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1484       // if there's nothing more to do (i.e. we've completely drained the
1485       // entries that were pushed as a a result of applying the 'keep alive'
1486       // closure to the entries on the discovered ref lists) or we overflow
1487       // the global marking stack.
1488       //
1489       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1490       // flag while there may still be some work to do. (See the comment at
1491       // the beginning of G1CMTask::do_marking_step() for those conditions -
1492       // one of which is reaching the specified time target.) It is only
1493       // when G1CMTask::do_marking_step() returns without setting the
1494       // has_aborted() flag that the marking step has completed.
1495 
1496       _task->do_marking_step(1000000000.0 /* something very large */,
1497                              true         /* do_termination */,
1498                              _is_serial);
1499     } while (_task->has_aborted() && !_cm->has_overflown());
1500   }
1501 };
1502 
1503 // Implementation of AbstractRefProcTaskExecutor for parallel
1504 // reference processing at the end of G1 concurrent marking
1505 
1506 class G1CMRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1507 private:
1508   G1CollectedHeap*  _g1h;
1509   G1ConcurrentMark* _cm;
1510   WorkGang*         _workers;
1511   uint              _active_workers;
1512 
1513 public:
1514   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1515                           G1ConcurrentMark* cm,
1516                           WorkGang* workers,
1517                           uint n_workers) :
1518     _g1h(g1h), _cm(cm),
1519     _workers(workers), _active_workers(n_workers) { }
1520 
1521   // Executes the given task using concurrent marking worker threads.
1522   virtual void execute(ProcessTask& task);
1523   virtual void execute(EnqueueTask& task);
1524 };
1525 
1526 class G1CMRefProcTaskProxy : public AbstractGangTask {
1527   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1528   ProcessTask&      _proc_task;
1529   G1CollectedHeap*  _g1h;
1530   G1ConcurrentMark* _cm;
1531 
1532 public:
1533   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1534                        G1CollectedHeap* g1h,
1535                        G1ConcurrentMark* cm) :
1536     AbstractGangTask("Process reference objects in parallel"),
1537     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1538     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1539     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1540   }
1541 
1542   virtual void work(uint worker_id) {
1543     ResourceMark rm;
1544     HandleMark hm;
1545     G1CMTask* task = _cm->task(worker_id);
1546     G1CMIsAliveClosure g1_is_alive(_g1h);
1547     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1548     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1549 
1550     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1551   }
1552 };
1553 
1554 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
1555   assert(_workers != NULL, "Need parallel worker threads.");
1556   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1557 
1558   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1559 
1560   // We need to reset the concurrency level before each
1561   // proxy task execution, so that the termination protocol
1562   // and overflow handling in G1CMTask::do_marking_step() knows
1563   // how many workers to wait for.
1564   _cm->set_concurrency(_active_workers);
1565   _workers->run_task(&proc_task_proxy);
1566 }
1567 
1568 class G1CMRefEnqueueTaskProxy : public AbstractGangTask {
1569   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
1570   EnqueueTask& _enq_task;
1571 
1572 public:
1573   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
1574     AbstractGangTask("Enqueue reference objects in parallel"),
1575     _enq_task(enq_task) { }
1576 
1577   virtual void work(uint worker_id) {
1578     _enq_task.work(worker_id);
1579   }
1580 };
1581 
1582 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1583   assert(_workers != NULL, "Need parallel worker threads.");
1584   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1585 
1586   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1587 
1588   // Not strictly necessary but...
1589   //
1590   // We need to reset the concurrency level before each
1591   // proxy task execution, so that the termination protocol
1592   // and overflow handling in G1CMTask::do_marking_step() knows
1593   // how many workers to wait for.
1594   _cm->set_concurrency(_active_workers);
1595   _workers->run_task(&enq_task_proxy);
1596 }
1597 
1598 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
1599   ResourceMark rm;
1600   HandleMark   hm;
1601 
1602   // Is alive closure.
1603   G1CMIsAliveClosure g1_is_alive(_g1h);
1604 
1605   // Inner scope to exclude the cleaning of the string and symbol
1606   // tables from the displayed time.
1607   {
1608     GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
1609 
1610     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1611 
1612     // See the comment in G1CollectedHeap::ref_processing_init()
1613     // about how reference processing currently works in G1.
1614 
1615     // Set the soft reference policy
1616     rp->setup_policy(clear_all_soft_refs);
1617     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1618 
1619     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1620     // in serial reference processing. Note these closures are also
1621     // used for serially processing (by the the current thread) the
1622     // JNI references during parallel reference processing.
1623     //
1624     // These closures do not need to synchronize with the worker
1625     // threads involved in parallel reference processing as these
1626     // instances are executed serially by the current thread (e.g.
1627     // reference processing is not multi-threaded and is thus
1628     // performed by the current thread instead of a gang worker).
1629     //
1630     // The gang tasks involved in parallel reference processing create
1631     // their own instances of these closures, which do their own
1632     // synchronization among themselves.
1633     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1634     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1635 
1636     // We need at least one active thread. If reference processing
1637     // is not multi-threaded we use the current (VMThread) thread,
1638     // otherwise we use the work gang from the G1CollectedHeap and
1639     // we utilize all the worker threads we can.
1640     bool processing_is_mt = rp->processing_is_mt();
1641     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1642     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1643 
1644     // Parallel processing task executor.
1645     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1646                                               _g1h->workers(), active_workers);
1647     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1648 
1649     // Set the concurrency level. The phase was already set prior to
1650     // executing the remark task.
1651     set_concurrency(active_workers);
1652 
1653     // Set the degree of MT processing here.  If the discovery was done MT,
1654     // the number of threads involved during discovery could differ from
1655     // the number of active workers.  This is OK as long as the discovered
1656     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1657     rp->set_active_mt_degree(active_workers);
1658 
1659     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
1660 
1661     // Process the weak references.
1662     const ReferenceProcessorStats& stats =
1663         rp->process_discovered_references(&g1_is_alive,
1664                                           &g1_keep_alive,
1665                                           &g1_drain_mark_stack,
1666                                           executor,
1667                                           &pt);
1668     _gc_tracer_cm->report_gc_reference_stats(stats);
1669     pt.print_all_references();
1670 
1671     // The do_oop work routines of the keep_alive and drain_marking_stack
1672     // oop closures will set the has_overflown flag if we overflow the
1673     // global marking stack.
1674 
1675     assert(has_overflown() || _global_mark_stack.is_empty(),
1676            "Mark stack should be empty (unless it has overflown)");
1677 
1678     assert(rp->num_queues() == active_workers, "why not");
1679 
1680     rp->enqueue_discovered_references(executor, &pt);
1681 
1682     rp->verify_no_references_recorded();
1683 
1684     pt.print_enqueue_phase();
1685 
1686     assert(!rp->discovery_enabled(), "Post condition");
1687   }
1688 
1689   assert(has_overflown() || _global_mark_stack.is_empty(),
1690          "Mark stack should be empty (unless it has overflown)");
1691 
1692   {
1693     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1694     WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl);
1695   }
1696 
1697   if (has_overflown()) {
1698     // We can not trust g1_is_alive if the marking stack overflowed
1699     return;
1700   }
1701 
1702   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1703 
1704   // Unload Klasses, String, Symbols, Code Cache, etc.
1705   if (ClassUnloadingWithConcurrentMark) {
1706     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1707     bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1708     _g1h->complete_cleaning(&g1_is_alive, purged_classes);
1709   } else {
1710     GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1711     // No need to clean string table and symbol table as they are treated as strong roots when
1712     // class unloading is disabled.
1713     _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
1714   }
1715 }
1716 
1717 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
1718 // the prev bitmap determining liveness.
1719 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
1720   G1CollectedHeap* _g1h;
1721 public:
1722   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
1723 
1724   bool do_object_b(oop obj) {
1725     HeapWord* addr = (HeapWord*)obj;
1726     return addr != NULL &&
1727            (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
1728   }
1729 };
1730 
1731 void G1ConcurrentMark::report_object_count(bool mark_completed) {
1732   // Depending on the completion of the marking liveness needs to be determined
1733   // using either the next or prev bitmap.
1734   if (mark_completed) {
1735     G1ObjectCountIsAliveClosure is_alive(_g1h);
1736     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1737   } else {
1738     G1CMIsAliveClosure is_alive(_g1h);
1739     _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1740   }
1741 }
1742 
1743 
1744 void G1ConcurrentMark::swap_mark_bitmaps() {
1745   G1CMBitMap* temp = _prev_mark_bitmap;
1746   _prev_mark_bitmap = _next_mark_bitmap;
1747   _next_mark_bitmap = temp;
1748   _g1h->collector_state()->set_clearing_next_bitmap(true);
1749 }
1750 
1751 // Closure for marking entries in SATB buffers.
1752 class G1CMSATBBufferClosure : public SATBBufferClosure {
1753 private:
1754   G1CMTask* _task;
1755   G1CollectedHeap* _g1h;
1756 
1757   // This is very similar to G1CMTask::deal_with_reference, but with
1758   // more relaxed requirements for the argument, so this must be more
1759   // circumspect about treating the argument as an object.
1760   void do_entry(void* entry) const {
1761     _task->increment_refs_reached();
1762     oop const obj = static_cast<oop>(entry);
1763     _task->make_reference_grey(obj);
1764   }
1765 
1766 public:
1767   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1768     : _task(task), _g1h(g1h) { }
1769 
1770   virtual void do_buffer(void** buffer, size_t size) {
1771     for (size_t i = 0; i < size; ++i) {
1772       do_entry(buffer[i]);
1773     }
1774   }
1775 };
1776 
1777 class G1RemarkThreadsClosure : public ThreadClosure {
1778   G1CMSATBBufferClosure _cm_satb_cl;
1779   G1CMOopClosure _cm_cl;
1780   MarkingCodeBlobClosure _code_cl;
1781   int _thread_parity;
1782 
1783  public:
1784   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1785     _cm_satb_cl(task, g1h),
1786     _cm_cl(g1h, task),
1787     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1788     _thread_parity(Threads::thread_claim_parity()) {}
1789 
1790   void do_thread(Thread* thread) {
1791     if (thread->is_Java_thread()) {
1792       if (thread->claim_oops_do(true, _thread_parity)) {
1793         JavaThread* jt = (JavaThread*)thread;
1794 
1795         // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
1796         // however the liveness of oops reachable from nmethods have very complex lifecycles:
1797         // * Alive if on the stack of an executing method
1798         // * Weakly reachable otherwise
1799         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
1800         // live by the SATB invariant but other oops recorded in nmethods may behave differently.
1801         jt->nmethods_do(&_code_cl);
1802 
1803         G1ThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(&_cm_satb_cl);
1804       }
1805     } else if (thread->is_VM_thread()) {
1806       if (thread->claim_oops_do(true, _thread_parity)) {
1807         G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1808       }
1809     }
1810   }
1811 };
1812 
1813 class G1CMRemarkTask : public AbstractGangTask {
1814   G1ConcurrentMark* _cm;
1815 public:
1816   void work(uint worker_id) {
1817     G1CMTask* task = _cm->task(worker_id);
1818     task->record_start_time();
1819     {
1820       ResourceMark rm;
1821       HandleMark hm;
1822 
1823       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1824       Threads::threads_do(&threads_f);
1825     }
1826 
1827     do {
1828       task->do_marking_step(1000000000.0 /* something very large */,
1829                             true         /* do_termination       */,
1830                             false        /* is_serial            */);
1831     } while (task->has_aborted() && !_cm->has_overflown());
1832     // If we overflow, then we do not want to restart. We instead
1833     // want to abort remark and do concurrent marking again.
1834     task->record_end_time();
1835   }
1836 
1837   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1838     AbstractGangTask("Par Remark"), _cm(cm) {
1839     _cm->terminator()->reset_for_reuse(active_workers);
1840   }
1841 };
1842 
1843 void G1ConcurrentMark::finalize_marking() {
1844   ResourceMark rm;
1845   HandleMark   hm;
1846 
1847   _g1h->ensure_parsability(false);
1848 
1849   // this is remark, so we'll use up all active threads
1850   uint active_workers = _g1h->workers()->active_workers();
1851   set_concurrency_and_phase(active_workers, false /* concurrent */);
1852   // Leave _parallel_marking_threads at it's
1853   // value originally calculated in the G1ConcurrentMark
1854   // constructor and pass values of the active workers
1855   // through the gang in the task.
1856 
1857   {
1858     StrongRootsScope srs(active_workers);
1859 
1860     G1CMRemarkTask remarkTask(this, active_workers);
1861     // We will start all available threads, even if we decide that the
1862     // active_workers will be fewer. The extra ones will just bail out
1863     // immediately.
1864     _g1h->workers()->run_task(&remarkTask);
1865   }
1866 
1867   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1868   guarantee(has_overflown() ||
1869             satb_mq_set.completed_buffers_num() == 0,
1870             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1871             BOOL_TO_STR(has_overflown()),
1872             satb_mq_set.completed_buffers_num());
1873 
1874   print_stats();
1875 }
1876 
1877 void G1ConcurrentMark::flush_all_task_caches() {
1878   size_t hits = 0;
1879   size_t misses = 0;
1880   for (uint i = 0; i < _max_num_tasks; i++) {
1881     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1882     hits += stats.first;
1883     misses += stats.second;
1884   }
1885   size_t sum = hits + misses;
1886   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1887                        hits, misses, percent_of(hits, sum));
1888 }
1889 
1890 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1891   _prev_mark_bitmap->clear_range(mr);
1892 }
1893 
1894 HeapRegion*
1895 G1ConcurrentMark::claim_region(uint worker_id) {
1896   // "checkpoint" the finger
1897   HeapWord* finger = _finger;
1898 
1899   while (finger < _heap.end()) {
1900     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1901 
1902     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1903     // Make sure that the reads below do not float before loading curr_region.
1904     OrderAccess::loadload();
1905     // Above heap_region_containing may return NULL as we always scan claim
1906     // until the end of the heap. In this case, just jump to the next region.
1907     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1908 
1909     // Is the gap between reading the finger and doing the CAS too long?
1910     HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1911     if (res == finger && curr_region != NULL) {
1912       // we succeeded
1913       HeapWord*   bottom        = curr_region->bottom();
1914       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1915 
1916       // notice that _finger == end cannot be guaranteed here since,
1917       // someone else might have moved the finger even further
1918       assert(_finger >= end, "the finger should have moved forward");
1919 
1920       if (limit > bottom) {
1921         return curr_region;
1922       } else {
1923         assert(limit == bottom,
1924                "the region limit should be at bottom");
1925         // we return NULL and the caller should try calling
1926         // claim_region() again.
1927         return NULL;
1928       }
1929     } else {
1930       assert(_finger > finger, "the finger should have moved forward");
1931       // read it again
1932       finger = _finger;
1933     }
1934   }
1935 
1936   return NULL;
1937 }
1938 
1939 #ifndef PRODUCT
1940 class VerifyNoCSetOops {
1941   G1CollectedHeap* _g1h;
1942   const char* _phase;
1943   int _info;
1944 
1945 public:
1946   VerifyNoCSetOops(const char* phase, int info = -1) :
1947     _g1h(G1CollectedHeap::heap()),
1948     _phase(phase),
1949     _info(info)
1950   { }
1951 
1952   void operator()(G1TaskQueueEntry task_entry) const {
1953     if (task_entry.is_array_slice()) {
1954       guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
1955       return;
1956     }
1957     guarantee(oopDesc::is_oop(task_entry.obj()),
1958               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1959               p2i(task_entry.obj()), _phase, _info);
1960     guarantee(!_g1h->is_in_cset(task_entry.obj()),
1961               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
1962               p2i(task_entry.obj()), _phase, _info);
1963   }
1964 };
1965 
1966 void G1ConcurrentMark::verify_no_cset_oops() {
1967   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1968   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
1969     return;
1970   }
1971 
1972   // Verify entries on the global mark stack
1973   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
1974 
1975   // Verify entries on the task queues
1976   for (uint i = 0; i < _max_num_tasks; ++i) {
1977     G1CMTaskQueue* queue = _task_queues->queue(i);
1978     queue->iterate(VerifyNoCSetOops("Queue", i));
1979   }
1980 
1981   // Verify the global finger
1982   HeapWord* global_finger = finger();
1983   if (global_finger != NULL && global_finger < _heap.end()) {
1984     // Since we always iterate over all regions, we might get a NULL HeapRegion
1985     // here.
1986     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
1987     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
1988               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
1989               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
1990   }
1991 
1992   // Verify the task fingers
1993   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1994   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1995     G1CMTask* task = _tasks[i];
1996     HeapWord* task_finger = task->finger();
1997     if (task_finger != NULL && task_finger < _heap.end()) {
1998       // See above note on the global finger verification.
1999       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
2000       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2001                 !task_hr->in_collection_set(),
2002                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2003                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
2004     }
2005   }
2006 }
2007 #endif // PRODUCT
2008 
2009 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
2010   _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
2011 }
2012 
2013 void G1ConcurrentMark::print_stats() {
2014   if (!log_is_enabled(Debug, gc, stats)) {
2015     return;
2016   }
2017   log_debug(gc, stats)("---------------------------------------------------------------------");
2018   for (size_t i = 0; i < _num_active_tasks; ++i) {
2019     _tasks[i]->print_stats();
2020     log_debug(gc, stats)("---------------------------------------------------------------------");
2021   }
2022 }
2023 
2024 void G1ConcurrentMark::concurrent_cycle_abort() {
2025   if (!cm_thread()->during_cycle() || _has_aborted) {
2026     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2027     return;
2028   }
2029 
2030   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2031   // concurrent bitmap clearing.
2032   {
2033     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2034     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
2035   }
2036   // Note we cannot clear the previous marking bitmap here
2037   // since VerifyDuringGC verifies the objects marked during
2038   // a full GC against the previous bitmap.
2039 
2040   // Empty mark stack
2041   reset_marking_for_restart();
2042   for (uint i = 0; i < _max_num_tasks; ++i) {
2043     _tasks[i]->clear_region_fields();
2044   }
2045   _first_overflow_barrier_sync.abort();
2046   _second_overflow_barrier_sync.abort();
2047   _has_aborted = true;
2048 
2049   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2050   satb_mq_set.abandon_partial_marking();
2051   // This can be called either during or outside marking, we'll read
2052   // the expected_active value from the SATB queue set.
2053   satb_mq_set.set_active_all_threads(
2054                                  false, /* new active value */
2055                                  satb_mq_set.is_active() /* expected_active */);
2056 }
2057 
2058 static void print_ms_time_info(const char* prefix, const char* name,
2059                                NumberSeq& ns) {
2060   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2061                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2062   if (ns.num() > 0) {
2063     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2064                            prefix, ns.sd(), ns.maximum());
2065   }
2066 }
2067 
2068 void G1ConcurrentMark::print_summary_info() {
2069   Log(gc, marking) log;
2070   if (!log.is_trace()) {
2071     return;
2072   }
2073 
2074   log.trace(" Concurrent marking:");
2075   print_ms_time_info("  ", "init marks", _init_times);
2076   print_ms_time_info("  ", "remarks", _remark_times);
2077   {
2078     print_ms_time_info("     ", "final marks", _remark_mark_times);
2079     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2080 
2081   }
2082   print_ms_time_info("  ", "cleanups", _cleanup_times);
2083   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2084             _total_cleanup_time, (_cleanup_times.num() > 0 ? _total_cleanup_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2085   log.trace("  Total stop_world time = %8.2f s.",
2086             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2087   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2088             cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum());
2089 }
2090 
2091 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2092   _concurrent_workers->print_worker_threads_on(st);
2093 }
2094 
2095 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2096   _concurrent_workers->threads_do(tc);
2097 }
2098 
2099 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2100   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2101                p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap));
2102   _prev_mark_bitmap->print_on_error(st, " Prev Bits: ");
2103   _next_mark_bitmap->print_on_error(st, " Next Bits: ");
2104 }
2105 
2106 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2107   ReferenceProcessor* result = g1h->ref_processor_cm();
2108   assert(result != NULL, "CM reference processor should not be NULL");
2109   return result;
2110 }
2111 
2112 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2113                                G1CMTask* task)
2114   : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
2115     _g1h(g1h), _task(task)
2116 { }
2117 
2118 void G1CMTask::setup_for_region(HeapRegion* hr) {
2119   assert(hr != NULL,
2120         "claim_region() should have filtered out NULL regions");
2121   _curr_region  = hr;
2122   _finger       = hr->bottom();
2123   update_region_limit();
2124 }
2125 
2126 void G1CMTask::update_region_limit() {
2127   HeapRegion* hr            = _curr_region;
2128   HeapWord* bottom          = hr->bottom();
2129   HeapWord* limit           = hr->next_top_at_mark_start();
2130 
2131   if (limit == bottom) {
2132     // The region was collected underneath our feet.
2133     // We set the finger to bottom to ensure that the bitmap
2134     // iteration that will follow this will not do anything.
2135     // (this is not a condition that holds when we set the region up,
2136     // as the region is not supposed to be empty in the first place)
2137     _finger = bottom;
2138   } else if (limit >= _region_limit) {
2139     assert(limit >= _finger, "peace of mind");
2140   } else {
2141     assert(limit < _region_limit, "only way to get here");
2142     // This can happen under some pretty unusual circumstances.  An
2143     // evacuation pause empties the region underneath our feet (NTAMS
2144     // at bottom). We then do some allocation in the region (NTAMS
2145     // stays at bottom), followed by the region being used as a GC
2146     // alloc region (NTAMS will move to top() and the objects
2147     // originally below it will be grayed). All objects now marked in
2148     // the region are explicitly grayed, if below the global finger,
2149     // and we do not need in fact to scan anything else. So, we simply
2150     // set _finger to be limit to ensure that the bitmap iteration
2151     // doesn't do anything.
2152     _finger = limit;
2153   }
2154 
2155   _region_limit = limit;
2156 }
2157 
2158 void G1CMTask::giveup_current_region() {
2159   assert(_curr_region != NULL, "invariant");
2160   clear_region_fields();
2161 }
2162 
2163 void G1CMTask::clear_region_fields() {
2164   // Values for these three fields that indicate that we're not
2165   // holding on to a region.
2166   _curr_region   = NULL;
2167   _finger        = NULL;
2168   _region_limit  = NULL;
2169 }
2170 
2171 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2172   if (cm_oop_closure == NULL) {
2173     assert(_cm_oop_closure != NULL, "invariant");
2174   } else {
2175     assert(_cm_oop_closure == NULL, "invariant");
2176   }
2177   _cm_oop_closure = cm_oop_closure;
2178 }
2179 
2180 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2181   guarantee(next_mark_bitmap != NULL, "invariant");
2182   _next_mark_bitmap              = next_mark_bitmap;
2183   clear_region_fields();
2184 
2185   _calls                         = 0;
2186   _elapsed_time_ms               = 0.0;
2187   _termination_time_ms           = 0.0;
2188   _termination_start_time_ms     = 0.0;
2189 
2190   _mark_stats_cache.reset();
2191 }
2192 
2193 bool G1CMTask::should_exit_termination() {
2194   regular_clock_call();
2195   // This is called when we are in the termination protocol. We should
2196   // quit if, for some reason, this task wants to abort or the global
2197   // stack is not empty (this means that we can get work from it).
2198   return !_cm->mark_stack_empty() || has_aborted();
2199 }
2200 
2201 void G1CMTask::reached_limit() {
2202   assert(_words_scanned >= _words_scanned_limit ||
2203          _refs_reached >= _refs_reached_limit ,
2204          "shouldn't have been called otherwise");
2205   regular_clock_call();
2206 }
2207 
2208 void G1CMTask::regular_clock_call() {
2209   if (has_aborted()) {
2210     return;
2211   }
2212 
2213   // First, we need to recalculate the words scanned and refs reached
2214   // limits for the next clock call.
2215   recalculate_limits();
2216 
2217   // During the regular clock call we do the following
2218 
2219   // (1) If an overflow has been flagged, then we abort.
2220   if (_cm->has_overflown()) {
2221     set_has_aborted();
2222     return;
2223   }
2224 
2225   // If we are not concurrent (i.e. we're doing remark) we don't need
2226   // to check anything else. The other steps are only needed during
2227   // the concurrent marking phase.
2228   if (!_cm->concurrent()) {
2229     return;
2230   }
2231 
2232   // (2) If marking has been aborted for Full GC, then we also abort.
2233   if (_cm->has_aborted()) {
2234     set_has_aborted();
2235     return;
2236   }
2237 
2238   double curr_time_ms = os::elapsedVTime() * 1000.0;
2239 
2240   // (4) We check whether we should yield. If we have to, then we abort.
2241   if (SuspendibleThreadSet::should_yield()) {
2242     // We should yield. To do this we abort the task. The caller is
2243     // responsible for yielding.
2244     set_has_aborted();
2245     return;
2246   }
2247 
2248   // (5) We check whether we've reached our time quota. If we have,
2249   // then we abort.
2250   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2251   if (elapsed_time_ms > _time_target_ms) {
2252     set_has_aborted();
2253     _has_timed_out = true;
2254     return;
2255   }
2256 
2257   // (6) Finally, we check whether there are enough completed STAB
2258   // buffers available for processing. If there are, we abort.
2259   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2260   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2261     // we do need to process SATB buffers, we'll abort and restart
2262     // the marking task to do so
2263     set_has_aborted();
2264     return;
2265   }
2266 }
2267 
2268 void G1CMTask::recalculate_limits() {
2269   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2270   _words_scanned_limit      = _real_words_scanned_limit;
2271 
2272   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2273   _refs_reached_limit       = _real_refs_reached_limit;
2274 }
2275 
2276 void G1CMTask::decrease_limits() {
2277   // This is called when we believe that we're going to do an infrequent
2278   // operation which will increase the per byte scanned cost (i.e. move
2279   // entries to/from the global stack). It basically tries to decrease the
2280   // scanning limit so that the clock is called earlier.
2281 
2282   _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2283   _refs_reached_limit  = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2284 }
2285 
2286 void G1CMTask::move_entries_to_global_stack() {
2287   // Local array where we'll store the entries that will be popped
2288   // from the local queue.
2289   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2290 
2291   size_t n = 0;
2292   G1TaskQueueEntry task_entry;
2293   while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2294     buffer[n] = task_entry;
2295     ++n;
2296   }
2297   if (n < G1CMMarkStack::EntriesPerChunk) {
2298     buffer[n] = G1TaskQueueEntry();
2299   }
2300 
2301   if (n > 0) {
2302     if (!_cm->mark_stack_push(buffer)) {
2303       set_has_aborted();
2304     }
2305   }
2306 
2307   // This operation was quite expensive, so decrease the limits.
2308   decrease_limits();
2309 }
2310 
2311 bool G1CMTask::get_entries_from_global_stack() {
2312   // Local array where we'll store the entries that will be popped
2313   // from the global stack.
2314   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2315 
2316   if (!_cm->mark_stack_pop(buffer)) {
2317     return false;
2318   }
2319 
2320   // We did actually pop at least one entry.
2321   for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2322     G1TaskQueueEntry task_entry = buffer[i];
2323     if (task_entry.is_null()) {
2324       break;
2325     }
2326     assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2327     bool success = _task_queue->push(task_entry);
2328     // We only call this when the local queue is empty or under a
2329     // given target limit. So, we do not expect this push to fail.
2330     assert(success, "invariant");
2331   }
2332 
2333   // This operation was quite expensive, so decrease the limits
2334   decrease_limits();
2335   return true;
2336 }
2337 
2338 void G1CMTask::drain_local_queue(bool partially) {
2339   if (has_aborted()) {
2340     return;
2341   }
2342 
2343   // Decide what the target size is, depending whether we're going to
2344   // drain it partially (so that other tasks can steal if they run out
2345   // of things to do) or totally (at the very end).
2346   size_t target_size;
2347   if (partially) {
2348     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2349   } else {
2350     target_size = 0;
2351   }
2352 
2353   if (_task_queue->size() > target_size) {
2354     G1TaskQueueEntry entry;
2355     bool ret = _task_queue->pop_local(entry);
2356     while (ret) {
2357       scan_task_entry(entry);
2358       if (_task_queue->size() <= target_size || has_aborted()) {
2359         ret = false;
2360       } else {
2361         ret = _task_queue->pop_local(entry);
2362       }
2363     }
2364   }
2365 }
2366 
2367 void G1CMTask::drain_global_stack(bool partially) {
2368   if (has_aborted()) {
2369     return;
2370   }
2371 
2372   // We have a policy to drain the local queue before we attempt to
2373   // drain the global stack.
2374   assert(partially || _task_queue->size() == 0, "invariant");
2375 
2376   // Decide what the target size is, depending whether we're going to
2377   // drain it partially (so that other tasks can steal if they run out
2378   // of things to do) or totally (at the very end).
2379   // Notice that when draining the global mark stack partially, due to the racyness
2380   // of the mark stack size update we might in fact drop below the target. But,
2381   // this is not a problem.
2382   // In case of total draining, we simply process until the global mark stack is
2383   // totally empty, disregarding the size counter.
2384   if (partially) {
2385     size_t const target_size = _cm->partial_mark_stack_size_target();
2386     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2387       if (get_entries_from_global_stack()) {
2388         drain_local_queue(partially);
2389       }
2390     }
2391   } else {
2392     while (!has_aborted() && get_entries_from_global_stack()) {
2393       drain_local_queue(partially);
2394     }
2395   }
2396 }
2397 
2398 // SATB Queue has several assumptions on whether to call the par or
2399 // non-par versions of the methods. this is why some of the code is
2400 // replicated. We should really get rid of the single-threaded version
2401 // of the code to simplify things.
2402 void G1CMTask::drain_satb_buffers() {
2403   if (has_aborted()) {
2404     return;
2405   }
2406 
2407   // We set this so that the regular clock knows that we're in the
2408   // middle of draining buffers and doesn't set the abort flag when it
2409   // notices that SATB buffers are available for draining. It'd be
2410   // very counter productive if it did that. :-)
2411   _draining_satb_buffers = true;
2412 
2413   G1CMSATBBufferClosure satb_cl(this, _g1h);
2414   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2415 
2416   // This keeps claiming and applying the closure to completed buffers
2417   // until we run out of buffers or we need to abort.
2418   while (!has_aborted() &&
2419          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2420     regular_clock_call();
2421   }
2422 
2423   _draining_satb_buffers = false;
2424 
2425   assert(has_aborted() ||
2426          _cm->concurrent() ||
2427          satb_mq_set.completed_buffers_num() == 0, "invariant");
2428 
2429   // again, this was a potentially expensive operation, decrease the
2430   // limits to get the regular clock call early
2431   decrease_limits();
2432 }
2433 
2434 void G1CMTask::clear_mark_stats_cache(uint region_idx) {
2435   _mark_stats_cache.reset(region_idx);
2436 }
2437 
2438 Pair<size_t, size_t> G1CMTask::flush_mark_stats_cache() {
2439   return _mark_stats_cache.evict_all();
2440 }
2441 
2442 void G1CMTask::print_stats() {
2443   log_debug(gc, stats)("Marking Stats, task = %u, calls = %u", _worker_id, _calls);
2444   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2445                        _elapsed_time_ms, _termination_time_ms);
2446   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms max = %1.2lfms, total = %1.2lfms",
2447                        _step_times_ms.num(),
2448                        _step_times_ms.avg(),
2449                        _step_times_ms.sd(),
2450                        _step_times_ms.maximum(),
2451                        _step_times_ms.sum());
2452   size_t const hits = _mark_stats_cache.hits();
2453   size_t const misses = _mark_stats_cache.misses();
2454   log_debug(gc, stats)("  Mark Stats Cache: hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %.3f",
2455                        hits, misses, percent_of(hits, hits + misses));
2456 }
2457 
2458 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) {
2459   return _task_queues->steal(worker_id, hash_seed, task_entry);
2460 }
2461 
2462 /*****************************************************************************
2463 
2464     The do_marking_step(time_target_ms, ...) method is the building
2465     block of the parallel marking framework. It can be called in parallel
2466     with other invocations of do_marking_step() on different tasks
2467     (but only one per task, obviously) and concurrently with the
2468     mutator threads, or during remark, hence it eliminates the need
2469     for two versions of the code. When called during remark, it will
2470     pick up from where the task left off during the concurrent marking
2471     phase. Interestingly, tasks are also claimable during evacuation
2472     pauses too, since do_marking_step() ensures that it aborts before
2473     it needs to yield.
2474 
2475     The data structures that it uses to do marking work are the
2476     following:
2477 
2478       (1) Marking Bitmap. If there are gray objects that appear only
2479       on the bitmap (this happens either when dealing with an overflow
2480       or when the initial marking phase has simply marked the roots
2481       and didn't push them on the stack), then tasks claim heap
2482       regions whose bitmap they then scan to find gray objects. A
2483       global finger indicates where the end of the last claimed region
2484       is. A local finger indicates how far into the region a task has
2485       scanned. The two fingers are used to determine how to gray an
2486       object (i.e. whether simply marking it is OK, as it will be
2487       visited by a task in the future, or whether it needs to be also
2488       pushed on a stack).
2489 
2490       (2) Local Queue. The local queue of the task which is accessed
2491       reasonably efficiently by the task. Other tasks can steal from
2492       it when they run out of work. Throughout the marking phase, a
2493       task attempts to keep its local queue short but not totally
2494       empty, so that entries are available for stealing by other
2495       tasks. Only when there is no more work, a task will totally
2496       drain its local queue.
2497 
2498       (3) Global Mark Stack. This handles local queue overflow. During
2499       marking only sets of entries are moved between it and the local
2500       queues, as access to it requires a mutex and more fine-grain
2501       interaction with it which might cause contention. If it
2502       overflows, then the marking phase should restart and iterate
2503       over the bitmap to identify gray objects. Throughout the marking
2504       phase, tasks attempt to keep the global mark stack at a small
2505       length but not totally empty, so that entries are available for
2506       popping by other tasks. Only when there is no more work, tasks
2507       will totally drain the global mark stack.
2508 
2509       (4) SATB Buffer Queue. This is where completed SATB buffers are
2510       made available. Buffers are regularly removed from this queue
2511       and scanned for roots, so that the queue doesn't get too
2512       long. During remark, all completed buffers are processed, as
2513       well as the filled in parts of any uncompleted buffers.
2514 
2515     The do_marking_step() method tries to abort when the time target
2516     has been reached. There are a few other cases when the
2517     do_marking_step() method also aborts:
2518 
2519       (1) When the marking phase has been aborted (after a Full GC).
2520 
2521       (2) When a global overflow (on the global stack) has been
2522       triggered. Before the task aborts, it will actually sync up with
2523       the other tasks to ensure that all the marking data structures
2524       (local queues, stacks, fingers etc.)  are re-initialized so that
2525       when do_marking_step() completes, the marking phase can
2526       immediately restart.
2527 
2528       (3) When enough completed SATB buffers are available. The
2529       do_marking_step() method only tries to drain SATB buffers right
2530       at the beginning. So, if enough buffers are available, the
2531       marking step aborts and the SATB buffers are processed at
2532       the beginning of the next invocation.
2533 
2534       (4) To yield. when we have to yield then we abort and yield
2535       right at the end of do_marking_step(). This saves us from a lot
2536       of hassle as, by yielding we might allow a Full GC. If this
2537       happens then objects will be compacted underneath our feet, the
2538       heap might shrink, etc. We save checking for this by just
2539       aborting and doing the yield right at the end.
2540 
2541     From the above it follows that the do_marking_step() method should
2542     be called in a loop (or, otherwise, regularly) until it completes.
2543 
2544     If a marking step completes without its has_aborted() flag being
2545     true, it means it has completed the current marking phase (and
2546     also all other marking tasks have done so and have all synced up).
2547 
2548     A method called regular_clock_call() is invoked "regularly" (in
2549     sub ms intervals) throughout marking. It is this clock method that
2550     checks all the abort conditions which were mentioned above and
2551     decides when the task should abort. A work-based scheme is used to
2552     trigger this clock method: when the number of object words the
2553     marking phase has scanned or the number of references the marking
2554     phase has visited reach a given limit. Additional invocations to
2555     the method clock have been planted in a few other strategic places
2556     too. The initial reason for the clock method was to avoid calling
2557     vtime too regularly, as it is quite expensive. So, once it was in
2558     place, it was natural to piggy-back all the other conditions on it
2559     too and not constantly check them throughout the code.
2560 
2561     If do_termination is true then do_marking_step will enter its
2562     termination protocol.
2563 
2564     The value of is_serial must be true when do_marking_step is being
2565     called serially (i.e. by the VMThread) and do_marking_step should
2566     skip any synchronization in the termination and overflow code.
2567     Examples include the serial remark code and the serial reference
2568     processing closures.
2569 
2570     The value of is_serial must be false when do_marking_step is
2571     being called by any of the worker threads in a work gang.
2572     Examples include the concurrent marking code (CMMarkingTask),
2573     the MT remark code, and the MT reference processing closures.
2574 
2575  *****************************************************************************/
2576 
2577 void G1CMTask::do_marking_step(double time_target_ms,
2578                                bool do_termination,
2579                                bool is_serial) {
2580   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2581 
2582   _start_time_ms = os::elapsedVTime() * 1000.0;
2583 
2584   // If do_stealing is true then do_marking_step will attempt to
2585   // steal work from the other G1CMTasks. It only makes sense to
2586   // enable stealing when the termination protocol is enabled
2587   // and do_marking_step() is not being called serially.
2588   bool do_stealing = do_termination && !is_serial;
2589 
2590   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2591   _time_target_ms = time_target_ms - diff_prediction_ms;
2592 
2593   // set up the variables that are used in the work-based scheme to
2594   // call the regular clock method
2595   _words_scanned = 0;
2596   _refs_reached  = 0;
2597   recalculate_limits();
2598 
2599   // clear all flags
2600   clear_has_aborted();
2601   _has_timed_out = false;
2602   _draining_satb_buffers = false;
2603 
2604   ++_calls;
2605 
2606   // Set up the bitmap and oop closures. Anything that uses them is
2607   // eventually called from this method, so it is OK to allocate these
2608   // statically.
2609   G1CMBitMapClosure bitmap_closure(this, _cm);
2610   G1CMOopClosure cm_oop_closure(_g1h, this);
2611   set_cm_oop_closure(&cm_oop_closure);
2612 
2613   if (_cm->has_overflown()) {
2614     // This can happen if the mark stack overflows during a GC pause
2615     // and this task, after a yield point, restarts. We have to abort
2616     // as we need to get into the overflow protocol which happens
2617     // right at the end of this task.
2618     set_has_aborted();
2619   }
2620 
2621   // First drain any available SATB buffers. After this, we will not
2622   // look at SATB buffers before the next invocation of this method.
2623   // If enough completed SATB buffers are queued up, the regular clock
2624   // will abort this task so that it restarts.
2625   drain_satb_buffers();
2626   // ...then partially drain the local queue and the global stack
2627   drain_local_queue(true);
2628   drain_global_stack(true);
2629 
2630   do {
2631     if (!has_aborted() && _curr_region != NULL) {
2632       // This means that we're already holding on to a region.
2633       assert(_finger != NULL, "if region is not NULL, then the finger "
2634              "should not be NULL either");
2635 
2636       // We might have restarted this task after an evacuation pause
2637       // which might have evacuated the region we're holding on to
2638       // underneath our feet. Let's read its limit again to make sure
2639       // that we do not iterate over a region of the heap that
2640       // contains garbage (update_region_limit() will also move
2641       // _finger to the start of the region if it is found empty).
2642       update_region_limit();
2643       // We will start from _finger not from the start of the region,
2644       // as we might be restarting this task after aborting half-way
2645       // through scanning this region. In this case, _finger points to
2646       // the address where we last found a marked object. If this is a
2647       // fresh region, _finger points to start().
2648       MemRegion mr = MemRegion(_finger, _region_limit);
2649 
2650       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2651              "humongous regions should go around loop once only");
2652 
2653       // Some special cases:
2654       // If the memory region is empty, we can just give up the region.
2655       // If the current region is humongous then we only need to check
2656       // the bitmap for the bit associated with the start of the object,
2657       // scan the object if it's live, and give up the region.
2658       // Otherwise, let's iterate over the bitmap of the part of the region
2659       // that is left.
2660       // If the iteration is successful, give up the region.
2661       if (mr.is_empty()) {
2662         giveup_current_region();
2663         regular_clock_call();
2664       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2665         if (_next_mark_bitmap->is_marked(mr.start())) {
2666           // The object is marked - apply the closure
2667           bitmap_closure.do_addr(mr.start());
2668         }
2669         // Even if this task aborted while scanning the humongous object
2670         // we can (and should) give up the current region.
2671         giveup_current_region();
2672         regular_clock_call();
2673       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2674         giveup_current_region();
2675         regular_clock_call();
2676       } else {
2677         assert(has_aborted(), "currently the only way to do so");
2678         // The only way to abort the bitmap iteration is to return
2679         // false from the do_bit() method. However, inside the
2680         // do_bit() method we move the _finger to point to the
2681         // object currently being looked at. So, if we bail out, we
2682         // have definitely set _finger to something non-null.
2683         assert(_finger != NULL, "invariant");
2684 
2685         // Region iteration was actually aborted. So now _finger
2686         // points to the address of the object we last scanned. If we
2687         // leave it there, when we restart this task, we will rescan
2688         // the object. It is easy to avoid this. We move the finger by
2689         // enough to point to the next possible object header.
2690         assert(_finger < _region_limit, "invariant");
2691         HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2692         // Check if bitmap iteration was aborted while scanning the last object
2693         if (new_finger >= _region_limit) {
2694           giveup_current_region();
2695         } else {
2696           move_finger_to(new_finger);
2697         }
2698       }
2699     }
2700     // At this point we have either completed iterating over the
2701     // region we were holding on to, or we have aborted.
2702 
2703     // We then partially drain the local queue and the global stack.
2704     // (Do we really need this?)
2705     drain_local_queue(true);
2706     drain_global_stack(true);
2707 
2708     // Read the note on the claim_region() method on why it might
2709     // return NULL with potentially more regions available for
2710     // claiming and why we have to check out_of_regions() to determine
2711     // whether we're done or not.
2712     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2713       // We are going to try to claim a new region. We should have
2714       // given up on the previous one.
2715       // Separated the asserts so that we know which one fires.
2716       assert(_curr_region  == NULL, "invariant");
2717       assert(_finger       == NULL, "invariant");
2718       assert(_region_limit == NULL, "invariant");
2719       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2720       if (claimed_region != NULL) {
2721         // Yes, we managed to claim one
2722         setup_for_region(claimed_region);
2723         assert(_curr_region == claimed_region, "invariant");
2724       }
2725       // It is important to call the regular clock here. It might take
2726       // a while to claim a region if, for example, we hit a large
2727       // block of empty regions. So we need to call the regular clock
2728       // method once round the loop to make sure it's called
2729       // frequently enough.
2730       regular_clock_call();
2731     }
2732 
2733     if (!has_aborted() && _curr_region == NULL) {
2734       assert(_cm->out_of_regions(),
2735              "at this point we should be out of regions");
2736     }
2737   } while ( _curr_region != NULL && !has_aborted());
2738 
2739   if (!has_aborted()) {
2740     // We cannot check whether the global stack is empty, since other
2741     // tasks might be pushing objects to it concurrently.
2742     assert(_cm->out_of_regions(),
2743            "at this point we should be out of regions");
2744     // Try to reduce the number of available SATB buffers so that
2745     // remark has less work to do.
2746     drain_satb_buffers();
2747   }
2748 
2749   // Since we've done everything else, we can now totally drain the
2750   // local queue and global stack.
2751   drain_local_queue(false);
2752   drain_global_stack(false);
2753 
2754   // Attempt at work stealing from other task's queues.
2755   if (do_stealing && !has_aborted()) {
2756     // We have not aborted. This means that we have finished all that
2757     // we could. Let's try to do some stealing...
2758 
2759     // We cannot check whether the global stack is empty, since other
2760     // tasks might be pushing objects to it concurrently.
2761     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2762            "only way to reach here");
2763     while (!has_aborted()) {
2764       G1TaskQueueEntry entry;
2765       if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) {
2766         scan_task_entry(entry);
2767 
2768         // And since we're towards the end, let's totally drain the
2769         // local queue and global stack.
2770         drain_local_queue(false);
2771         drain_global_stack(false);
2772       } else {
2773         break;
2774       }
2775     }
2776   }
2777 
2778   // We still haven't aborted. Now, let's try to get into the
2779   // termination protocol.
2780   if (do_termination && !has_aborted()) {
2781     // We cannot check whether the global stack is empty, since other
2782     // tasks might be concurrently pushing objects on it.
2783     // Separated the asserts so that we know which one fires.
2784     assert(_cm->out_of_regions(), "only way to reach here");
2785     assert(_task_queue->size() == 0, "only way to reach here");
2786     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2787 
2788     // The G1CMTask class also extends the TerminatorTerminator class,
2789     // hence its should_exit_termination() method will also decide
2790     // whether to exit the termination protocol or not.
2791     bool finished = (is_serial ||
2792                      _cm->terminator()->offer_termination(this));
2793     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2794     _termination_time_ms +=
2795       termination_end_time_ms - _termination_start_time_ms;
2796 
2797     if (finished) {
2798       // We're all done.
2799 
2800       // We can now guarantee that the global stack is empty, since
2801       // all other tasks have finished. We separated the guarantees so
2802       // that, if a condition is false, we can immediately find out
2803       // which one.
2804       guarantee(_cm->out_of_regions(), "only way to reach here");
2805       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2806       guarantee(_task_queue->size() == 0, "only way to reach here");
2807       guarantee(!_cm->has_overflown(), "only way to reach here");
2808     } else {
2809       // Apparently there's more work to do. Let's abort this task. It
2810       // will restart it and we can hopefully find more things to do.
2811       set_has_aborted();
2812     }
2813   }
2814 
2815   // Mainly for debugging purposes to make sure that a pointer to the
2816   // closure which was statically allocated in this frame doesn't
2817   // escape it by accident.
2818   set_cm_oop_closure(NULL);
2819   double end_time_ms = os::elapsedVTime() * 1000.0;
2820   double elapsed_time_ms = end_time_ms - _start_time_ms;
2821   // Update the step history.
2822   _step_times_ms.add(elapsed_time_ms);
2823 
2824   if (has_aborted()) {
2825     // The task was aborted for some reason.
2826     if (_has_timed_out) {
2827       double diff_ms = elapsed_time_ms - _time_target_ms;
2828       // Keep statistics of how well we did with respect to hitting
2829       // our target only if we actually timed out (if we aborted for
2830       // other reasons, then the results might get skewed).
2831       _marking_step_diffs_ms.add(diff_ms);
2832     }
2833 
2834     if (_cm->has_overflown()) {
2835       // This is the interesting one. We aborted because a global
2836       // overflow was raised. This means we have to restart the
2837       // marking phase and start iterating over regions. However, in
2838       // order to do this we have to make sure that all tasks stop
2839       // what they are doing and re-initialize in a safe manner. We
2840       // will achieve this with the use of two barrier sync points.
2841 
2842       if (!is_serial) {
2843         // We only need to enter the sync barrier if being called
2844         // from a parallel context
2845         _cm->enter_first_sync_barrier(_worker_id);
2846 
2847         // When we exit this sync barrier we know that all tasks have
2848         // stopped doing marking work. So, it's now safe to
2849         // re-initialize our data structures.
2850       }
2851 
2852       clear_region_fields();
2853       flush_mark_stats_cache();
2854 
2855       if (!is_serial) {
2856         // If we're executing the concurrent phase of marking, reset the marking
2857         // state; otherwise the marking state is reset after reference processing,
2858         // during the remark pause.
2859         // If we reset here as a result of an overflow during the remark we will
2860         // see assertion failures from any subsequent set_concurrency_and_phase()
2861         // calls.
2862         if (_cm->concurrent() && _worker_id == 0) {
2863           // Worker 0 is responsible for clearing the global data structures because
2864           // of an overflow. During STW we should not clear the overflow flag (in
2865           // G1ConcurrentMark::reset_marking_state()) since we rely on it being true when we exit
2866           // method to abort the pause and restart concurrent marking.
2867           _cm->reset_marking_for_restart();
2868 
2869           log_info(gc, marking)("Concurrent Mark reset for overflow");
2870         }
2871 
2872         // ...and enter the second barrier.
2873         _cm->enter_second_sync_barrier(_worker_id);
2874       }
2875       // At this point, if we're during the concurrent phase of
2876       // marking, everything has been re-initialized and we're
2877       // ready to restart.
2878     }
2879   }
2880 }
2881 
2882 G1CMTask::G1CMTask(uint worker_id,
2883                    G1ConcurrentMark* cm,
2884                    G1CMTaskQueue* task_queue,
2885                    G1RegionMarkStats* mark_stats,
2886                    uint max_regions) :
2887   _objArray_processor(this),
2888   _worker_id(worker_id),
2889   _g1h(G1CollectedHeap::heap()),
2890   _cm(cm),
2891   _next_mark_bitmap(NULL),
2892   _task_queue(task_queue),
2893   _mark_stats_cache(mark_stats, max_regions, RegionMarkStatsCacheSize),
2894   _calls(0),
2895   _time_target_ms(0.0),
2896   _start_time_ms(0.0),
2897   _cm_oop_closure(NULL),
2898   _curr_region(NULL),
2899   _finger(NULL),
2900   _region_limit(NULL),
2901   _words_scanned(0),
2902   _words_scanned_limit(0),
2903   _real_words_scanned_limit(0),
2904   _refs_reached(0),
2905   _refs_reached_limit(0),
2906   _real_refs_reached_limit(0),
2907   _hash_seed(17),
2908   _has_aborted(false),
2909   _has_timed_out(false),
2910   _draining_satb_buffers(false),
2911   _step_times_ms(),
2912   _elapsed_time_ms(0.0),
2913   _termination_time_ms(0.0),
2914   _termination_start_time_ms(0.0),
2915   _marking_step_diffs_ms()
2916 {
2917   guarantee(task_queue != NULL, "invariant");
2918 
2919   _marking_step_diffs_ms.add(0.5);
2920 }
2921 
2922 // These are formatting macros that are used below to ensure
2923 // consistent formatting. The *_H_* versions are used to format the
2924 // header for a particular value and they should be kept consistent
2925 // with the corresponding macro. Also note that most of the macros add
2926 // the necessary white space (as a prefix) which makes them a bit
2927 // easier to compose.
2928 
2929 // All the output lines are prefixed with this string to be able to
2930 // identify them easily in a large log file.
2931 #define G1PPRL_LINE_PREFIX            "###"
2932 
2933 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
2934 #ifdef _LP64
2935 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
2936 #else // _LP64
2937 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
2938 #endif // _LP64
2939 
2940 // For per-region info
2941 #define G1PPRL_TYPE_FORMAT            "   %-4s"
2942 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
2943 #define G1PPRL_STATE_FORMAT           "   %-5s"
2944 #define G1PPRL_STATE_H_FORMAT         "   %5s"
2945 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
2946 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
2947 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
2948 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
2949 
2950 // For summary info
2951 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
2952 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
2953 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
2954 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
2955 
2956 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) :
2957   _total_used_bytes(0), _total_capacity_bytes(0),
2958   _total_prev_live_bytes(0), _total_next_live_bytes(0),
2959   _total_remset_bytes(0), _total_strong_code_roots_bytes(0)
2960 {
2961   if (!log_is_enabled(Trace, gc, liveness)) {
2962     return;
2963   }
2964 
2965   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2966   MemRegion g1_reserved = g1h->g1_reserved();
2967   double now = os::elapsedTime();
2968 
2969   // Print the header of the output.
2970   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
2971   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
2972                           G1PPRL_SUM_ADDR_FORMAT("reserved")
2973                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
2974                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
2975                           HeapRegion::GrainBytes);
2976   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
2977   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2978                           G1PPRL_TYPE_H_FORMAT
2979                           G1PPRL_ADDR_BASE_H_FORMAT
2980                           G1PPRL_BYTE_H_FORMAT
2981                           G1PPRL_BYTE_H_FORMAT
2982                           G1PPRL_BYTE_H_FORMAT
2983                           G1PPRL_DOUBLE_H_FORMAT
2984                           G1PPRL_BYTE_H_FORMAT
2985                           G1PPRL_STATE_H_FORMAT
2986                           G1PPRL_BYTE_H_FORMAT,
2987                           "type", "address-range",
2988                           "used", "prev-live", "next-live", "gc-eff",
2989                           "remset", "state", "code-roots");
2990   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
2991                           G1PPRL_TYPE_H_FORMAT
2992                           G1PPRL_ADDR_BASE_H_FORMAT
2993                           G1PPRL_BYTE_H_FORMAT
2994                           G1PPRL_BYTE_H_FORMAT
2995                           G1PPRL_BYTE_H_FORMAT
2996                           G1PPRL_DOUBLE_H_FORMAT
2997                           G1PPRL_BYTE_H_FORMAT
2998                           G1PPRL_STATE_H_FORMAT
2999                           G1PPRL_BYTE_H_FORMAT,
3000                           "", "",
3001                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3002                           "(bytes)", "", "(bytes)");
3003 }
3004 
3005 bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
3006   if (!log_is_enabled(Trace, gc, liveness)) {
3007     return false;
3008   }
3009 
3010   const char* type       = r->get_type_str();
3011   HeapWord* bottom       = r->bottom();
3012   HeapWord* end          = r->end();
3013   size_t capacity_bytes  = r->capacity();
3014   size_t used_bytes      = r->used();
3015   size_t prev_live_bytes = r->live_bytes();
3016   size_t next_live_bytes = r->next_live_bytes();
3017   double gc_eff          = r->gc_efficiency();
3018   size_t remset_bytes    = r->rem_set()->mem_size();
3019   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3020   const char* remset_type = r->rem_set()->get_short_state_str();
3021 
3022   _total_used_bytes      += used_bytes;
3023   _total_capacity_bytes  += capacity_bytes;
3024   _total_prev_live_bytes += prev_live_bytes;
3025   _total_next_live_bytes += next_live_bytes;
3026   _total_remset_bytes    += remset_bytes;
3027   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3028 
3029   // Print a line for this particular region.
3030   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3031                           G1PPRL_TYPE_FORMAT
3032                           G1PPRL_ADDR_BASE_FORMAT
3033                           G1PPRL_BYTE_FORMAT
3034                           G1PPRL_BYTE_FORMAT
3035                           G1PPRL_BYTE_FORMAT
3036                           G1PPRL_DOUBLE_FORMAT
3037                           G1PPRL_BYTE_FORMAT
3038                           G1PPRL_STATE_FORMAT
3039                           G1PPRL_BYTE_FORMAT,
3040                           type, p2i(bottom), p2i(end),
3041                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3042                           remset_bytes, remset_type, strong_code_roots_bytes);
3043 
3044   return false;
3045 }
3046 
3047 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3048   if (!log_is_enabled(Trace, gc, liveness)) {
3049     return;
3050   }
3051 
3052   // add static memory usages to remembered set sizes
3053   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3054   // Print the footer of the output.
3055   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3056   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3057                          " SUMMARY"
3058                          G1PPRL_SUM_MB_FORMAT("capacity")
3059                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3060                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3061                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3062                          G1PPRL_SUM_MB_FORMAT("remset")
3063                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3064                          bytes_to_mb(_total_capacity_bytes),
3065                          bytes_to_mb(_total_used_bytes),
3066                          percent_of(_total_used_bytes, _total_capacity_bytes),
3067                          bytes_to_mb(_total_prev_live_bytes),
3068                          percent_of(_total_prev_live_bytes, _total_capacity_bytes),
3069                          bytes_to_mb(_total_next_live_bytes),
3070                          percent_of(_total_next_live_bytes, _total_capacity_bytes),
3071                          bytes_to_mb(_total_remset_bytes),
3072                          bytes_to_mb(_total_strong_code_roots_bytes));
3073 }