1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMarkThread.inline.hpp"
  30 #include "gc/g1/g1CollectedHeap.inline.hpp"
  31 #include "gc/g1/g1CollectorState.hpp"
  32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  33 #include "gc/g1/g1HeapVerifier.hpp"
  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1CardLiveData.inline.hpp"
  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/heapRegion.inline.hpp"
  39 #include "gc/g1/heapRegionRemSet.hpp"
  40 #include "gc/g1/heapRegionSet.inline.hpp"
  41 #include "gc/g1/suspendibleThreadSet.hpp"
  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/genOopClosures.inline.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"
  49 #include "gc/shared/taskqueue.inline.hpp"
  50 #include "gc/shared/vmGCOperations.hpp"
  51 #include "logging/log.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/prefetch.inline.hpp"
  59 #include "services/memTracker.hpp"
  60 #include "utilities/growableArray.hpp"
  61 
  62 // Concurrent marking bit map wrapper
  63 
  64 G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  65   _bm(),
  66   _shifter(shifter) {
  67   _bmStartWord = 0;
  68   _bmWordSize = 0;
  69 }
  70 
  71 HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  72                                                  const HeapWord* limit) const {
  73   // First we must round addr *up* to a possible object boundary.
  74   addr = (HeapWord*)align_size_up((intptr_t)addr,
  75                                   HeapWordSize << _shifter);
  76   size_t addrOffset = heapWordToOffset(addr);
  77   assert(limit != NULL, "limit must not be NULL");
  78   size_t limitOffset = heapWordToOffset(limit);
  79   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  80   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  81   assert(nextAddr >= addr, "get_next_one postcondition");
  82   assert(nextAddr == limit || isMarked(nextAddr),
  83          "get_next_one postcondition");
  84   return nextAddr;
  85 }
  86 
  87 #ifndef PRODUCT
  88 bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  89   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  90   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
  91          "size inconsistency");
  92   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
  93          _bmWordSize  == heap_rs.word_size();
  94 }
  95 #endif
  96 
  97 void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  98   _bm.print_on_error(st, prefix);
  99 }
 100 
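     // The mark bitmap has one bit per MinObjAlignmentInBytes of heap, i.e. one
     // bitmap byte covers mark_distance() bytes of heap. Its backing storage is
     // therefore heap_size / mark_distance() bytes, rounded up to the
     // reservation granularity.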
 101 size_t G1CMBitMap::compute_size(size_t heap_size) {
 102   return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
 103 }
 104 
 105 size_t G1CMBitMap::mark_distance() {
 106   return MinObjAlignmentInBytes * BitsPerByte;
 107 }
 108 
 109 void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 110   _bmStartWord = heap.start();
 111   _bmWordSize = heap.word_size();
 112 
 113   _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);
 114 
 115   storage->set_mapping_changed_listener(&_listener);
 116 }
 117 
 118 void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 119   if (zero_filled) {
 120     return;
 121   }
 122   // We need to clear the bitmap on commit, removing any existing information.
 123   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
 124   _bm->clear_range(mr);
 125 }
 126 
 127 void G1CMBitMap::clear_range(MemRegion mr) {
 128   mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 129   assert(!mr.is_empty(), "unexpected empty region");
 130   // convert address range into offset range
 131   _bm.at_put_range(heapWordToOffset(mr.start()),
 132                    heapWordToOffset(mr.end()), false);
 133 }
 134 
 135 G1CMMarkStack::G1CMMarkStack() :
 136   _max_chunk_capacity(0),
 137   _base(NULL),
 138   _chunk_capacity(0),
 139   _out_of_memory(false),
 140   _should_expand(false) {
 141   set_empty();
 142 }
 143 
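     // Replace the backing chunk array with a freshly mmapped one of
     // new_capacity chunks, releasing the old mapping (if any) and resetting
     // the stack to empty. Only legal while the stack is empty; returns false
     // if the new reservation cannot be satisfied.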
 144 bool G1CMMarkStack::resize(size_t new_capacity) {
 145   assert(is_empty(), "Only resize when stack is empty.");
 146   assert(new_capacity <= _max_chunk_capacity,
 147          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
 148 
 149   OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity);
 150 
 151   if (new_base == NULL) {
 152     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk));
 153     return false;
 154   }
 155   // Release old mapping.
 156   if (_base != NULL) {
 157     MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
 158   }
 159 
 160   _base = new_base;
 161   _chunk_capacity = new_capacity;
 162   set_empty();
 163   _should_expand = false;
 164 
 165   return true;
 166 }
 167 
 168 size_t G1CMMarkStack::capacity_alignment() {
 169   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(OopChunk)) / sizeof(void*);
 170 }
 171 
 172 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 173   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 174 
 175   size_t const OopChunkSizeInVoidStar = sizeof(OopChunk) / sizeof(void*);
 176
 177   _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
 178   size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
 179 
 180   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 181             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 182             _max_chunk_capacity,
 183             initial_chunk_capacity);
 184 
 185   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 186                 initial_chunk_capacity, _max_chunk_capacity);
 187 
 188   return resize(initial_chunk_capacity);
 189 }
 190 
 191 void G1CMMarkStack::expand() {
 192   // Clear expansion flag
 193   _should_expand = false;
 194 
 195   if (_chunk_capacity == _max_chunk_capacity) {
 196     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 197     return;
 198   }
 199   size_t old_capacity = _chunk_capacity;
 200   // Double capacity if possible
 201   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 202 
 203   if (resize(new_capacity)) {
 204     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 205                   old_capacity, new_capacity);
 206   } else {
 207     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 208                     old_capacity, new_capacity);
 209   }
 210 }
 211 
 212 G1CMMarkStack::~G1CMMarkStack() {
 213   if (_base != NULL) {
 214     MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
 215   }
 216 }
 217 
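     // The chunk list and the free list are simple singly-linked lists of
     // OopChunks. These low-level helpers are not thread-safe on their own;
     // callers serialize access with MarkStackChunkList_lock and
     // MarkStackFreeList_lock respectively (see the wrappers below).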
 218 void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
 219   elem->next = *list;
 220   *list = elem;
 221 }
 222 
 223 void G1CMMarkStack::add_chunk_to_chunk_list(OopChunk* elem) {
 224   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 225   add_chunk_to_list(&_chunk_list, elem);
 226   _chunks_in_chunk_list++;
 227 }
 228 
 229 void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
 230   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 231   add_chunk_to_list(&_free_list, elem);
 232 }
 233 
 234 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
 235   OopChunk* result = *list;
 236   if (result != NULL) {
 237     *list = (*list)->next;
 238   }
 239   return result;
 240 }
 241 
 242 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 243   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 244   OopChunk* result = remove_chunk_from_list(&_chunk_list);
 245   if (result != NULL) {
 246     _chunks_in_chunk_list--;
 247   }
 248   return result;
 249 }
 250 
 251 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 252   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 253   return remove_chunk_from_list(&_free_list);
 254 }
 255 
 256 G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
 257   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 258   // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 259   // wraparound of _hwm.
 260   if (_hwm >= _chunk_capacity) {
 261     return NULL;
 262   }
 263 
 264   size_t cur_idx = Atomic::add(1, &_hwm) - 1;
 265   if (cur_idx >= _chunk_capacity) {
 266     return NULL;
 267   }
 268 
 269   OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
 270   result->next = NULL;
 271   return result;
 272 }
 273 
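     // Push a buffer of OopsPerChunk oops onto the global mark stack. A chunk
     // is taken from the free list if possible, otherwise carved out of the
     // backing array via the _hwm bump pointer; if neither works, the stack is
     // marked as out of memory and false is returned.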
 274 bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
 275   // Get a new chunk.
 276   OopChunk* new_chunk = remove_chunk_from_free_list();
 277 
 278   if (new_chunk == NULL) {
 279     // Did not get a chunk from the free list. Allocate from backing memory.
 280     new_chunk = allocate_new_chunk();
 281   }
 282 
 283   if (new_chunk == NULL) {
 284     _out_of_memory = true;
 285     return false;
 286   }
 287 
 288   Copy::conjoint_oops_atomic(ptr_arr, new_chunk->data, OopsPerChunk);
 289 
 290   add_chunk_to_chunk_list(new_chunk);
 291 
 292   return true;
 293 }
 294 
 295 bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
 296   OopChunk* cur = remove_chunk_from_chunk_list();
 297 
 298   if (cur == NULL) {
 299     return false;
 300   }
 301 
 302   Copy::conjoint_oops_atomic(cur->data, ptr_arr, OopsPerChunk);
 303 
 304   add_chunk_to_free_list(cur);
 305   return true;
 306 }
 307 
 308 void G1CMMarkStack::set_empty() {
 309   _chunks_in_chunk_list = 0;
 310   _hwm = 0;
 311   clear_out_of_memory();
 312   _chunk_list = NULL;
 313   _free_list = NULL;
 314 }
 315 
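     // Tracks the set of root regions that need to be scanned at the start of a
     // concurrent marking cycle. Currently only the survivor regions of the
     // initial-mark pause can be root regions; claim_next() hands them out to
     // the scanning workers via an atomic claim index.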
 316 G1CMRootRegions::G1CMRootRegions() :
 317   _cm(NULL), _scan_in_progress(false),
 318   _should_abort(false), _claimed_survivor_index(0) { }
 319 
 320 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
 321   _survivors = survivors;
 322   _cm = cm;
 323 }
 324 
 325 void G1CMRootRegions::prepare_for_scan() {
 326   assert(!scan_in_progress(), "pre-condition");
 327 
 328   // Currently, only survivors can be root regions.
 329   _claimed_survivor_index = 0;
 330   _scan_in_progress = _survivors->regions()->is_nonempty();
 331   _should_abort = false;
 332 }
 333 
 334 HeapRegion* G1CMRootRegions::claim_next() {
 335   if (_should_abort) {
 336     // If someone has set the should_abort flag, we return NULL to
 337     // force the caller to bail out of their loop.
 338     return NULL;
 339   }
 340 
 341   // Currently, only survivors can be root regions.
 342   const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();
 343 
 344   int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
 345   if (claimed_index < survivor_regions->length()) {
 346     return survivor_regions->at(claimed_index);
 347   }
 348   return NULL;
 349 }
 350 
 351 uint G1CMRootRegions::num_root_regions() const {
 352   return (uint)_survivors->regions()->length();
 353 }
 354 
 355 void G1CMRootRegions::notify_scan_done() {
 356   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 357   _scan_in_progress = false;
 358   RootRegionScan_lock->notify_all();
 359 }
 360 
 361 void G1CMRootRegions::cancel_scan() {
 362   notify_scan_done();
 363 }
 364 
 365 void G1CMRootRegions::scan_finished() {
 366   assert(scan_in_progress(), "pre-condition");
 367 
 368   // Currently, only survivors can be root regions.
 369   if (!_should_abort) {
 370     assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
 371     assert((uint)_claimed_survivor_index >= _survivors->length(),
 372            "we should have claimed all survivors, claimed index = %u, length = %u",
 373            (uint)_claimed_survivor_index, _survivors->length());
 374   }
 375 
 376   notify_scan_done();
 377 }
 378 
 379 bool G1CMRootRegions::wait_until_scan_finished() {
 380   if (!scan_in_progress()) return false;
 381 
 382   {
 383     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 384     while (scan_in_progress()) {
 385       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 386     }
 387   }
 388   return true;
 389 }
 390 
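     // Default number of concurrent marking threads: roughly one quarter of the
     // parallel GC threads, but at least one.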
 391 uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 392   return MAX2((n_par_threads + 2) / 4, 1U);
 393 }
 394 
 395 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
 396   _g1h(g1h),
 397   _markBitMap1(),
 398   _markBitMap2(),
 399   _parallel_marking_threads(0),
 400   _max_parallel_marking_threads(0),
 401   _sleep_factor(0.0),
 402   _marking_task_overhead(1.0),
 403   _cleanup_list("Cleanup List"),
 404 
 405   _prevMarkBitMap(&_markBitMap1),
 406   _nextMarkBitMap(&_markBitMap2),
 407 
 408   _global_mark_stack(),
 409   // _finger set in set_non_marking_state
 410 
 411   _max_worker_id(ParallelGCThreads),
 412   // _active_tasks set in set_non_marking_state
 413   // _tasks set inside the constructor
 414   _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
 415   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 416 
 417   _has_overflown(false),
 418   _concurrent(false),
 419   _has_aborted(false),
 420   _restart_for_overflow(false),
 421   _concurrent_marking_in_progress(false),
 422   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 423   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 424 
 425   // _verbose_level set below
 426 
 427   _init_times(),
 428   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 429   _cleanup_times(),
 430   _total_counting_time(0.0),
 431   _total_rs_scrub_time(0.0),
 432 
 433   _parallel_workers(NULL),
 434 
 435   _completed_initialization(false) {
 436 
 437   _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 438   _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 439 
 440   // Create & start a ConcurrentMark thread.
 441   _cmThread = new ConcurrentMarkThread(this);
 442   assert(cmThread() != NULL, "CM Thread should have been created");
 443   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 444   if (_cmThread->osthread() == NULL) {
 445       vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 446   }
 447 
 448   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 449   assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
 450   assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 451 
 452   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 453   satb_qs.set_buffer_size(G1SATBBufferSize);
 454 
 455   _root_regions.init(_g1h->survivor(), this);
 456 
 457   if (ConcGCThreads > ParallelGCThreads) {
 458     log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
 459                     ConcGCThreads, ParallelGCThreads);
 460     return;
 461   }
 462   if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
 463     // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
 464     // if both are set
 465     _sleep_factor             = 0.0;
 466     _marking_task_overhead    = 1.0;
 467   } else if (G1MarkingOverheadPercent > 0) {
 468     // We will calculate the number of parallel marking threads based
 469     // on a target overhead with respect to the soft real-time goal
 470     double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
 471     double overall_cm_overhead =
 472       (double) MaxGCPauseMillis * marking_overhead /
 473       (double) GCPauseIntervalMillis;
 474     double cpu_ratio = 1.0 / os::initial_active_processor_count();
 475     double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
 476     double marking_task_overhead =
 477       overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
 478     double sleep_factor =
 479                        (1.0 - marking_task_overhead) / marking_task_overhead;
 480 
 481     FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
 482     _sleep_factor             = sleep_factor;
 483     _marking_task_overhead    = marking_task_overhead;
 484   } else {
 485     // Calculate the number of parallel marking threads by scaling
 486     // the number of parallel GC threads.
 487     uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
 488     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 489     _sleep_factor             = 0.0;
 490     _marking_task_overhead    = 1.0;
 491   }
 492 
 493   assert(ConcGCThreads > 0, "Should have been set");
 494   log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
 495   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 496   _parallel_marking_threads = ConcGCThreads;
 497   _max_parallel_marking_threads = _parallel_marking_threads;
 498 
 499   _parallel_workers = new WorkGang("G1 Marker",
 500        _max_parallel_marking_threads, false, true);
 501   if (_parallel_workers == NULL) {
 502     vm_exit_during_initialization("Failed necessary allocation.");
 503   } else {
 504     _parallel_workers->initialize_workers();
 505   }
 506 
 507   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 508     size_t mark_stack_size =
 509       MIN2(MarkStackSizeMax,
 510           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
 511     // Verify that the calculated value for MarkStackSize is in range.
 512     // It would be nice to use the private utility routine from Arguments.
 513     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 514       log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 515                       "must be between 1 and " SIZE_FORMAT,
 516                       mark_stack_size, MarkStackSizeMax);
 517       return;
 518     }
 519     FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
 520   } else {
 521     // Verify MarkStackSize is in range.
 522     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 523       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 524         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 525           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 526                           "must be between 1 and " SIZE_FORMAT,
 527                           MarkStackSize, MarkStackSizeMax);
 528           return;
 529         }
 530       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 531         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 532           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 533                           " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 534                           MarkStackSize, MarkStackSizeMax);
 535           return;
 536         }
 537       }
 538     }
 539   }
 540 
 541   if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
 542     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
 543   }
 544 
 545   _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
 546   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
 547 
 548   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 549   _active_tasks = _max_worker_id;
 550 
 551   for (uint i = 0; i < _max_worker_id; ++i) {
 552     G1CMTaskQueue* task_queue = new G1CMTaskQueue();
 553     task_queue->initialize();
 554     _task_queues->register_queue(i, task_queue);
 555 
 556     _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);
 557 
 558     _accum_task_vtime[i] = 0.0;
 559   }
 560 
 561   // so that the call below can read a sensible value
 562   _heap_start = g1h->reserved_region().start();
 563   set_non_marking_state();
 564   _completed_initialization = true;
 565 }
 566 
 567 void G1ConcurrentMark::reset() {
 568   // Starting values for these two. This should be called in a STW
 569   // phase.
 570   MemRegion reserved = _g1h->g1_reserved();
 571   _heap_start = reserved.start();
 572   _heap_end   = reserved.end();
 573 
 574   // Separated the asserts so that we know which one fires.
 575   assert(_heap_start != NULL, "heap bounds should look ok");
 576   assert(_heap_end != NULL, "heap bounds should look ok");
 577   assert(_heap_start < _heap_end, "heap bounds should look ok");
 578 
 579   // Reset all the marking data structures and any necessary flags
 580   reset_marking_state();
 581 
 582   // We do reset all of them, since different phases will use
 583   // different number of active threads. So, it's easiest to have all
 584   // of them ready.
 585   for (uint i = 0; i < _max_worker_id; ++i) {
 586     _tasks[i]->reset(_nextMarkBitMap);
 587   }
 588 
 589   // we need this to make sure that the flag is on during the evac
 590   // pause with initial mark piggy-backed
 591   set_concurrent_marking_in_progress();
 592 }
 593 
 594 
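     // Reset the per-cycle marking data structures: empty the global mark stack
     // and all task queues and move the global finger back to the start of the
     // heap. The overflow flag is only cleared when clear_overflow is true; it
     // must stay set when we are about to abort the remark pause and restart
     // concurrent marking.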
 595 void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
 596   _global_mark_stack.set_should_expand(has_overflown());
 597   _global_mark_stack.set_empty();        // Also clears the overflow stack's overflow flag
 598   if (clear_overflow) {
 599     clear_has_overflown();
 600   } else {
 601     assert(has_overflown(), "pre-condition");
 602   }
 603   _finger = _heap_start;
 604 
 605   for (uint i = 0; i < _max_worker_id; ++i) {
 606     G1CMTaskQueue* queue = _task_queues->queue(i);
 607     queue->set_empty();
 608   }
 609 }
 610 
 611 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
 612   assert(active_tasks <= _max_worker_id, "we should not have more");
 613 
 614   _active_tasks = active_tasks;
 615   // Need to update the three data structures below according to the
 616   // number of active threads for this phase.
 617   _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
 618   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 619   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 620 }
 621 
 622 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 623   set_concurrency(active_tasks);
 624 
 625   _concurrent = concurrent;
 626   // We propagate this to all tasks, not just the active ones.
 627   for (uint i = 0; i < _max_worker_id; ++i)
 628     _tasks[i]->set_concurrent(concurrent);
 629 
 630   if (concurrent) {
 631     set_concurrent_marking_in_progress();
 632   } else {
 633     // We currently assume that the concurrent flag has been set to
 634     // false before we start remark. At this point we should also be
 635     // in a STW phase.
 636     assert(!concurrent_marking_in_progress(), "invariant");
 637     assert(out_of_regions(),
 638            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 639            p2i(_finger), p2i(_heap_end));
 640   }
 641 }
 642 
 643 void G1ConcurrentMark::set_non_marking_state() {
 644   // We set the global marking state to some default values when we're
 645   // not doing marking.
 646   reset_marking_state();
 647   _active_tasks = 0;
 648   clear_concurrent_marking_in_progress();
 649 }
 650 
 651 G1ConcurrentMark::~G1ConcurrentMark() {
 652   // The G1ConcurrentMark instance is never freed.
 653   ShouldNotReachHere();
 654 }
 655 
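     // Parallel task that clears a mark bitmap region by region. Each region is
     // cleared in pieces of at most chunk_size() bytes of heap at a time so
     // that, when run concurrently, a worker can yield to safepoints between
     // pieces and abort early if marking has been aborted.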
 656 class G1ClearBitMapTask : public AbstractGangTask {
 657 public:
 658   static size_t chunk_size() { return M; }
 659 
 660 private:
 661   // Heap region closure used for clearing the given mark bitmap.
 662   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 663   private:
 664     G1CMBitMap* _bitmap;
 665     G1ConcurrentMark* _cm;
 666   public:
 667     G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
 668     }
 669 
 670     virtual bool doHeapRegion(HeapRegion* r) {
 671       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 672 
 673       HeapWord* cur = r->bottom();
 674       HeapWord* const end = r->end();
 675 
 676       while (cur < end) {
 677         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 678         _bitmap->clear_range(mr);
 679 
 680         cur += chunk_size_in_words;
 681 
 682         // Abort iteration if after yielding the marking has been aborted.
 683         if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
 684           return true;
 685         }
 686         // Repeat the asserts from before the start of the closure. We will do them
 687         // as asserts here to minimize their overhead on the product. However, we
 688         // will have them as guarantees at the beginning / end of the bitmap
 689         // clearing to get some checking in the product.
 690         assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
 691         assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
 692       }
 693       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 694 
 695       return false;
 696     }
 697   };
 698 
 699   G1ClearBitmapHRClosure _cl;
 700   HeapRegionClaimer _hr_claimer;
 701   bool _suspendible; // If the task is suspendible, workers must join the STS.
 702 
 703 public:
 704   G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 705     AbstractGangTask("G1 Clear Bitmap"),
 706     _cl(bitmap, suspendible ? cm : NULL),
 707     _hr_claimer(n_workers),
 708     _suspendible(suspendible)
 709   { }
 710 
 711   void work(uint worker_id) {
 712     SuspendibleThreadSetJoiner sts_join(_suspendible);
 713     G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
 714   }
 715 
 716   bool is_complete() {
 717     return _cl.complete();
 718   }
 719 };
 720 
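     // Clear the given bitmap using the given workers. The number of workers is
     // capped by the number of work units, i.e. the amount of bitmap to clear
     // divided into chunk_size() pieces. When may_yield is true the task joins
     // the suspendible thread set and may terminate early if marking is
     // aborted; otherwise it must run to completion at a safepoint.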
 721 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 722   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 723 
 724   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 725   size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 726 
 727   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 728 
 729   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 730 
 731   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 732   workers->run_task(&cl, num_workers);
 733   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 734 }
 735 
 736 void G1ConcurrentMark::cleanup_for_next_mark() {
 737   // Make sure that the concurrent mark thread looks to still be in
 738   // the current cycle.
 739   guarantee(cmThread()->during_cycle(), "invariant");
 740 
 741   // We are finishing up the current cycle by clearing the next
 742   // marking bitmap and getting it ready for the next cycle. During
 743   // this time no other cycle can start. So, let's make sure that this
 744   // is the case.
 745   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
 746 
 747   clear_bitmap(_nextMarkBitMap, _parallel_workers, true);
 748 
 749   // Clear the live count data. If the marking has been aborted, the abort()
 750   // call already did that.
 751   if (!has_aborted()) {
 752     clear_live_data(_parallel_workers);
 753     DEBUG_ONLY(verify_live_data_clear());
 754   }
 755 
 756   // Repeat the asserts from above.
 757   guarantee(cmThread()->during_cycle(), "invariant");
 758   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
 759 }
 760 
 761 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 762   assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
 763   clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
 764 }
 765 
 766 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 767   G1CMBitMap* _bitmap;
 768   bool _error;
 769  public:
 770   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
 771   }
 772 
 773   virtual bool doHeapRegion(HeapRegion* r) {
 774     // This closure can be called concurrently to the mutator, so we must make sure
 775     // that the result of the getNextMarkedWordAddress() call is compared to the
 776     // value passed to it as limit to detect any found bits.
 777     // end never changes in G1.
 778     HeapWord* end = r->end();
 779     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
 780   }
 781 };
 782 
 783 bool G1ConcurrentMark::nextMarkBitmapIsClear() {
 784   CheckBitmapClearHRClosure cl(_nextMarkBitMap);
 785   _g1h->heap_region_iterate(&cl);
 786   return cl.complete();
 787 }
 788 
 789 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 790 public:
 791   bool doHeapRegion(HeapRegion* r) {
 792     r->note_start_of_marking();
 793     return false;
 794   }
 795 };
 796 
 797 void G1ConcurrentMark::checkpointRootsInitialPre() {
 798   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 799   G1Policy* g1p = g1h->g1_policy();
 800 
 801   _has_aborted = false;
 802 
 803   // Initialize marking structures. This has to be done in a STW phase.
 804   reset();
 805 
 806   // For each region note start of marking.
 807   NoteStartOfMarkHRClosure startcl;
 808   g1h->heap_region_iterate(&startcl);
 809 }
 810 
 811 
 812 void G1ConcurrentMark::checkpointRootsInitialPost() {
 813   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 814 
 815   // Start Concurrent Marking weak-reference discovery.
 816   ReferenceProcessor* rp = g1h->ref_processor_cm();
 817   // enable ("weak") refs discovery
 818   rp->enable_discovery();
 819   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 820 
 821   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 822   // This is the start of the marking cycle; we expect all
 823   // threads to have SATB queues with active set to false.
 824   satb_mq_set.set_active_all_threads(true, /* new active value */
 825                                      false /* expected_active */);
 826 
 827   _root_regions.prepare_for_scan();
 828 
 829   // update_g1_committed() will be called at the end of an evac pause
 830   // when marking is on. So, it's also called at the end of the
 831   // initial-mark pause to update the heap end, if the heap expands
 832   // during it. No need to call it here.
 833 }
 834 
 835 /*
 836  * Notice that in the next two methods, we actually leave the STS
 837  * during the barrier sync and join it immediately afterwards. If we
 838  * do not do this, the following deadlock can occur: one thread could
 839  * be in the barrier sync code, waiting for the other thread to also
 840  * sync up, whereas another one could be trying to yield, while also
 841  * waiting for the other threads to sync up too.
 842  *
 843  * Note, however, that this code is also used during remark and in
 844  * this case we should not attempt to leave / enter the STS, otherwise
 845  * we'll either hit an assert (debug / fastdebug) or deadlock
 846  * (product). So we should only leave / enter the STS if we are
 847  * operating concurrently.
 848  *
 849  * Because the thread that does the sync barrier has left the STS, it
 850  * is possible for it to be suspended for a Full GC or for an evacuation
 851  * pause to occur. This is actually safe, since entering the sync
 852  * barrier is one of the last things do_marking_step() does, and it
 853  * doesn't manipulate any data structures afterwards.
 854  */
 855 
 856 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 857   bool barrier_aborted;
 858   {
 859     SuspendibleThreadSetLeaver sts_leave(concurrent());
 860     barrier_aborted = !_first_overflow_barrier_sync.enter();
 861   }
 862 
 863   // at this point everyone should have synced up and not be doing any
 864   // more work
 865 
 866   if (barrier_aborted) {
 867     // If the barrier aborted we ignore the overflow condition and
 868     // just abort the whole marking phase as quickly as possible.
 869     return;
 870   }
 871 
 872   // If we're executing the concurrent phase of marking, reset the marking
 873   // state; otherwise the marking state is reset after reference processing,
 874   // during the remark pause.
 875   // If we reset here as a result of an overflow during the remark we will
 876   // see assertion failures from any subsequent set_concurrency_and_phase()
 877   // calls.
 878   if (concurrent()) {
 879     // let the task associated with worker 0 do this
 880     if (worker_id == 0) {
 881       // task 0 is responsible for clearing the global data structures
 882       // We should be here because of an overflow. During STW we should
 883       // not clear the overflow flag since we rely on it being true when
 884       // we exit this method to abort the pause and restart concurrent
 885       // marking.
 886       reset_marking_state(true /* clear_overflow */);
 887 
 888       log_info(gc, marking)("Concurrent Mark reset for overflow");
 889     }
 890   }
 891 
 892   // after this, each task should reset its own data structures and
 893   // then go into the second barrier
 894 }
 895 
 896 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 897   SuspendibleThreadSetLeaver sts_leave(concurrent());
 898   _second_overflow_barrier_sync.enter();
 899 
 900   // at this point everything should be re-initialized and ready to go
 901 }
 902 
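     // The work gang task driving the concurrent marking phase. Each worker
     // repeatedly invokes do_marking_step() with a time budget of
     // G1ConcMarkStepDurationMillis, yields to safepoints between steps, and,
     // when marking overhead is throttled, sleeps in proportion to
     // sleep_factor() before retrying an aborted step.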
 903 class G1CMConcurrentMarkingTask: public AbstractGangTask {
 904 private:
 905   G1ConcurrentMark*     _cm;
 906   ConcurrentMarkThread* _cmt;
 907 
 908 public:
 909   void work(uint worker_id) {
 910     assert(Thread::current()->is_ConcurrentGC_thread(),
 911            "this should only be done by a conc GC thread");
 912     ResourceMark rm;
 913 
 914     double start_vtime = os::elapsedVTime();
 915 
 916     {
 917       SuspendibleThreadSetJoiner sts_join;
 918 
 919       assert(worker_id < _cm->active_tasks(), "invariant");
 920       G1CMTask* the_task = _cm->task(worker_id);
 921       the_task->record_start_time();
 922       if (!_cm->has_aborted()) {
 923         do {
 924           double start_vtime_sec = os::elapsedVTime();
 925           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
 926 
 927           the_task->do_marking_step(mark_step_duration_ms,
 928                                     true  /* do_termination */,
 929                                     false /* is_serial*/);
 930 
 931           double end_vtime_sec = os::elapsedVTime();
 932           double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
 933           _cm->clear_has_overflown();
 934 
 935           _cm->do_yield_check();
 936 
 937           jlong sleep_time_ms;
 938           if (!_cm->has_aborted() && the_task->has_aborted()) {
 939             sleep_time_ms =
 940               (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
 941             {
 942               SuspendibleThreadSetLeaver sts_leave;
 943               os::sleep(Thread::current(), sleep_time_ms, false);
 944             }
 945           }
 946         } while (!_cm->has_aborted() && the_task->has_aborted());
 947       }
 948       the_task->record_end_time();
 949       guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
 950     }
 951 
 952     double end_vtime = os::elapsedVTime();
 953     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 954   }
 955 
 956   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
 957                             ConcurrentMarkThread* cmt) :
 958       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
 959 
 960   ~G1CMConcurrentMarkingTask() { }
 961 };
 962 
 963 // Calculates the number of active workers for a concurrent
 964 // phase.
 965 uint G1ConcurrentMark::calc_parallel_marking_threads() {
 966   uint n_conc_workers = 0;
 967   if (!UseDynamicNumberOfGCThreads ||
 968       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 969        !ForceDynamicNumberOfGCThreads)) {
 970     n_conc_workers = max_parallel_marking_threads();
 971   } else {
 972     n_conc_workers =
 973       AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
 974                                                       1, /* Minimum workers */
 975                                                       parallel_marking_threads(),
 976                                                       Threads::number_of_non_daemon_threads());
 977     // Don't scale down "n_conc_workers" by scale_parallel_threads() because
 978     // that scaling has already gone into "_max_parallel_marking_threads".
 979   }
 980   assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
 981          "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
 982          max_parallel_marking_threads(), n_conc_workers);
 983   return n_conc_workers;
 984 }
 985 
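     // Scan a single root region: iterate over all objects between bottom() and
     // top() and apply G1RootRegionScanClosure to their fields so that the
     // objects referenced from the root region are marked.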
 986 void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
 987   // Currently, only survivors can be root regions.
 988   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
 989   G1RootRegionScanClosure cl(_g1h, this);
 990 
 991   const uintx interval = PrefetchScanIntervalInBytes;
 992   HeapWord* curr = hr->bottom();
 993   const HeapWord* end = hr->top();
 994   while (curr < end) {
 995     Prefetch::read(curr, interval);
 996     oop obj = oop(curr);
 997     int size = obj->oop_iterate_size(&cl);
 998     assert(size == obj->size(), "sanity");
 999     curr += size;
1000   }
1001 }
1002 
1003 class G1CMRootRegionScanTask : public AbstractGangTask {
1004 private:
1005   G1ConcurrentMark* _cm;
1006 
1007 public:
1008   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
1009     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
1010 
1011   void work(uint worker_id) {
1012     assert(Thread::current()->is_ConcurrentGC_thread(),
1013            "this should only be done by a conc GC thread");
1014 
1015     G1CMRootRegions* root_regions = _cm->root_regions();
1016     HeapRegion* hr = root_regions->claim_next();
1017     while (hr != NULL) {
1018       _cm->scanRootRegion(hr);
1019       hr = root_regions->claim_next();
1020     }
1021   }
1022 };
1023 
1024 void G1ConcurrentMark::scan_root_regions() {
1025   // scan_in_progress() will have been set to true only if there was
1026   // at least one root region to scan. So, if it's false, we
1027   // should not attempt to do any further work.
1028   if (root_regions()->scan_in_progress()) {
1029     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
1030 
1031     _parallel_marking_threads = MIN2(calc_parallel_marking_threads(),
1032                                      // We distribute work on a per-region basis, so starting
1033                                      // more threads than that is useless.
1034                                      root_regions()->num_root_regions());
1035     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1036            "Maximum number of marking threads exceeded");
1037 
1038     G1CMRootRegionScanTask task(this);
1039     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
1040                         task.name(), _parallel_marking_threads, root_regions()->num_root_regions());
1041     _parallel_workers->run_task(&task, _parallel_marking_threads);
1042 
1043     // It's possible that has_aborted() is true here without actually
1044     // aborting the survivor scan earlier. This is OK as it's
1045     // mainly used for sanity checking.
1046     root_regions()->scan_finished();
1047   }
1048 }
1049 
1050 void G1ConcurrentMark::concurrent_cycle_start() {
1051   _gc_timer_cm->register_gc_start();
1052 
1053   _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
1054 
1055   _g1h->trace_heap_before_gc(_gc_tracer_cm);
1056 }
1057 
1058 void G1ConcurrentMark::concurrent_cycle_end() {
1059   _g1h->trace_heap_after_gc(_gc_tracer_cm);
1060 
1061   if (has_aborted()) {
1062     _gc_tracer_cm->report_concurrent_mode_failure();
1063   }
1064 
1065   _gc_timer_cm->register_gc_end();
1066 
1067   _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1068 }
1069 
1070 void G1ConcurrentMark::mark_from_roots() {
1071   // we might be tempted to assert that:
1072   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1073   //        "inconsistent argument?");
1074   // However that wouldn't be right, because it's possible that
1075   // a safepoint is indeed in progress as a younger generation
1076   // stop-the-world GC happens even as we mark in this generation.
1077 
1078   _restart_for_overflow = false;
1079 
1080   // _g1h has _n_par_threads
1081   _parallel_marking_threads = calc_parallel_marking_threads();
1082   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1083     "Maximum number of marking threads exceeded");
1084 
1085   uint active_workers = MAX2(1U, parallel_marking_threads());
1086   assert(active_workers > 0, "Should have been set");
1087 
1088   // Setting active workers is not guaranteed since fewer
1089   // worker threads may currently exist and more may not be
1090   // available.
1091   active_workers = _parallel_workers->update_active_workers(active_workers);
1092   log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());
1093 
1094   // Parallel task terminator is set in "set_concurrency_and_phase()"
1095   set_concurrency_and_phase(active_workers, true /* concurrent */);
1096 
1097   G1CMConcurrentMarkingTask markingTask(this, cmThread());
1098   _parallel_workers->run_task(&markingTask);
1099   print_stats();
1100 }
1101 
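     // The Remark pause. Finishes the marking work left over from the
     // concurrent phase, processes weak references, and then either restarts
     // concurrent marking (if the global mark stack overflowed) or deactivates
     // the SATB queues and resets the marking state before cleanup.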
1102 void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1103   // world is stopped at this checkpoint
1104   assert(SafepointSynchronize::is_at_safepoint(),
1105          "world should be stopped");
1106 
1107   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1108 
1109   // If a full collection has happened, we shouldn't do this.
1110   if (has_aborted()) {
1111     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1112     return;
1113   }
1114 
1115   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1116 
1117   if (VerifyDuringGC) {
1118     HandleMark hm;  // handle scope
1119     g1h->prepare_for_verify();
1120     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1121   }
1122   g1h->verifier()->check_bitmaps("Remark Start");
1123 
1124   G1Policy* g1p = g1h->g1_policy();
1125   g1p->record_concurrent_mark_remark_start();
1126 
1127   double start = os::elapsedTime();
1128 
1129   checkpointRootsFinalWork();
1130 
1131   double mark_work_end = os::elapsedTime();
1132 
1133   weakRefsWork(clear_all_soft_refs);
1134 
1135   if (has_overflown()) {
1136     // We overflowed.  Restart concurrent marking.
1137     _restart_for_overflow = true;
1138 
1139     // Verify the heap w.r.t. the previous marking bitmap.
1140     if (VerifyDuringGC) {
1141       HandleMark hm;  // handle scope
1142       g1h->prepare_for_verify();
1143       Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
1144     }
1145 
1146     // Clear the marking state because we will be restarting
1147     // marking due to overflowing the global mark stack.
1148     reset_marking_state();
1149   } else {
1150     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1151     // We're done with marking.
1152     // This is the end of the marking cycle; we expect all
1153     // threads to have SATB queues with active set to true.
1154     satb_mq_set.set_active_all_threads(false, /* new active value */
1155                                        true /* expected_active */);
1156 
1157     if (VerifyDuringGC) {
1158       HandleMark hm;  // handle scope
1159       g1h->prepare_for_verify();
1160       Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
1161     }
1162     g1h->verifier()->check_bitmaps("Remark End");
1163     assert(!restart_for_overflow(), "sanity");
1164     // Completely reset the marking state since marking completed
1165     set_non_marking_state();
1166   }
1167 
1168   // Expand the marking stack, if we have to and if we can.
1169   if (_global_mark_stack.should_expand()) {
1170     _global_mark_stack.expand();
1171   }
1172 
1173   // Statistics
1174   double now = os::elapsedTime();
1175   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1176   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1177   _remark_times.add((now - start) * 1000.0);
1178 
1179   g1p->record_concurrent_mark_remark_end();
1180 
1181   G1CMIsAliveClosure is_alive(g1h);
1182   _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1183 }
1184 
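     // Closure used during the Cleanup pause: for each region it records the
     // end of marking and immediately reclaims non-young regions that marking
     // found completely empty (no live bytes), collecting them in a per-worker
     // free list; all other regions register their remembered sets with the
     // HRRS cleanup task. Archive regions are left untouched.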
1185 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1186   G1CollectedHeap* _g1;
1187   size_t _freed_bytes;
1188   FreeRegionList* _local_cleanup_list;
1189   uint _old_regions_removed;
1190   uint _humongous_regions_removed;
1191   HRRSCleanupTask* _hrrs_cleanup_task;
1192 
1193 public:
1194   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1195                              FreeRegionList* local_cleanup_list,
1196                              HRRSCleanupTask* hrrs_cleanup_task) :
1197     _g1(g1),
1198     _freed_bytes(0),
1199     _local_cleanup_list(local_cleanup_list),
1200     _old_regions_removed(0),
1201     _humongous_regions_removed(0),
1202     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1203 
1204   size_t freed_bytes() { return _freed_bytes; }
1205   const uint old_regions_removed() { return _old_regions_removed; }
1206   const uint humongous_regions_removed() { return _humongous_regions_removed; }
1207 
1208   bool doHeapRegion(HeapRegion *hr) {
1209     if (hr->is_archive()) {
1210       return false;
1211     }
1212     _g1->reset_gc_time_stamps(hr);
1213     hr->note_end_of_marking();
1214 
1215     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1216       _freed_bytes += hr->used();
1217       hr->set_containing_set(NULL);
1218       if (hr->is_humongous()) {
1219         _humongous_regions_removed++;
1220         _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
1221       } else {
1222         _old_regions_removed++;
1223         _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
1224       }
1225     } else {
1226       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1227     }
1228 
1229     return false;
1230   }
1231 };
1232 
1233 class G1ParNoteEndTask: public AbstractGangTask {
1234   friend class G1NoteEndOfConcMarkClosure;
1235 
1236 protected:
1237   G1CollectedHeap* _g1h;
1238   FreeRegionList* _cleanup_list;
1239   HeapRegionClaimer _hrclaimer;
1240 
1241 public:
1242   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1243       AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1244   }
1245 
1246   void work(uint worker_id) {
1247     FreeRegionList local_cleanup_list("Local Cleanup List");
1248     HRRSCleanupTask hrrs_cleanup_task;
1249     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1250                                            &hrrs_cleanup_task);
1251     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1252     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1253 
1254     // Now update the lists
1255     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1256     {
1257       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1258       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1259 
1260       // If we iterate over the global cleanup list at the end of
1261       // cleanup to do this printing we will not guarantee to only
1262       // generate output for the newly-reclaimed regions (the list
1263       // might not be empty at the beginning of cleanup; we might
1264       // still be working on its previous contents). So we do the
1265       // printing here, before we append the new regions to the global
1266       // cleanup list.
1267 
1268       G1HRPrinter* hr_printer = _g1h->hr_printer();
1269       if (hr_printer->is_active()) {
1270         FreeRegionListIterator iter(&local_cleanup_list);
1271         while (iter.more_available()) {
1272           HeapRegion* hr = iter.get_next();
1273           hr_printer->cleanup(hr);
1274         }
1275       }
1276 
1277       _cleanup_list->add_ordered(&local_cleanup_list);
1278       assert(local_cleanup_list.is_empty(), "post-condition");
1279 
1280       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1281     }
1282   }
1283 };
1284 
1285 void G1ConcurrentMark::cleanup() {
1286   // world is stopped at this checkpoint
1287   assert(SafepointSynchronize::is_at_safepoint(),
1288          "world should be stopped");
1289   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1290 
1291   // If a full collection has happened, we shouldn't do this.
1292   if (has_aborted()) {
1293     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1294     return;
1295   }
1296 
1297   g1h->verifier()->verify_region_sets_optional();
1298 
1299   if (VerifyDuringGC) {
1300     HandleMark hm;  // handle scope
1301     g1h->prepare_for_verify();
1302     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1303   }
1304   g1h->verifier()->check_bitmaps("Cleanup Start");
1305 
1306   G1Policy* g1p = g1h->g1_policy();
1307   g1p->record_concurrent_mark_cleanup_start();
1308 
1309   double start = os::elapsedTime();
1310 
1311   HeapRegionRemSet::reset_for_cleanup_tasks();
1312 
1313   {
1314     GCTraceTime(Debug, gc)("Finalize Live Data");
1315     finalize_live_data();
1316   }
1317 
1318   if (VerifyDuringGC) {
1319     GCTraceTime(Debug, gc)("Verify Live Data");
1320     verify_live_data();
1321   }
1322 
1323   g1h->collector_state()->set_mark_in_progress(false);
1324 
1325   double count_end = os::elapsedTime();
1326   double this_final_counting_time = (count_end - start);
1327   _total_counting_time += this_final_counting_time;
1328 
1329   if (log_is_enabled(Trace, gc, liveness)) {
1330     G1PrintRegionLivenessInfoClosure cl("Post-Marking");
1331     _g1h->heap_region_iterate(&cl);
1332   }
1333 
1334   // Install newly created mark bitMap as "prev".
1335   swapMarkBitMaps();
1336 
1337   g1h->reset_gc_time_stamp();
1338 
1339   uint n_workers = _g1h->workers()->active_workers();
1340 
1341   // Note end of marking in all heap regions.
1342   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1343   g1h->workers()->run_task(&g1_par_note_end_task);
1344   g1h->check_gc_time_stamps();
1345 
1346   if (!cleanup_list_is_empty()) {
1347     // The cleanup list is not empty, so we'll have to process it
1348     // concurrently. Notify anyone else that might be wanting free
1349     // regions that there will be more free regions coming soon.
1350     g1h->set_free_regions_coming();
1351   }
1352 
1353   // Scrub the remembered sets (if enabled) before record_concurrent_mark_cleanup_end()
1354   // below, since scrubbing affects the metric by which we sort the heap regions.
1355   if (G1ScrubRemSets) {
1356     double rs_scrub_start = os::elapsedTime();
1357     g1h->scrub_rem_set();
1358     _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
1359   }
1360 
1361   // this will also free any regions totally full of garbage objects,
1362   // and sort the regions.
1363   g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1364 
1365   // Statistics.
1366   double end = os::elapsedTime();
1367   _cleanup_times.add((end - start) * 1000.0);
1368 
1369   // Clean up will have freed any regions completely full of garbage.
1370   // Update the soft reference policy with the new heap occupancy.
1371   Universe::update_heap_info_at_gc();
1372 
1373   if (VerifyDuringGC) {
1374     HandleMark hm;  // handle scope
1375     g1h->prepare_for_verify();
1376     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
1377   }
1378 
1379   g1h->verifier()->check_bitmaps("Cleanup End");
1380 
1381   g1h->verifier()->verify_region_sets_optional();
1382 
1383   // We need to make this be a "collection" so any collection pause that
1384   // races with it goes around and waits for completeCleanup to finish.
1385   g1h->increment_total_collections();
1386 
1387   // Clean out dead classes and update Metaspace sizes.
1388   if (ClassUnloadingWithConcurrentMark) {
1389     ClassLoaderDataGraph::purge();
1390   }
1391   MetaspaceGC::compute_new_size();
1392 
1393   // We reclaimed old regions so we should calculate the sizes to make
1394   // sure we update the old gen/space data.
1395   g1h->g1mm()->update_sizes();
1396   g1h->allocation_context_stats().update_after_mark();
1397 }
1398 
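     // Concurrent part of cleanup: clears the regions reclaimed during the
     // Cleanup pause and returns them to the secondary free list, appending
     // them in batches of G1SecondaryFreeListAppendLength to limit lock
     // traffic and notify_all() calls.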
1399 void G1ConcurrentMark::complete_cleanup() {
1400   if (has_aborted()) return;
1401 
1402   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1403 
1404   _cleanup_list.verify_optional();
1405   FreeRegionList tmp_free_list("Tmp Free List");
1406 
1407   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1408                                   "cleanup list has %u entries",
1409                                   _cleanup_list.length());
1410 
1411   // No one else should be accessing the _cleanup_list at this point,
1412   // so it is not necessary to take any locks
1413   while (!_cleanup_list.is_empty()) {
1414     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1415     assert(hr != NULL, "Got NULL from a non-empty list");
1416     hr->par_clear();
1417     tmp_free_list.add_ordered(hr);
1418 
1419     // Instead of adding one region at a time to the secondary_free_list,
1420     // we accumulate them in the local list and move them a few at a
1421     // time. This also cuts down on the number of notify_all() calls
1422     // we do during this process. We'll also append the local list when
1423     // _cleanup_list is empty (which means we just removed the last
1424     // region from the _cleanup_list).
1425     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
1426         _cleanup_list.is_empty()) {
1427       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1428                                       "appending %u entries to the secondary_free_list, "
1429                                       "cleanup list still has %u entries",
1430                                       tmp_free_list.length(),
1431                                       _cleanup_list.length());
1432 
1433       {
1434         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1435         g1h->secondary_free_list_add(&tmp_free_list);
1436         SecondaryFreeList_lock->notify_all();
1437       }
1438 #ifndef PRODUCT
1439       if (G1StressConcRegionFreeing) {
1440         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
1441           os::sleep(Thread::current(), (jlong) 1, false);
1442         }
1443       }
1444 #endif
1445     }
1446   }
1447   assert(tmp_free_list.is_empty(), "post-condition");
1448 }
1449 
// Supporting Object and Oop closures for reference discovery
// and processing during marking.
1452 
1453 bool G1CMIsAliveClosure::do_object_b(oop obj) {
1454   HeapWord* addr = (HeapWord*)obj;
1455   return addr != NULL &&
1456          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
1457 }
1458 
// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also, using the tasks' local queues removes the potential
// for the workers to interfere with each other, which could occur if
// they operated on the global stack.
1470 
1471 class G1CMKeepAliveAndDrainClosure: public OopClosure {
1472   G1ConcurrentMark* _cm;
1473   G1CMTask*         _task;
1474   int               _ref_counter_limit;
1475   int               _ref_counter;
1476   bool              _is_serial;
1477  public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task),
    _ref_counter_limit(G1RefProcDrainInterval),
    _is_serial(is_serial) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }
1485 
1486   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1487   virtual void do_oop(      oop* p) { do_oop_work(p); }
1488 
1489   template <class T> void do_oop_work(T* p) {
1490     if (!_cm->has_overflown()) {
1491       oop obj = oopDesc::load_decode_heap_oop(p);
1492       _task->deal_with_reference(obj);
1493       _ref_counter--;
1494 
1495       if (_ref_counter == 0) {
1496         // We have dealt with _ref_counter_limit references, pushing them
1497         // and objects reachable from them on to the local stack (and
1498         // possibly the global stack). Call G1CMTask::do_marking_step() to
1499         // process these entries.
1500         //
1501         // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1502         // there's nothing more to do (i.e. we're done with the entries that
1503         // were pushed as a result of the G1CMTask::deal_with_reference() calls
1504         // above) or we overflow.
1505         //
1506         // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1507         // flag while there may still be some work to do. (See the comment at
1508         // the beginning of G1CMTask::do_marking_step() for those conditions -
1509         // one of which is reaching the specified time target.) It is only
1510         // when G1CMTask::do_marking_step() returns without setting the
1511         // has_aborted() flag that the marking step has completed.
1512         do {
1513           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1514           _task->do_marking_step(mark_step_duration_ms,
1515                                  false      /* do_termination */,
1516                                  _is_serial);
1517         } while (_task->has_aborted() && !_cm->has_overflown());
1518         _ref_counter = _ref_counter_limit;
1519       }
1520     }
1521   }
1522 };
1523 
// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.
1530 
1531 class G1CMDrainMarkingStackClosure: public VoidClosure {
1532   G1ConcurrentMark* _cm;
1533   G1CMTask*         _task;
1534   bool              _is_serial;
1535  public:
1536   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1537     _cm(cm), _task(task), _is_serial(is_serial) {
1538     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1539   }
1540 
1541   void do_void() {
1542     do {
1543       // We call G1CMTask::do_marking_step() to completely drain the local
1544       // and global marking stacks of entries pushed by the 'keep alive'
1545       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1546       //
1547       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1548       // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
1550       // closure to the entries on the discovered ref lists) or we overflow
1551       // the global marking stack.
1552       //
1553       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1554       // flag while there may still be some work to do. (See the comment at
1555       // the beginning of G1CMTask::do_marking_step() for those conditions -
1556       // one of which is reaching the specified time target.) It is only
1557       // when G1CMTask::do_marking_step() returns without setting the
1558       // has_aborted() flag that the marking step has completed.
1559 
1560       _task->do_marking_step(1000000000.0 /* something very large */,
1561                              true         /* do_termination */,
1562                              _is_serial);
1563     } while (_task->has_aborted() && !_cm->has_overflown());
1564   }
1565 };
1566 
1567 // Implementation of AbstractRefProcTaskExecutor for parallel
1568 // reference processing at the end of G1 concurrent marking
1569 
1570 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
1571 private:
1572   G1CollectedHeap*  _g1h;
1573   G1ConcurrentMark* _cm;
1574   WorkGang*         _workers;
1575   uint              _active_workers;
1576 
1577 public:
1578   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1579                           G1ConcurrentMark* cm,
1580                           WorkGang* workers,
1581                           uint n_workers) :
1582     _g1h(g1h), _cm(cm),
1583     _workers(workers), _active_workers(n_workers) { }
1584 
1585   // Executes the given task using concurrent marking worker threads.
1586   virtual void execute(ProcessTask& task);
1587   virtual void execute(EnqueueTask& task);
1588 };
1589 
1590 class G1CMRefProcTaskProxy: public AbstractGangTask {
1591   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1592   ProcessTask&      _proc_task;
1593   G1CollectedHeap*  _g1h;
1594   G1ConcurrentMark* _cm;
1595 
1596 public:
1597   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1598                        G1CollectedHeap* g1h,
1599                        G1ConcurrentMark* cm) :
1600     AbstractGangTask("Process reference objects in parallel"),
1601     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1602     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1603     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1604   }
1605 
1606   virtual void work(uint worker_id) {
1607     ResourceMark rm;
1608     HandleMark hm;
1609     G1CMTask* task = _cm->task(worker_id);
1610     G1CMIsAliveClosure g1_is_alive(_g1h);
1611     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1612     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1613 
1614     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1615   }
1616 };
1617 
1618 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
1619   assert(_workers != NULL, "Need parallel worker threads.");
1620   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1621 
1622   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1623 
1624   // We need to reset the concurrency level before each
1625   // proxy task execution, so that the termination protocol
1626   // and overflow handling in G1CMTask::do_marking_step() knows
1627   // how many workers to wait for.
1628   _cm->set_concurrency(_active_workers);
1629   _workers->run_task(&proc_task_proxy);
1630 }
1631 
1632 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
1633   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
1634   EnqueueTask& _enq_task;
1635 
1636 public:
1637   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
1638     AbstractGangTask("Enqueue reference objects in parallel"),
1639     _enq_task(enq_task) { }
1640 
1641   virtual void work(uint worker_id) {
1642     _enq_task.work(worker_id);
1643   }
1644 };
1645 
1646 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1647   assert(_workers != NULL, "Need parallel worker threads.");
1648   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1649 
1650   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1651 
1652   // Not strictly necessary but...
1653   //
1654   // We need to reset the concurrency level before each
1655   // proxy task execution, so that the termination protocol
1656   // and overflow handling in G1CMTask::do_marking_step() knows
1657   // how many workers to wait for.
1658   _cm->set_concurrency(_active_workers);
1659   _workers->run_task(&enq_task_proxy);
1660 }
1661 
1662 void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
1663   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
1664 }
1665 
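// Processes the references discovered during marking and, when class
// unloading with concurrent mark is enabled, unloads classes, cleans the
// string/symbol tables and the code cache, once marking has finished.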
1666 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
1667   if (has_overflown()) {
1668     // Skip processing the discovered references if we have
1669     // overflown the global marking stack. Reference objects
1670     // only get discovered once so it is OK to not
1671     // de-populate the discovered reference lists. We could have,
1672     // but the only benefit would be that, when marking restarts,
1673     // less reference objects are discovered.
1674     return;
1675   }
1676 
1677   ResourceMark rm;
1678   HandleMark   hm;
1679 
1680   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1681 
1682   // Is alive closure.
1683   G1CMIsAliveClosure g1_is_alive(g1h);
1684 
1685   // Inner scope to exclude the cleaning of the string and symbol
1686   // tables from the displayed time.
1687   {
1688     GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1689 
1690     ReferenceProcessor* rp = g1h->ref_processor_cm();
1691 
1692     // See the comment in G1CollectedHeap::ref_processing_init()
1693     // about how reference processing currently works in G1.
1694 
1695     // Set the soft reference policy
1696     rp->setup_policy(clear_all_soft_refs);
1697     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1698 
1699     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1700     // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
1702     // JNI references during parallel reference processing.
1703     //
1704     // These closures do not need to synchronize with the worker
1705     // threads involved in parallel reference processing as these
1706     // instances are executed serially by the current thread (e.g.
1707     // reference processing is not multi-threaded and is thus
1708     // performed by the current thread instead of a gang worker).
1709     //
1710     // The gang tasks involved in parallel reference processing create
1711     // their own instances of these closures, which do their own
1712     // synchronization among themselves.
1713     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1714     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1715 
1716     // We need at least one active thread. If reference processing
1717     // is not multi-threaded we use the current (VMThread) thread,
1718     // otherwise we use the work gang from the G1CollectedHeap and
1719     // we utilize all the worker threads we can.
1720     bool processing_is_mt = rp->processing_is_mt();
1721     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1722     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1723 
1724     // Parallel processing task executor.
1725     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1726                                               g1h->workers(), active_workers);
1727     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1728 
1729     // Set the concurrency level. The phase was already set prior to
1730     // executing the remark task.
1731     set_concurrency(active_workers);
1732 
1733     // Set the degree of MT processing here.  If the discovery was done MT,
1734     // the number of threads involved during discovery could differ from
1735     // the number of active workers.  This is OK as long as the discovered
1736     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1737     rp->set_active_mt_degree(active_workers);
1738 
1739     // Process the weak references.
1740     const ReferenceProcessorStats& stats =
1741         rp->process_discovered_references(&g1_is_alive,
1742                                           &g1_keep_alive,
1743                                           &g1_drain_mark_stack,
1744                                           executor,
1745                                           _gc_timer_cm);
1746     _gc_tracer_cm->report_gc_reference_stats(stats);
1747 
1748     // The do_oop work routines of the keep_alive and drain_marking_stack
1749     // oop closures will set the has_overflown flag if we overflow the
1750     // global marking stack.
1751 
1752     assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
1753             "Mark stack should be empty (unless it is out of memory)");
1754 
1755     if (_global_mark_stack.is_out_of_memory()) {
1756       // This should have been done already when we tried to push an
1757       // entry on to the global mark stack. But let's do it again.
1758       set_has_overflown();
1759     }
1760 
    assert(rp->num_q() == active_workers, "Reference processor queue count should match the number of active workers");
1762 
1763     rp->enqueue_discovered_references(executor);
1764 
1765     rp->verify_no_references_recorded();
1766     assert(!rp->discovery_enabled(), "Post condition");
1767   }
1768 
1769   if (has_overflown()) {
    // We cannot trust g1_is_alive if the marking stack overflowed.
1771     return;
1772   }
1773 
1774   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1775 
1776   // Unload Klasses, String, Symbols, Code Cache, etc.
1777   if (ClassUnloadingWithConcurrentMark) {
1778     bool purged_classes;
1779 
1780     {
1781       GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
1782       purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
1783     }
1784 
1785     {
1786       GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
1787       weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
1788     }
1789   }
1790 
1791   if (G1StringDedup::is_enabled()) {
1792     GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
1793     G1StringDedup::unlink(&g1_is_alive);
1794   }
1795 }
1796 
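// After marking has finished, the fully built "next" bitmap becomes the new
// "prev" bitmap; the old "prev" bitmap will be cleared and reused as the
// "next" bitmap for the following marking cycle.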
1797 void G1ConcurrentMark::swapMarkBitMaps() {
1798   G1CMBitMapRO* temp = _prevMarkBitMap;
1799   _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
1800   _nextMarkBitMap    = (G1CMBitMap*)  temp;
1801 }
1802 
1803 // Closure for marking entries in SATB buffers.
1804 class G1CMSATBBufferClosure : public SATBBufferClosure {
1805 private:
1806   G1CMTask* _task;
1807   G1CollectedHeap* _g1h;
1808 
1809   // This is very similar to G1CMTask::deal_with_reference, but with
1810   // more relaxed requirements for the argument, so this must be more
1811   // circumspect about treating the argument as an object.
1812   void do_entry(void* entry) const {
1813     _task->increment_refs_reached();
1814     HeapRegion* hr = _g1h->heap_region_containing(entry);
1815     if (entry < hr->next_top_at_mark_start()) {
1816       // Until we get here, we don't know whether entry refers to a valid
1817       // object; it could instead have been a stale reference.
1818       oop obj = static_cast<oop>(entry);
1819       assert(obj->is_oop(true /* ignore mark word */),
1820              "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
1821       _task->make_reference_grey(obj);
1822     }
1823   }
1824 
1825 public:
1826   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1827     : _task(task), _g1h(g1h) { }
1828 
1829   virtual void do_buffer(void** buffer, size_t size) {
1830     for (size_t i = 0; i < size; ++i) {
1831       do_entry(buffer[i]);
1832     }
1833   }
1834 };
1835 
1836 class G1RemarkThreadsClosure : public ThreadClosure {
1837   G1CMSATBBufferClosure _cm_satb_cl;
1838   G1CMOopClosure _cm_cl;
1839   MarkingCodeBlobClosure _code_cl;
1840   int _thread_parity;
1841 
1842  public:
1843   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1844     _cm_satb_cl(task, g1h),
1845     _cm_cl(g1h, g1h->concurrent_mark(), task),
1846     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1847     _thread_parity(Threads::thread_claim_parity()) {}
1848 
1849   void do_thread(Thread* thread) {
1850     if (thread->is_Java_thread()) {
1851       if (thread->claim_oops_do(true, _thread_parity)) {
1852         JavaThread* jt = (JavaThread*)thread;
1853 
        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
        // however, oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1860         jt->nmethods_do(&_code_cl);
1861 
1862         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1863       }
1864     } else if (thread->is_VM_thread()) {
1865       if (thread->claim_oops_do(true, _thread_parity)) {
1866         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1867       }
1868     }
1869   }
1870 };
1871 
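// Gang task used for the parallel part of remark: each worker first flushes
// the SATB buffers and nmethod roots of the threads it claims, then drains
// the marking data structures to completion via do_marking_step().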
1872 class G1CMRemarkTask: public AbstractGangTask {
1873 private:
1874   G1ConcurrentMark* _cm;
1875 public:
1876   void work(uint worker_id) {
1877     // Since all available tasks are actually started, we should
1878     // only proceed if we're supposed to be active.
1879     if (worker_id < _cm->active_tasks()) {
1880       G1CMTask* task = _cm->task(worker_id);
1881       task->record_start_time();
1882       {
1883         ResourceMark rm;
1884         HandleMark hm;
1885 
1886         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1887         Threads::threads_do(&threads_f);
1888       }
1889 
1890       do {
1891         task->do_marking_step(1000000000.0 /* something very large */,
1892                               true         /* do_termination       */,
1893                               false        /* is_serial            */);
1894       } while (task->has_aborted() && !_cm->has_overflown());
1895       // If we overflow, then we do not want to restart. We instead
1896       // want to abort remark and do concurrent marking again.
1897       task->record_end_time();
1898     }
1899   }
1900 
1901   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1902     AbstractGangTask("Par Remark"), _cm(cm) {
1903     _cm->terminator()->reset_for_reuse(active_workers);
1904   }
1905 };
1906 
1907 void G1ConcurrentMark::checkpointRootsFinalWork() {
1908   ResourceMark rm;
1909   HandleMark   hm;
1910   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1911 
1912   GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1913 
1914   g1h->ensure_parsability(false);
1915 
1916   // this is remark, so we'll use up all active threads
1917   uint active_workers = g1h->workers()->active_workers();
1918   set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
1920   // value originally calculated in the G1ConcurrentMark
1921   // constructor and pass values of the active workers
1922   // through the gang in the task.
1923 
1924   {
1925     StrongRootsScope srs(active_workers);
1926 
1927     G1CMRemarkTask remarkTask(this, active_workers);
1928     // We will start all available threads, even if we decide that the
1929     // active_workers will be fewer. The extra ones will just bail out
1930     // immediately.
1931     g1h->workers()->run_task(&remarkTask);
1932   }
1933 
1934   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1935   guarantee(has_overflown() ||
1936             satb_mq_set.completed_buffers_num() == 0,
1937             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1938             BOOL_TO_STR(has_overflown()),
1939             satb_mq_set.completed_buffers_num());
1940 
1941   print_stats();
1942 }
1943 
1944 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
1945   // Note we are overriding the read-only view of the prev map here, via
1946   // the cast.
1947   ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
1948 }
1949 
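// Claims the next region to be scanned by atomically bumping the global
// finger. Returns NULL when the finger has reached the end of the heap, or
// when the claimed region has nothing below its NTAMS to scan; in the latter
// case the caller is expected to simply call claim_region() again.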
1950 HeapRegion*
1951 G1ConcurrentMark::claim_region(uint worker_id) {
1952   // "checkpoint" the finger
1953   HeapWord* finger = _finger;
1954 
1955   // _heap_end will not change underneath our feet; it only changes at
1956   // yield points.
1957   while (finger < _heap_end) {
1958     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1959 
1960     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1961 
    // heap_region_containing() above may return NULL as we always claim
    // regions up to the end of the heap. In this case, just jump to the
    // next region.
1964     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1965 
1966     // Is the gap between reading the finger and doing the CAS too long?
1967     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
1968     if (res == finger && curr_region != NULL) {
1969       // we succeeded
1970       HeapWord*   bottom        = curr_region->bottom();
1971       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1972 
      // Notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further.
1975       assert(_finger >= end, "the finger should have moved forward");
1976 
1977       if (limit > bottom) {
1978         return curr_region;
1979       } else {
1980         assert(limit == bottom,
1981                "the region limit should be at bottom");
1982         // we return NULL and the caller should try calling
1983         // claim_region() again.
1984         return NULL;
1985       }
1986     } else {
1987       assert(_finger > finger, "the finger should have moved forward");
1988       // read it again
1989       finger = _finger;
1990     }
1991   }
1992 
1993   return NULL;
1994 }
1995 
1996 #ifndef PRODUCT
1997 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
1998 private:
1999   G1CollectedHeap* _g1h;
2000   const char* _phase;
2001   int _info;
2002 
2003 public:
2004   VerifyNoCSetOops(const char* phase, int info = -1) :
2005     _g1h(G1CollectedHeap::heap()),
2006     _phase(phase),
2007     _info(info)
2008   { }
2009 
2010   void operator()(oop obj) const {
2011     guarantee(obj->is_oop(),
2012               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
2013               p2i(obj), _phase, _info);
2014     guarantee(!_g1h->obj_in_cs(obj),
2015               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
2016               p2i(obj), _phase, _info);
2017   }
2018 };
2019 
2020 void G1ConcurrentMark::verify_no_cset_oops() {
2021   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2022   if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2023     return;
2024   }
2025 
2026   // Verify entries on the global mark stack
2027   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
2028 
2029   // Verify entries on the task queues
2030   for (uint i = 0; i < _max_worker_id; ++i) {
2031     G1CMTaskQueue* queue = _task_queues->queue(i);
2032     queue->iterate(VerifyNoCSetOops("Queue", i));
2033   }
2034 
2035   // Verify the global finger
2036   HeapWord* global_finger = finger();
2037   if (global_finger != NULL && global_finger < _heap_end) {
2038     // Since we always iterate over all regions, we might get a NULL HeapRegion
2039     // here.
2040     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
2041     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2042               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
2043               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
2044   }
2045 
2046   // Verify the task fingers
2047   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2048   for (uint i = 0; i < parallel_marking_threads(); ++i) {
2049     G1CMTask* task = _tasks[i];
2050     HeapWord* task_finger = task->finger();
2051     if (task_finger != NULL && task_finger < _heap_end) {
2052       // See above note on the global finger verification.
2053       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
2054       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2055                 !task_hr->in_collection_set(),
2056                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2057                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
2058     }
2059   }
2060 }
2061 #endif // PRODUCT
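
// The card-based live data is owned by the remembered set code; the functions
// below are thin wrappers that forward to G1RemSet.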
2062 void G1ConcurrentMark::create_live_data() {
2063   _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap);
2064 }
2065 
2066 void G1ConcurrentMark::finalize_live_data() {
2067   _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap);
2068 }
2069 
2070 void G1ConcurrentMark::verify_live_data() {
2071   _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap);
2072 }
2073 
2074 void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
2075   _g1h->g1_rem_set()->clear_card_live_data(workers);
2076 }
2077 
2078 #ifdef ASSERT
2079 void G1ConcurrentMark::verify_live_data_clear() {
2080   _g1h->g1_rem_set()->verify_card_live_data_is_clear();
2081 }
2082 #endif
2083 
2084 void G1ConcurrentMark::print_stats() {
2085   if (!log_is_enabled(Debug, gc, stats)) {
2086     return;
2087   }
2088   log_debug(gc, stats)("---------------------------------------------------------------------");
2089   for (size_t i = 0; i < _active_tasks; ++i) {
2090     _tasks[i]->print_stats();
2091     log_debug(gc, stats)("---------------------------------------------------------------------");
2092   }
2093 }
2094 
2095 void G1ConcurrentMark::abort() {
2096   if (!cmThread()->during_cycle() || _has_aborted) {
2097     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2098     return;
2099   }
2100 
2101   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2102   // concurrent bitmap clearing.
2103   {
    GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
2105     clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
2106   }
2107   // Note we cannot clear the previous marking bitmap here
2108   // since VerifyDuringGC verifies the objects marked during
2109   // a full GC against the previous bitmap.
2110 
2111   {
    GCTraceTime(Debug, gc) debug("Clear Live Data");
2113     clear_live_data(_g1h->workers());
2114   }
2115   DEBUG_ONLY({
    GCTraceTime(Debug, gc) debug("Verify Live Data Clear");
2117     verify_live_data_clear();
2118   })
2119   // Empty mark stack
2120   reset_marking_state();
2121   for (uint i = 0; i < _max_worker_id; ++i) {
2122     _tasks[i]->clear_region_fields();
2123   }
2124   _first_overflow_barrier_sync.abort();
2125   _second_overflow_barrier_sync.abort();
2126   _has_aborted = true;
2127 
2128   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2129   satb_mq_set.abandon_partial_marking();
2130   // This can be called either during or outside marking, we'll read
2131   // the expected_active value from the SATB queue set.
2132   satb_mq_set.set_active_all_threads(
2133                                  false, /* new active value */
2134                                  satb_mq_set.is_active() /* expected_active */);
2135 }
2136 
2137 static void print_ms_time_info(const char* prefix, const char* name,
2138                                NumberSeq& ns) {
2139   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2140                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2141   if (ns.num() > 0) {
2142     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2143                            prefix, ns.sd(), ns.maximum());
2144   }
2145 }
2146 
2147 void G1ConcurrentMark::print_summary_info() {
2148   Log(gc, marking) log;
2149   if (!log.is_trace()) {
2150     return;
2151   }
2152 
2153   log.trace(" Concurrent marking:");
2154   print_ms_time_info("  ", "init marks", _init_times);
2155   print_ms_time_info("  ", "remarks", _remark_times);
2156   {
2157     print_ms_time_info("     ", "final marks", _remark_mark_times);
2158     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2159 
2160   }
2161   print_ms_time_info("  ", "cleanups", _cleanup_times);
2162   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2163             _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2164   if (G1ScrubRemSets) {
2165     log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
2166               _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2167   }
2168   log.trace("  Total stop_world time = %8.2f s.",
2169             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2170   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2171             cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
2172 }
2173 
2174 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2175   _parallel_workers->print_worker_threads_on(st);
2176 }
2177 
2178 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2179   _parallel_workers->threads_do(tc);
2180 }
2181 
2182 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2183   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2184       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
2185   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
2186   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
2187 }
2188 
2189 // Closure for iteration over bitmaps
2190 class G1CMBitMapClosure : public BitMapClosure {
2191 private:
2192   // the bitmap that is being iterated over
2193   G1CMBitMap*                 _nextMarkBitMap;
2194   G1ConcurrentMark*           _cm;
2195   G1CMTask*                   _task;
2196 
2197 public:
  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) :
    _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }
2200 
2201   bool do_bit(size_t offset) {
2202     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
2203     assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert(addr < _cm->finger(), "invariant");
2205     assert(addr >= _task->finger(), "invariant");
2206 
2207     // We move that task's local finger along.
2208     _task->move_finger_to(addr);
2209 
2210     _task->scan_object(oop(addr));
2211     // we only partially drain the local queue and global stack
2212     _task->drain_local_queue(true);
2213     _task->drain_global_stack(true);
2214 
2215     // if the has_aborted flag has been raised, we need to bail out of
2216     // the iteration
2217     return !_task->has_aborted();
2218   }
2219 };
2220 
2221 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2222   ReferenceProcessor* result = g1h->ref_processor_cm();
2223   assert(result != NULL, "CM reference processor should not be NULL");
2224   return result;
2225 }
2226 
2227 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2228                                G1ConcurrentMark* cm,
2229                                G1CMTask* task)
2230   : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
2231     _g1h(g1h), _cm(cm), _task(task)
2232 { }
2233 
2234 void G1CMTask::setup_for_region(HeapRegion* hr) {
2235   assert(hr != NULL,
2236         "claim_region() should have filtered out NULL regions");
2237   _curr_region  = hr;
2238   _finger       = hr->bottom();
2239   update_region_limit();
2240 }
2241 
2242 void G1CMTask::update_region_limit() {
2243   HeapRegion* hr            = _curr_region;
2244   HeapWord* bottom          = hr->bottom();
2245   HeapWord* limit           = hr->next_top_at_mark_start();
2246 
2247   if (limit == bottom) {
2248     // The region was collected underneath our feet.
2249     // We set the finger to bottom to ensure that the bitmap
2250     // iteration that will follow this will not do anything.
2251     // (this is not a condition that holds when we set the region up,
2252     // as the region is not supposed to be empty in the first place)
2253     _finger = bottom;
2254   } else if (limit >= _region_limit) {
2255     assert(limit >= _finger, "peace of mind");
2256   } else {
2257     assert(limit < _region_limit, "only way to get here");
2258     // This can happen under some pretty unusual circumstances.  An
2259     // evacuation pause empties the region underneath our feet (NTAMS
2260     // at bottom). We then do some allocation in the region (NTAMS
2261     // stays at bottom), followed by the region being used as a GC
2262     // alloc region (NTAMS will move to top() and the objects
2263     // originally below it will be grayed). All objects now marked in
2264     // the region are explicitly grayed, if below the global finger,
2265     // and we do not need in fact to scan anything else. So, we simply
2266     // set _finger to be limit to ensure that the bitmap iteration
2267     // doesn't do anything.
2268     _finger = limit;
2269   }
2270 
2271   _region_limit = limit;
2272 }
2273 
2274 void G1CMTask::giveup_current_region() {
2275   assert(_curr_region != NULL, "invariant");
2276   clear_region_fields();
2277 }
2278 
2279 void G1CMTask::clear_region_fields() {
2280   // Values for these three fields that indicate that we're not
2281   // holding on to a region.
2282   _curr_region   = NULL;
2283   _finger        = NULL;
2284   _region_limit  = NULL;
2285 }
2286 
2287 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2288   if (cm_oop_closure == NULL) {
2289     assert(_cm_oop_closure != NULL, "invariant");
2290   } else {
2291     assert(_cm_oop_closure == NULL, "invariant");
2292   }
2293   _cm_oop_closure = cm_oop_closure;
2294 }
2295 
2296 void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
2297   guarantee(nextMarkBitMap != NULL, "invariant");
2298   _nextMarkBitMap                = nextMarkBitMap;
2299   clear_region_fields();
2300 
2301   _calls                         = 0;
2302   _elapsed_time_ms               = 0.0;
2303   _termination_time_ms           = 0.0;
2304   _termination_start_time_ms     = 0.0;
2305 }
2306 
2307 bool G1CMTask::should_exit_termination() {
2308   regular_clock_call();
2309   // This is called when we are in the termination protocol. We should
2310   // quit if, for some reason, this task wants to abort or the global
2311   // stack is not empty (this means that we can get work from it).
2312   return !_cm->mark_stack_empty() || has_aborted();
2313 }
2314 
2315 void G1CMTask::reached_limit() {
2316   assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
2318          "shouldn't have been called otherwise");
2319   regular_clock_call();
2320 }
2321 
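// The work-based "clock": invoked whenever the words-scanned or refs-reached
// limits are hit (and from a few other strategic places). It checks the
// various abort conditions below and sets the has_aborted flag when the task
// needs to bail out.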
2322 void G1CMTask::regular_clock_call() {
2323   if (has_aborted()) return;
2324 
2325   // First, we need to recalculate the words scanned and refs reached
2326   // limits for the next clock call.
2327   recalculate_limits();
2328 
  // During the regular clock call we do the following:
2330 
2331   // (1) If an overflow has been flagged, then we abort.
2332   if (_cm->has_overflown()) {
2333     set_has_aborted();
2334     return;
2335   }
2336 
2337   // If we are not concurrent (i.e. we're doing remark) we don't need
2338   // to check anything else. The other steps are only needed during
2339   // the concurrent marking phase.
2340   if (!concurrent()) return;
2341 
2342   // (2) If marking has been aborted for Full GC, then we also abort.
2343   if (_cm->has_aborted()) {
2344     set_has_aborted();
2345     return;
2346   }
2347 
2348   double curr_time_ms = os::elapsedVTime() * 1000.0;
2349 
  // (3) We check whether we should yield. If we have to, then we abort.
2351   if (SuspendibleThreadSet::should_yield()) {
2352     // We should yield. To do this we abort the task. The caller is
2353     // responsible for yielding.
2354     set_has_aborted();
2355     return;
2356   }
2357 
  // (4) We check whether we've reached our time quota. If we have,
2359   // then we abort.
2360   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2361   if (elapsed_time_ms > _time_target_ms) {
2362     set_has_aborted();
2363     _has_timed_out = true;
2364     return;
2365   }
2366 
  // (5) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
2369   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2370   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2371     // we do need to process SATB buffers, we'll abort and restart
2372     // the marking task to do so
2373     set_has_aborted();
2374     return;
2375   }
2376 }
2377 
2378 void G1CMTask::recalculate_limits() {
2379   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2380   _words_scanned_limit      = _real_words_scanned_limit;
2381 
2382   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2383   _refs_reached_limit       = _real_refs_reached_limit;
2384 }
2385 
2386 void G1CMTask::decrease_limits() {
2387   // This is called when we believe that we're going to do an infrequent
2388   // operation which will increase the per byte scanned cost (i.e. move
2389   // entries to/from the global stack). It basically tries to decrease the
2390   // scanning limit so that the clock is called earlier.
2391 
2392   _words_scanned_limit = _real_words_scanned_limit -
2393     3 * words_scanned_period / 4;
2394   _refs_reached_limit  = _real_refs_reached_limit -
2395     3 * refs_reached_period / 4;
2396 }
2397 
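// Moves up to G1CMMarkStack::OopsPerChunk entries from the local task queue
// into a single chunk on the global mark stack. A partially filled chunk is
// NULL-terminated so that get_entries_from_global_stack() knows where it ends.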
2398 void G1CMTask::move_entries_to_global_stack() {
2399   // Local array where we'll store the entries that will be popped
2400   // from the local queue.
2401   oop buffer[G1CMMarkStack::OopsPerChunk];
2402 
2403   size_t n = 0;
2404   oop obj;
2405   while (n < G1CMMarkStack::OopsPerChunk && _task_queue->pop_local(obj)) {
2406     buffer[n] = obj;
2407     ++n;
2408   }
2409   if (n < G1CMMarkStack::OopsPerChunk) {
2410     buffer[n] = NULL;
2411   }
2412 
2413   if (n > 0) {
2414     if (!_cm->mark_stack_push(buffer)) {
2415       set_has_aborted();
2416     }
2417   }
2418 
2419   // This operation was quite expensive, so decrease the limits.
2420   decrease_limits();
2421 }
2422 
2423 bool G1CMTask::get_entries_from_global_stack() {
2424   // Local array where we'll store the entries that will be popped
2425   // from the global stack.
2426   oop buffer[G1CMMarkStack::OopsPerChunk];
2427 
2428   if (!_cm->mark_stack_pop(buffer)) {
2429     return false;
2430   }
2431 
2432   // We did actually pop at least one entry.
2433   for (size_t i = 0; i < G1CMMarkStack::OopsPerChunk; ++i) {
2434     oop elem = buffer[i];
2435     if (elem == NULL) {
2436       break;
2437     }
2438     bool success = _task_queue->push(elem);
2439     // We only call this when the local queue is empty or under a
2440     // given target limit. So, we do not expect this push to fail.
2441     assert(success, "invariant");
2442   }
2443 
2444   // This operation was quite expensive, so decrease the limits
2445   decrease_limits();
2446   return true;
2447 }
2448 
2449 void G1CMTask::drain_local_queue(bool partially) {
2450   if (has_aborted()) return;
2451 
2452   // Decide what the target size is, depending whether we're going to
2453   // drain it partially (so that other tasks can steal if they run out
2454   // of things to do) or totally (at the very end).
2455   size_t target_size;
2456   if (partially) {
2457     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2458   } else {
2459     target_size = 0;
2460   }
2461 
2462   if (_task_queue->size() > target_size) {
2463     oop obj;
2464     bool ret = _task_queue->pop_local(obj);
2465     while (ret) {
2466       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
2467       assert(!_g1h->is_on_master_free_list(
2468                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
2469 
2470       scan_object(obj);
2471 
2472       if (_task_queue->size() <= target_size || has_aborted()) {
2473         ret = false;
2474       } else {
2475         ret = _task_queue->pop_local(obj);
2476       }
2477     }
2478   }
2479 }
2480 
2481 void G1CMTask::drain_global_stack(bool partially) {
2482   if (has_aborted()) return;
2483 
2484   // We have a policy to drain the local queue before we attempt to
2485   // drain the global stack.
2486   assert(partially || _task_queue->size() == 0, "invariant");
2487 
2488   // Decide what the target size is, depending whether we're going to
2489   // drain it partially (so that other tasks can steal if they run out
2490   // of things to do) or totally (at the very end).
  // Notice that when draining the global mark stack partially, due to the raciness
2492   // of the mark stack size update we might in fact drop below the target. But,
2493   // this is not a problem.
2494   // In case of total draining, we simply process until the global mark stack is
2495   // totally empty, disregarding the size counter.
2496   if (partially) {
2497     size_t const target_size = _cm->partial_mark_stack_size_target();
2498     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2499       if (get_entries_from_global_stack()) {
2500         drain_local_queue(partially);
2501       }
2502     }
2503   } else {
2504     while (!has_aborted() && get_entries_from_global_stack()) {
2505       drain_local_queue(partially);
2506     }
2507   }
2508 }
2509 
// The SATB queue code makes several assumptions about whether to call the
// par or non-par versions of the methods; this is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
2514 void G1CMTask::drain_satb_buffers() {
2515   if (has_aborted()) return;
2516 
2517   // We set this so that the regular clock knows that we're in the
2518   // middle of draining buffers and doesn't set the abort flag when it
2519   // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
2521   _draining_satb_buffers = true;
2522 
2523   G1CMSATBBufferClosure satb_cl(this, _g1h);
2524   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2525 
2526   // This keeps claiming and applying the closure to completed buffers
2527   // until we run out of buffers or we need to abort.
2528   while (!has_aborted() &&
2529          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2530     regular_clock_call();
2531   }
2532 
2533   _draining_satb_buffers = false;
2534 
2535   assert(has_aborted() ||
2536          concurrent() ||
2537          satb_mq_set.completed_buffers_num() == 0, "invariant");
2538 
  // Again, this was a potentially expensive operation; decrease the
  // limits to get the regular clock call earlier.
2541   decrease_limits();
2542 }
2543 
2544 void G1CMTask::print_stats() {
2545   log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
2546                        _worker_id, _calls);
2547   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2548                        _elapsed_time_ms, _termination_time_ms);
2549   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
2550                        _step_times_ms.num(), _step_times_ms.avg(),
2551                        _step_times_ms.sd());
2552   log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
2553                        _step_times_ms.maximum(), _step_times_ms.sum());
2554 }
2555 
2556 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
2557   return _task_queues->steal(worker_id, hash_seed, obj);
2558 }
2559 
2560 /*****************************************************************************
2561 
2562     The do_marking_step(time_target_ms, ...) method is the building
2563     block of the parallel marking framework. It can be called in parallel
2564     with other invocations of do_marking_step() on different tasks
2565     (but only one per task, obviously) and concurrently with the
2566     mutator threads, or during remark, hence it eliminates the need
2567     for two versions of the code. When called during remark, it will
2568     pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
2571     it needs to yield.
2572 
2573     The data structures that it uses to do marking work are the
2574     following:
2575 
2576       (1) Marking Bitmap. If there are gray objects that appear only
2577       on the bitmap (this happens either when dealing with an overflow
2578       or when the initial marking phase has simply marked the roots
2579       and didn't push them on the stack), then tasks claim heap
2580       regions whose bitmap they then scan to find gray objects. A
2581       global finger indicates where the end of the last claimed region
2582       is. A local finger indicates how far into the region a task has
2583       scanned. The two fingers are used to determine how to gray an
2584       object (i.e. whether simply marking it is OK, as it will be
2585       visited by a task in the future, or whether it needs to be also
2586       pushed on a stack).
2587 
2588       (2) Local Queue. The local queue of the task which is accessed
2589       reasonably efficiently by the task. Other tasks can steal from
2590       it when they run out of work. Throughout the marking phase, a
2591       task attempts to keep its local queue short but not totally
2592       empty, so that entries are available for stealing by other
2593       tasks. Only when there is no more work, a task will totally
2594       drain its local queue.
2595 
2596       (3) Global Mark Stack. This handles local queue overflow. During
2597       marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and more fine-grained
      interaction with it, which might cause contention. If it
2600       overflows, then the marking phase should restart and iterate
2601       over the bitmap to identify gray objects. Throughout the marking
2602       phase, tasks attempt to keep the global mark stack at a small
2603       length but not totally empty, so that entries are available for
2604       popping by other tasks. Only when there is no more work, tasks
2605       will totally drain the global mark stack.
2606 
2607       (4) SATB Buffer Queue. This is where completed SATB buffers are
2608       made available. Buffers are regularly removed from this queue
2609       and scanned for roots, so that the queue doesn't get too
2610       long. During remark, all completed buffers are processed, as
2611       well as the filled in parts of any uncompleted buffers.
2612 
2613     The do_marking_step() method tries to abort when the time target
2614     has been reached. There are a few other cases when the
2615     do_marking_step() method also aborts:
2616 
2617       (1) When the marking phase has been aborted (after a Full GC).
2618 
2619       (2) When a global overflow (on the global stack) has been
2620       triggered. Before the task aborts, it will actually sync up with
2621       the other tasks to ensure that all the marking data structures
2622       (local queues, stacks, fingers etc.)  are re-initialized so that
2623       when do_marking_step() completes, the marking phase can
2624       immediately restart.
2625 
2626       (3) When enough completed SATB buffers are available. The
2627       do_marking_step() method only tries to drain SATB buffers right
2628       at the beginning. So, if enough buffers are available, the
2629       marking step aborts and the SATB buffers are processed at
2630       the beginning of the next invocation.
2631 
      (4) To yield. When we have to yield, we abort and yield
      right at the end of do_marking_step(). This saves us from a lot
      of hassle as, by yielding, we might allow a Full GC. If this
      happens then objects will be compacted underneath our feet, the
      heap might shrink, etc. We save checking for this by just
      aborting and doing the yield right at the end.
2638 
2639     From the above it follows that the do_marking_step() method should
2640     be called in a loop (or, otherwise, regularly) until it completes.
2641 
2642     If a marking step completes without its has_aborted() flag being
2643     true, it means it has completed the current marking phase (and
2644     also all other marking tasks have done so and have all synced up).
2645 
2646     A method called regular_clock_call() is invoked "regularly" (in
2647     sub ms intervals) throughout marking. It is this clock method that
2648     checks all the abort conditions which were mentioned above and
2649     decides when the task should abort. A work-based scheme is used to
2650     trigger this clock method: when the number of object words the
2651     marking phase has scanned or the number of references the marking
    phase has visited reach a given limit. Additional invocations to
    the clock method have been planted in a few other strategic places
2654     too. The initial reason for the clock method was to avoid calling
2655     vtime too regularly, as it is quite expensive. So, once it was in
2656     place, it was natural to piggy-back all the other conditions on it
2657     too and not constantly check them throughout the code.
2658 
2659     If do_termination is true then do_marking_step will enter its
2660     termination protocol.
2661 
2662     The value of is_serial must be true when do_marking_step is being
2663     called serially (i.e. by the VMThread) and do_marking_step should
2664     skip any synchronization in the termination and overflow code.
2665     Examples include the serial remark code and the serial reference
2666     processing closures.
2667 
2668     The value of is_serial must be false when do_marking_step is
2669     being called by any of the worker threads in a work gang.
2670     Examples include the concurrent marking code (CMMarkingTask),
2671     the MT remark code, and the MT reference processing closures.
2672 
2673  *****************************************************************************/
2674 
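// Typical calling pattern (see e.g. G1CMDrainMarkingStackClosure::do_void()
// and G1CMRemarkTask::work() above):
//
//   do {
//     task->do_marking_step(target_ms, do_termination, is_serial);
//   } while (task->has_aborted() && !cm->has_overflown());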
2675 void G1CMTask::do_marking_step(double time_target_ms,
2676                                bool do_termination,
2677                                bool is_serial) {
2678   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2679   assert(concurrent() == _cm->concurrent(), "they should be the same");
2680 
2681   G1Policy* g1_policy = _g1h->g1_policy();
2682   assert(_task_queues != NULL, "invariant");
2683   assert(_task_queue != NULL, "invariant");
2684   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
2685 
2686   assert(!_claimed,
2687          "only one thread should claim this task at any one time");
2688 
  // OK, this doesn't safeguard against all possible scenarios, as it is
2690   // possible for two threads to set the _claimed flag at the same
2691   // time. But it is only for debugging purposes anyway and it will
2692   // catch most problems.
2693   _claimed = true;
2694 
2695   _start_time_ms = os::elapsedVTime() * 1000.0;
2696 
2697   // If do_stealing is true then do_marking_step will attempt to
2698   // steal work from the other G1CMTasks. It only makes sense to
2699   // enable stealing when the termination protocol is enabled
2700   // and do_marking_step() is not being called serially.
2701   bool do_stealing = do_termination && !is_serial;
2702 
2703   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2704   _time_target_ms = time_target_ms - diff_prediction_ms;
2705 
2706   // set up the variables that are used in the work-based scheme to
2707   // call the regular clock method
2708   _words_scanned = 0;
2709   _refs_reached  = 0;
2710   recalculate_limits();
2711 
2712   // clear all flags
2713   clear_has_aborted();
2714   _has_timed_out = false;
2715   _draining_satb_buffers = false;
2716 
2717   ++_calls;
2718 
2719   // Set up the bitmap and oop closures. Anything that uses them is
2720   // eventually called from this method, so it is OK to allocate these
2721   // statically.
2722   G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
2723   G1CMOopClosure    cm_oop_closure(_g1h, _cm, this);
2724   set_cm_oop_closure(&cm_oop_closure);
2725 
2726   if (_cm->has_overflown()) {
2727     // This can happen if the mark stack overflows during a GC pause
2728     // and this task, after a yield point, restarts. We have to abort
2729     // as we need to get into the overflow protocol which happens
2730     // right at the end of this task.
2731     set_has_aborted();
2732   }
2733 
2734   // First drain any available SATB buffers. After this, we will not
2735   // look at SATB buffers before the next invocation of this method.
2736   // If enough completed SATB buffers are queued up, the regular clock
2737   // will abort this task so that it restarts.
2738   drain_satb_buffers();
2739   // ...then partially drain the local queue and the global stack
2740   drain_local_queue(true);
2741   drain_global_stack(true);
2742 
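  // Main loop: scan the region we currently hold (if any), then keep
  // claiming and scanning further regions until we either run out of
  // regions or abort.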
2743   do {
2744     if (!has_aborted() && _curr_region != NULL) {
2745       // This means that we're already holding on to a region.
2746       assert(_finger != NULL, "if region is not NULL, then the finger "
2747              "should not be NULL either");
2748 
2749       // We might have restarted this task after an evacuation pause
2750       // which might have evacuated the region we're holding on to
2751       // underneath our feet. Let's read its limit again to make sure
2752       // that we do not iterate over a region of the heap that
2753       // contains garbage (update_region_limit() will also move
2754       // _finger to the start of the region if it is found empty).
2755       update_region_limit();
2756       // We will start from _finger not from the start of the region,
2757       // as we might be restarting this task after aborting half-way
2758       // through scanning this region. In this case, _finger points to
2759       // the address where we last found a marked object. If this is a
2760       // fresh region, _finger points to start().
2761       MemRegion mr = MemRegion(_finger, _region_limit);
2762 
2763       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2764              "humongous regions should go around loop once only");
2765 
2766       // Some special cases:
2767       // If the memory region is empty, we can just give up the region.
2768       // If the current region is humongous then we only need to check
2769       // the bitmap for the bit associated with the start of the object,
2770       // scan the object if it's live, and give up the region.
2771       // Otherwise, let's iterate over the bitmap of the part of the region
2772       // that is left.
2773       // If the iteration is successful, give up the region.
2774       if (mr.is_empty()) {
2775         giveup_current_region();
2776         regular_clock_call();
2777       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2778         if (_nextMarkBitMap->isMarked(mr.start())) {
2779           // The object is marked - apply the closure
2780           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
2781           bitmap_closure.do_bit(offset);
2782         }
2783         // Even if this task aborted while scanning the humongous object
2784         // we can (and should) give up the current region.
2785         giveup_current_region();
2786         regular_clock_call();
2787       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
2788         giveup_current_region();
2789         regular_clock_call();
2790       } else {
2791         assert(has_aborted(), "currently the only way to do so");
2792         // The only way to abort the bitmap iteration is to return
2793         // false from the do_bit() method. However, inside the
2794         // do_bit() method we move the _finger to point to the
2795         // object currently being looked at. So, if we bail out, we
2796         // have definitely set _finger to something non-null.
2797         assert(_finger != NULL, "invariant");
2798 
2799         // Region iteration was actually aborted. So now _finger
2800         // points to the address of the object we last scanned. If we
2801         // leave it there, when we restart this task, we will rescan
2802         // the object. It is easy to avoid this. We move the finger by
2803         // enough to point to the next possible object header (the
2804         // bitmap knows by how much we need to move it as it knows its
2805         // granularity).
2806         assert(_finger < _region_limit, "invariant");
2807         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
2808         // Check if bitmap iteration was aborted while scanning the last object
2809         if (new_finger >= _region_limit) {
2810           giveup_current_region();
2811         } else {
2812           move_finger_to(new_finger);
2813         }
2814       }
2815     }
2816     // At this point we have either completed iterating over the
2817     // region we were holding on to, or we have aborted.
2818 
2819     // We then partially drain the local queue and the global stack.
2820     // (Do we really need this?)
2821     drain_local_queue(true);
2822     drain_global_stack(true);
2823 
2824     // Read the note on the claim_region() method on why it might
2825     // return NULL with potentially more regions available for
2826     // claiming and why we have to check out_of_regions() to determine
2827     // whether we're done or not.
2828     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2829       // We are going to try to claim a new region. We should have
2830       // given up on the previous one.
2831       // Separated the asserts so that we know which one fires.
2832       assert(_curr_region  == NULL, "invariant");
2833       assert(_finger       == NULL, "invariant");
2834       assert(_region_limit == NULL, "invariant");
2835       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2836       if (claimed_region != NULL) {
2837         // Yes, we managed to claim one
2838         setup_for_region(claimed_region);
2839         assert(_curr_region == claimed_region, "invariant");
2840       }
2841       // It is important to call the regular clock here. It might take
2842       // a while to claim a region if, for example, we hit a large
2843       // block of empty regions. So we need to call the regular clock
2844       // method once round the loop to make sure it's called
2845       // frequently enough.
2846       regular_clock_call();
2847     }
2848 
2849     if (!has_aborted() && _curr_region == NULL) {
2850       assert(_cm->out_of_regions(),
2851              "at this point we should be out of regions");
2852     }
2853   } while ( _curr_region != NULL && !has_aborted());
2854 
2855   if (!has_aborted()) {
2856     // We cannot check whether the global stack is empty, since other
2857     // tasks might be pushing objects to it concurrently.
2858     assert(_cm->out_of_regions(),
2859            "at this point we should be out of regions");
2860     // Try to reduce the number of available SATB buffers so that
2861     // remark has less work to do.
2862     drain_satb_buffers();
2863   }
2864 
2865   // Since we've done everything else, we can now totally drain the
2866   // local queue and global stack.
2867   drain_local_queue(false);
2868   drain_global_stack(false);
2869 
2870   // Attempt to steal work from the other tasks' queues.
2871   if (do_stealing && !has_aborted()) {
2872     // We have not aborted. This means that we have finished all that
2873     // we could. Let's try to do some stealing...
2874 
2875     // We cannot check whether the global stack is empty, since other
2876     // tasks might be pushing objects to it concurrently.
2877     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2878            "only way to reach here");
2879     while (!has_aborted()) {
2880       oop obj;
2881       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
2882         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
2883                "any stolen object should be marked");
2884         scan_object(obj);
2885 
2886         // And since we're towards the end, let's totally drain the
2887         // local queue and global stack.
2888         drain_local_queue(false);
2889         drain_global_stack(false);
2890       } else {
2891         break;
2892       }
2893     }
2894   }
2895 
2896   // We still haven't aborted. Now, let's try to get into the
2897   // termination protocol.
2898   if (do_termination && !has_aborted()) {
2899     // We cannot check whether the global stack is empty, since other
2900     // tasks might be concurrently pushing objects on it.
2901     // Separated the asserts so that we know which one fires.
2902     assert(_cm->out_of_regions(), "only way to reach here");
2903     assert(_task_queue->size() == 0, "only way to reach here");
2904     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2905 
2906     // The G1CMTask class also extends the TerminatorTerminator class,
2907     // so its should_exit_termination() method also decides whether to
2908     // exit the termination protocol.
2909     bool finished = (is_serial ||
2910                      _cm->terminator()->offer_termination(this));
2911     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2912     _termination_time_ms +=
2913       termination_end_time_ms - _termination_start_time_ms;
2914 
2915     if (finished) {
2916       // We're all done.
2917 
2918       if (_worker_id == 0) {
2919         // let's allow task 0 to do this
2920         if (concurrent()) {
2921           assert(_cm->concurrent_marking_in_progress(), "invariant");
2922           // we need to set this to false before the next
2923           // safepoint. This way we ensure that the marking phase
2924           // doesn't observe any more heap expansions.
2925           _cm->clear_concurrent_marking_in_progress();
2926         }
2927       }
2928 
2929       // We can now guarantee that the global stack is empty, since
2930       // all other tasks have finished. We separated the guarantees so
2931       // that, if a condition is false, we can immediately find out
2932       // which one.
2933       guarantee(_cm->out_of_regions(), "only way to reach here");
2934       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2935       guarantee(_task_queue->size() == 0, "only way to reach here");
2936       guarantee(!_cm->has_overflown(), "only way to reach here");
2937       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
2938     } else {
2939       // Apparently there's more work to do. Let's abort this task. The
2940       // caller will then restart it and we can hopefully find more to do.
2941       set_has_aborted();
2942     }
2943   }
2944 
2945   // Mainly for debugging purposes: make sure that the pointer to the
2946   // closure, which was allocated locally in this frame, doesn't
2947   // escape it by accident.
2948   set_cm_oop_closure(NULL);
2949   double end_time_ms = os::elapsedVTime() * 1000.0;
2950   double elapsed_time_ms = end_time_ms - _start_time_ms;
2951   // Update the step history.
2952   _step_times_ms.add(elapsed_time_ms);
2953 
2954   if (has_aborted()) {
2955     // The task was aborted for some reason.
2956     if (_has_timed_out) {
2957       double diff_ms = elapsed_time_ms - _time_target_ms;
2958       // Keep statistics of how well we did with respect to hitting
2959       // our target only if we actually timed out (if we aborted for
2960       // other reasons, then the results might get skewed).
2961       _marking_step_diffs_ms.add(diff_ms);
2962     }
2963 
2964     if (_cm->has_overflown()) {
2965       // This is the interesting one. We aborted because a global
2966       // overflow was raised. This means we have to restart the
2967       // marking phase and start iterating over regions. However, in
2968       // order to do this we have to make sure that all tasks stop
2969       // what they are doing and re-initialize in a safe manner. We
2970       // will achieve this with the use of two barrier sync points.
2971 
2972       if (!is_serial) {
2973         // We only need to enter the sync barrier if being called
2974         // from a parallel context
2975         _cm->enter_first_sync_barrier(_worker_id);
2976 
2977         // When we exit this sync barrier we know that all tasks have
2978         // stopped doing marking work. So, it's now safe to
2979         // re-initialize our data structures. At the end of this method,
2980         // task 0 will clear the global data structures.
2981       }
2982 
2983       // We clear the local state of this task...
2984       clear_region_fields();
2985 
2986       if (!is_serial) {
2987         // ...and enter the second barrier.
2988         _cm->enter_second_sync_barrier(_worker_id);
2989       }
2990       // At this point, if we're in the concurrent phase of
2991       // marking, everything has been re-initialized and we're
2992       // ready to restart.
2993     }
2994   }
2995 
2996   _claimed = false;
2997 }
2998 
2999 G1CMTask::G1CMTask(uint worker_id,
3000                    G1ConcurrentMark* cm,
3001                    G1CMTaskQueue* task_queue,
3002                    G1CMTaskQueueSet* task_queues)
3003   : _g1h(G1CollectedHeap::heap()),
3004     _worker_id(worker_id), _cm(cm),
3005     _claimed(false),
3006     _nextMarkBitMap(NULL), _hash_seed(17),
3007     _task_queue(task_queue),
3008     _task_queues(task_queues),
3009     _cm_oop_closure(NULL) {
3010   guarantee(task_queue != NULL, "invariant");
3011   guarantee(task_queues != NULL, "invariant");
3012 
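  // Seed the step-overshoot statistics so that the predictor used by
  // do_marking_step() has an initial sample to work with.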
3013   _marking_step_diffs_ms.add(0.5);
3014 }
3015 
3016 // These are formatting macros that are used below to ensure
3017 // consistent formatting. The *_H_* versions are used to format the
3018 // header for a particular value and they should be kept consistent
3019 // with the corresponding macro. Also note that most of the macros add
3020 // the necessary white space (as a prefix) which makes them a bit
3021 // easier to compose.
3022 
3023 // All the output lines are prefixed with this string to be able to
3024 // identify them easily in a large log file.
3025 #define G1PPRL_LINE_PREFIX            "###"
3026 
3027 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
3028 #ifdef _LP64
3029 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
3030 #else // _LP64
3031 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
3032 #endif // _LP64
3033 
3034 // For per-region info
3035 #define G1PPRL_TYPE_FORMAT            "   %-4s"
3036 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
3037 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
3038 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
3039 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
3040 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
3041 
3042 // For summary info
3043 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
3044 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
3045 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
3046 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
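
// For example, G1PPRL_SUM_MB_PERC_FORMAT("used") expands to the format string
//   "  used: %1.2f MB / %1.2f %%"
// and so consumes two double arguments (the size in MB and the percentage of
// the total capacity).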
3047 
3048 G1PrintRegionLivenessInfoClosure::
3049 G1PrintRegionLivenessInfoClosure(const char* phase_name)
3050   : _total_used_bytes(0), _total_capacity_bytes(0),
3051     _total_prev_live_bytes(0), _total_next_live_bytes(0),
3052     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
3053   G1CollectedHeap* g1h = G1CollectedHeap::heap();
3054   MemRegion g1_reserved = g1h->g1_reserved();
3055   double now = os::elapsedTime();
3056 
3057   // Print the header of the output.
3058   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
3059   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
3060                           G1PPRL_SUM_ADDR_FORMAT("reserved")
3061                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
3062                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
3063                           HeapRegion::GrainBytes);
3064   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3065   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3066                           G1PPRL_TYPE_H_FORMAT
3067                           G1PPRL_ADDR_BASE_H_FORMAT
3068                           G1PPRL_BYTE_H_FORMAT
3069                           G1PPRL_BYTE_H_FORMAT
3070                           G1PPRL_BYTE_H_FORMAT
3071                           G1PPRL_DOUBLE_H_FORMAT
3072                           G1PPRL_BYTE_H_FORMAT
3073                           G1PPRL_BYTE_H_FORMAT,
3074                           "type", "address-range",
3075                           "used", "prev-live", "next-live", "gc-eff",
3076                           "remset", "code-roots");
3077   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3078                           G1PPRL_TYPE_H_FORMAT
3079                           G1PPRL_ADDR_BASE_H_FORMAT
3080                           G1PPRL_BYTE_H_FORMAT
3081                           G1PPRL_BYTE_H_FORMAT
3082                           G1PPRL_BYTE_H_FORMAT
3083                           G1PPRL_DOUBLE_H_FORMAT
3084                           G1PPRL_BYTE_H_FORMAT
3085                           G1PPRL_BYTE_H_FORMAT,
3086                           "", "",
3087                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3088                           "(bytes)", "(bytes)");
3089 }
3090 
3091 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
3092   const char* type       = r->get_type_str();
3093   HeapWord* bottom       = r->bottom();
3094   HeapWord* end          = r->end();
3095   size_t capacity_bytes  = r->capacity();
3096   size_t used_bytes      = r->used();
3097   size_t prev_live_bytes = r->live_bytes();
3098   size_t next_live_bytes = r->next_live_bytes();
3099   double gc_eff          = r->gc_efficiency();
3100   size_t remset_bytes    = r->rem_set()->mem_size();
3101   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3102 
3103   _total_used_bytes      += used_bytes;
3104   _total_capacity_bytes  += capacity_bytes;
3105   _total_prev_live_bytes += prev_live_bytes;
3106   _total_next_live_bytes += next_live_bytes;
3107   _total_remset_bytes    += remset_bytes;
3108   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3109 
3110   // Print a line for this particular region.
3111   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3112                           G1PPRL_TYPE_FORMAT
3113                           G1PPRL_ADDR_BASE_FORMAT
3114                           G1PPRL_BYTE_FORMAT
3115                           G1PPRL_BYTE_FORMAT
3116                           G1PPRL_BYTE_FORMAT
3117                           G1PPRL_DOUBLE_FORMAT
3118                           G1PPRL_BYTE_FORMAT
3119                           G1PPRL_BYTE_FORMAT,
3120                           type, p2i(bottom), p2i(end),
3121                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3122                           remset_bytes, strong_code_roots_bytes);
3123 
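  // Returning false tells the region iteration to continue with the next
  // region; this closure never aborts the iteration early.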
3124   return false;
3125 }
3126 
3127 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3128   // Add static memory usage to the remembered set sizes.
3129   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3130   // Print the footer of the output.
3131   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3132   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3133                          " SUMMARY"
3134                          G1PPRL_SUM_MB_FORMAT("capacity")
3135                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3136                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3137                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3138                          G1PPRL_SUM_MB_FORMAT("remset")
3139                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3140                          bytes_to_mb(_total_capacity_bytes),
3141                          bytes_to_mb(_total_used_bytes),
3142                          perc(_total_used_bytes, _total_capacity_bytes),
3143                          bytes_to_mb(_total_prev_live_bytes),
3144                          perc(_total_prev_live_bytes, _total_capacity_bytes),
3145                          bytes_to_mb(_total_next_live_bytes),
3146                          perc(_total_next_live_bytes, _total_capacity_bytes),
3147                          bytes_to_mb(_total_remset_bytes),
3148                          bytes_to_mb(_total_strong_code_roots_bytes));
3149 }