1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMarkThread.inline.hpp"
  30 #include "gc/g1/g1CollectedHeap.inline.hpp"
  31 #include "gc/g1/g1CollectorState.hpp"
  32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  33 #include "gc/g1/g1HeapVerifier.hpp"
  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1CardLiveData.inline.hpp"
  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/heapRegion.inline.hpp"
  39 #include "gc/g1/heapRegionRemSet.hpp"
  40 #include "gc/g1/heapRegionSet.inline.hpp"
  41 #include "gc/g1/suspendibleThreadSet.hpp"
  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/genOopClosures.inline.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"
  49 #include "gc/shared/taskqueue.inline.hpp"
  50 #include "gc/shared/vmGCOperations.hpp"
  51 #include "logging/log.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "runtime/atomic.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/prefetch.inline.hpp"
  59 #include "services/memTracker.hpp"
  60 #include "utilities/growableArray.hpp"
  61 
  62 // Concurrent marking bit map wrapper
  63 
  64 G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  65   _bm(),
  66   _shifter(shifter) {
  67   _bmStartWord = 0;
  68   _bmWordSize = 0;
  69 }
  70 
  71 HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  72                                                  const HeapWord* limit) const {
  73   // First we must round addr *up* to a possible object boundary.
  74   addr = (HeapWord*)align_size_up((intptr_t)addr,
  75                                   HeapWordSize << _shifter);
  76   size_t addrOffset = heapWordToOffset(addr);
  77   assert(limit != NULL, "limit must not be NULL");
  78   size_t limitOffset = heapWordToOffset(limit);
  79   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  80   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  81   assert(nextAddr >= addr, "get_next_one postcondition");
  82   assert(nextAddr == limit || isMarked(nextAddr),
  83          "get_next_one postcondition");
  84   return nextAddr;
  85 }
  86 
  87 #ifndef PRODUCT
  88 bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  89   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  90   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
  91          "size inconsistency");
  92   return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
  93          _bmWordSize  == heap_rs.word_size();
  94 }
  95 #endif
  96 
  97 void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  98   _bm.print_on_error(st, prefix);
  99 }
 100 
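     // Sizing note: mark_distance() below is the number of heap bytes covered by a
     // single bitmap byte (one bit per MinObjAlignment-sized chunk of heap), so
     // compute_size() simply divides the heap size by that factor. With the usual
     // 8-byte object alignment one bitmap byte covers 64 heap bytes, i.e. the bitmap
     // is 1/64th of the heap (before rounding up to the allocation alignment).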
 101 size_t G1CMBitMap::compute_size(size_t heap_size) {
 102   return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
 103 }
 104 
 105 size_t G1CMBitMap::mark_distance() {
 106   return MinObjAlignmentInBytes * BitsPerByte;
 107 }
 108 
 109 void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
 110   _bmStartWord = heap.start();
 111   _bmWordSize = heap.word_size();
 112 
 113   _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);
 114 
 115   storage->set_mapping_changed_listener(&_listener);
 116 }
 117 
 118 void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
 119   if (zero_filled) {
 120     return;
 121   }
 122   // We need to clear the bitmap on commit, removing any existing information.
 123   MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
 124   _bm->clear_range(mr);
 125 }
 126 
 127 void G1CMBitMap::clear_range(MemRegion mr) {
 128   mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 129   assert(!mr.is_empty(), "unexpected empty region");
 130   // convert address range into offset range
 131   _bm.at_put_range(heapWordToOffset(mr.start()),
 132                    heapWordToOffset(mr.end()), false);
 133 }
 134 
 135 G1CMMarkStack::G1CMMarkStack() :
 136   _max_chunk_capacity(0),
 137   _base(NULL),
 138   _chunk_capacity(0),
 139   _out_of_memory(false),
 140   _should_expand(false) {
 141   set_empty();
 142 }
 143 
 144 bool G1CMMarkStack::resize(size_t new_capacity) {
 145   assert(is_empty(), "Only resize when stack is empty.");
 146   assert(new_capacity <= _max_chunk_capacity,
 147          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
 148 
 149   OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity);
 150 
 151   if (new_base == NULL) {
 152     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk));
 153     return false;
 154   }
 155   // Release old mapping.
 156   if (_base != NULL) {
 157     MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
 158   }
 159 
 160   _base = new_base;
 161   _chunk_capacity = new_capacity;
 162   set_empty();
 163   _should_expand = false;
 164 
 165   return true;
 166 }
 167 
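     // Note: the alignment below is expressed in void*-sized units. Aligning the
     // (oop-counted) capacities passed to initialize() to it makes the reserved size
     // a multiple of both the VM allocation granularity and sizeof(OopChunk), so the
     // backing memory always holds a whole number of chunks.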
 168 size_t G1CMMarkStack::capacity_alignment() {
 169   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(OopChunk)) / sizeof(void*);
 170 }
 171 
 172 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 173   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 174 
 175   size_t const OopChunkSizeInVoidStar = sizeof(OopChunk) / sizeof(void*);
 176 
 177   _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
 178   size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
 179 
 180   guarantee(initial_chunk_capacity <= _max_chunk_capacity,
 181             "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
 182             _max_chunk_capacity,
 183             initial_chunk_capacity);
 184 
 185   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
 186                 initial_chunk_capacity, _max_chunk_capacity);
 187 
 188   return resize(initial_chunk_capacity);
 189 }
 190 
 191 void G1CMMarkStack::expand() {
 192   // Clear expansion flag
 193   _should_expand = false;
 194 
 195   if (_chunk_capacity == _max_chunk_capacity) {
 196     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 197     return;
 198   }
 199   size_t old_capacity = _chunk_capacity;
 200   // Double capacity if possible
 201   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 202 
 203   if (resize(new_capacity)) {
 204     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 205                   old_capacity, new_capacity);
 206   } else {
 207     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 208                     old_capacity, new_capacity);
 209   }
 210 }
 211 
 212 G1CMMarkStack::~G1CMMarkStack() {
 213   if (_base != NULL) {
 214     MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
 215   }
 216 }
 217 
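     // The raw list helpers below do plain, unsynchronized pointer updates; callers
     // are expected to hold the lock guarding the given list, as the
     // add_chunk_to_* / remove_chunk_from_* wrappers further down do.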
 218 void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
 219   elem->next = *list;
 220   *list = elem;
 221 }
 222 
 223 void G1CMMarkStack::add_chunk_to_chunk_list(OopChunk* elem) {
 224   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 225   add_chunk_to_list(&_chunk_list, elem);
 226   _chunks_in_chunk_list++;
 227 }
 228 
 229 void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) {
 230   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 231   add_chunk_to_list(&_free_list, elem);
 232 }
 233 
 234 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
 235   OopChunk* result = *list;
 236   if (result != NULL) {
 237     *list = (*list)->next;
 238   }
 239   return result;
 240 }
 241 
 242 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
 243   MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
 244   OopChunk* result = remove_chunk_from_list(&_chunk_list);
 245   if (result != NULL) {
 246     _chunks_in_chunk_list--;
 247   }
 248   return result;
 249 }
 250 
 251 G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() {
 252   MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
 253   return remove_chunk_from_list(&_free_list);
 254 }
 255 
 256 G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
 257   // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
 258   // Further, this limits _hwm to a value of _chunk_capacity + #threads, avoiding
 259   // wraparound of _hwm.
 260   if (_hwm >= _chunk_capacity) {
 261     return NULL;
 262   }
 263 
 264   size_t cur_idx = Atomic::add(1, &_hwm) - 1;
 265   if (cur_idx >= _chunk_capacity) {
 266     return NULL;
 267   }
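       // The re-check above is needed because several threads may pass the initial
       // _hwm test concurrently and push _hwm past _chunk_capacity; the losers return
       // NULL here and par_push_chunk() then sets the out-of-memory flag.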
 268 
 269   OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
 270   result->next = NULL;
 271   return result;
 272 }
 273 
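     // The global mark stack transfers work in whole chunks: par_push_chunk() copies
     // OopsPerChunk oops from the caller's buffer into a chunk (reusing one from the
     // free list if possible, otherwise carving a new one out of the backing array),
     // while par_pop_chunk() hands a full chunk back to the caller. Working at chunk
     // granularity means the two list locks are taken only once per OopsPerChunk oops.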
 274 bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
 275   // Get a new chunk.
 276   OopChunk* new_chunk = remove_chunk_from_free_list();
 277 
 278   if (new_chunk == NULL) {
 279     // Did not get a chunk from the free list. Allocate from backing memory.
 280     new_chunk = allocate_new_chunk();
 281   }
 282 
 283   if (new_chunk == NULL) {
 284     _out_of_memory = true;
 285     return false;
 286   }
 287 
 288   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, OopsPerChunk * sizeof(oop));
 289 
 290   add_chunk_to_chunk_list(new_chunk);
 291 
 292   return true;
 293 }
 294 
 295 bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
 296   OopChunk* cur = remove_chunk_from_chunk_list();
 297 
 298   if (cur == NULL) {
 299     return false;
 300   }
 301 
 302   Copy::conjoint_memory_atomic(cur->data, ptr_arr, OopsPerChunk * sizeof(oop));
 303 
 304   add_chunk_to_free_list(cur);
 305   return true;
 306 }
 307 
 308 void G1CMMarkStack::set_empty() {
 309   _chunks_in_chunk_list = 0;
 310   _hwm = 0;
 311   clear_out_of_memory();
 312   _chunk_list = NULL;
 313   _free_list = NULL;
 314 }
 315 
 316 G1CMRootRegions::G1CMRootRegions() :
 317   _cm(NULL), _scan_in_progress(false),
 318   _should_abort(false), _claimed_survivor_index(0) { }
 319 
 320 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
 321   _survivors = survivors;
 322   _cm = cm;
 323 }
 324 
 325 void G1CMRootRegions::prepare_for_scan() {
 326   assert(!scan_in_progress(), "pre-condition");
 327 
 328   // Currently, only survivors can be root regions.
 329   _claimed_survivor_index = 0;
 330   _scan_in_progress = _survivors->regions()->is_nonempty();
 331   _should_abort = false;
 332 }
 333 
 334 HeapRegion* G1CMRootRegions::claim_next() {
 335   if (_should_abort) {
 336     // If someone has set the should_abort flag, we return NULL to
 337     // force the caller to bail out of their loop.
 338     return NULL;
 339   }
 340 
 341   // Currently, only survivors can be root regions.
 342   const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();
 343 
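       // Claim the next survivor by atomically bumping the shared index: every caller
       // gets a distinct index, and once the index passes the end of the survivor list
       // all subsequent callers simply receive NULL.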
 344   int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
 345   if (claimed_index < survivor_regions->length()) {
 346     return survivor_regions->at(claimed_index);
 347   }
 348   return NULL;
 349 }
 350 
 351 uint G1CMRootRegions::num_root_regions() const {
 352   return (uint)_survivors->regions()->length();
 353 }
 354 
 355 void G1CMRootRegions::notify_scan_done() {
 356   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 357   _scan_in_progress = false;
 358   RootRegionScan_lock->notify_all();
 359 }
 360 
 361 void G1CMRootRegions::cancel_scan() {
 362   notify_scan_done();
 363 }
 364 
 365 void G1CMRootRegions::scan_finished() {
 366   assert(scan_in_progress(), "pre-condition");
 367 
 368   // Currently, only survivors can be root regions.
 369   if (!_should_abort) {
 370     assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
 371     assert((uint)_claimed_survivor_index >= _survivors->length(),
 372            "we should have claimed all survivors, claimed index = %u, length = %u",
 373            (uint)_claimed_survivor_index, _survivors->length());
 374   }
 375 
 376   notify_scan_done();
 377 }
 378 
 379 bool G1CMRootRegions::wait_until_scan_finished() {
 380   if (!scan_in_progress()) return false;
 381 
 382   {
 383     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
 384     while (scan_in_progress()) {
 385       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
 386     }
 387   }
 388   return true;
 389 }
 390 
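     // Heuristic: by default use roughly one concurrent marking thread for every four
     // parallel GC threads ((n + 2) / 4), but always at least one.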
 391 uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
 392   return MAX2((n_par_threads + 2) / 4, 1U);
 393 }
 394 
 395 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
 396   _g1h(g1h),
 397   _markBitMap1(),
 398   _markBitMap2(),
 399   _parallel_marking_threads(0),
 400   _max_parallel_marking_threads(0),
 401   _sleep_factor(0.0),
 402   _marking_task_overhead(1.0),
 403   _cleanup_list("Cleanup List"),
 404 
 405   _prevMarkBitMap(&_markBitMap1),
 406   _nextMarkBitMap(&_markBitMap2),
 407 
 408   _global_mark_stack(),
 409   // _finger set in set_non_marking_state
 410 
 411   _max_worker_id(ParallelGCThreads),
 412   // _active_tasks set in set_non_marking_state
 413   // _tasks set inside the constructor
 414   _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
 415   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
 416 
 417   _has_overflown(false),
 418   _concurrent(false),
 419   _has_aborted(false),
 420   _restart_for_overflow(false),
 421   _concurrent_marking_in_progress(false),
 422   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 423   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
 424 
 425   // _verbose_level set below
 426 
 427   _init_times(),
 428   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
 429   _cleanup_times(),
 430   _total_counting_time(0.0),
 431   _total_rs_scrub_time(0.0),
 432 
 433   _parallel_workers(NULL),
 434 
 435   _completed_initialization(false) {
 436 
 437   _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 438   _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 439 
 440   // Create & start a ConcurrentMark thread.
 441   _cmThread = new ConcurrentMarkThread(this);
 442   assert(cmThread() != NULL, "CM Thread should have been created");
 443   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
 444   if (_cmThread->osthread() == NULL) {
 445       vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 446   }
 447 
 448   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 449   assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
 450   assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 451 
 452   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 453   satb_qs.set_buffer_size(G1SATBBufferSize);
 454 
 455   _root_regions.init(_g1h->survivor(), this);
 456 
 457   if (ConcGCThreads > ParallelGCThreads) {
 458     log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
 459                     ConcGCThreads, ParallelGCThreads);
 460     return;
 461   }
 462   if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
 463     // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
 464     // if both are set
 465     _sleep_factor             = 0.0;
 466     _marking_task_overhead    = 1.0;
 467   } else if (G1MarkingOverheadPercent > 0) {
 468     // We will calculate the number of parallel marking threads based
 469     // on a target overhead with respect to the soft real-time goal
 470     double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
 471     double overall_cm_overhead =
 472       (double) MaxGCPauseMillis * marking_overhead /
 473       (double) GCPauseIntervalMillis;
 474     double cpu_ratio = 1.0 / os::initial_active_processor_count();
 475     double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
 476     double marking_task_overhead =
 477       overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
 478     double sleep_factor =
 479                        (1.0 - marking_task_overhead) / marking_task_overhead;
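         // marking_task_overhead is (roughly) the fraction of a CPU each marking
         // thread should consume; sleep_factor turns that into a sleep-to-work ratio.
         // For example, an overhead of 0.25 yields a sleep_factor of 3.0: after
         // working for t ms a task sleeps for 3t ms (see the sleep in
         // G1CMConcurrentMarkingTask::work()), keeping it busy ~25% of the time.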
 480 
 481     FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
 482     _sleep_factor             = sleep_factor;
 483     _marking_task_overhead    = marking_task_overhead;
 484   } else {
 485     // Calculate the number of parallel marking threads by scaling
 486     // the number of parallel GC threads.
 487     uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
 488     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 489     _sleep_factor             = 0.0;
 490     _marking_task_overhead    = 1.0;
 491   }
 492 
 493   assert(ConcGCThreads > 0, "Should have been set");
 494   log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
 495   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 496   _parallel_marking_threads = ConcGCThreads;
 497   _max_parallel_marking_threads = _parallel_marking_threads;
 498 
 499   _parallel_workers = new WorkGang("G1 Marker",
 500        _max_parallel_marking_threads, false, true);
 501   if (_parallel_workers == NULL) {
 502     vm_exit_during_initialization("Failed necessary allocation.");
 503   } else {
 504     _parallel_workers->initialize_workers();
 505   }
 506 
 507   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 508     size_t mark_stack_size =
 509       MIN2(MarkStackSizeMax,
 510           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
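         // Ergonomic sizing: at least the default MarkStackSize and at least one
         // TASKQUEUE_SIZE worth of entries per marking thread (presumably so the
         // global stack can absorb one full local queue per task), capped at
         // MarkStackSizeMax.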
 511     // Verify that the calculated value for MarkStackSize is in range.
 512     // It would be nice to use the private utility routine from Arguments.
 513     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 514       log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 515                       "must be between 1 and " SIZE_FORMAT,
 516                       mark_stack_size, MarkStackSizeMax);
 517       return;
 518     }
 519     FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
 520   } else {
 521     // Verify MarkStackSize is in range.
 522     if (FLAG_IS_CMDLINE(MarkStackSize)) {
 523       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
 524         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 525           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
 526                           "must be between 1 and " SIZE_FORMAT,
 527                           MarkStackSize, MarkStackSizeMax);
 528           return;
 529         }
 530       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
 531         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
 532           log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
 533                           " or for MarkStackSizeMax (" SIZE_FORMAT ")",
 534                           MarkStackSize, MarkStackSizeMax);
 535           return;
 536         }
 537       }
 538     }
 539   }
 540 
 541   if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
 542     vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
 543   }
 544 
 545   _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
 546   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
 547 
 548   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
 549   _active_tasks = _max_worker_id;
 550 
 551   for (uint i = 0; i < _max_worker_id; ++i) {
 552     G1CMTaskQueue* task_queue = new G1CMTaskQueue();
 553     task_queue->initialize();
 554     _task_queues->register_queue(i, task_queue);
 555 
 556     _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);
 557 
 558     _accum_task_vtime[i] = 0.0;
 559   }
 560 
 561   // so that the call below can read a sensible value
 562   _heap_start = g1h->reserved_region().start();
 563   set_non_marking_state();
 564   _completed_initialization = true;
 565 }
 566 
 567 void G1ConcurrentMark::reset() {
 568   // Starting values for these two. This should be called in a STW
 569   // phase.
 570   MemRegion reserved = _g1h->g1_reserved();
 571   _heap_start = reserved.start();
 572   _heap_end   = reserved.end();
 573 
 574   // Separated the asserts so that we know which one fires.
 575   assert(_heap_start != NULL, "heap bounds should look ok");
 576   assert(_heap_end != NULL, "heap bounds should look ok");
 577   assert(_heap_start < _heap_end, "heap bounds should look ok");
 578 
 579   // Reset all the marking data structures and any necessary flags
 580   reset_marking_state();
 581 
 582   // We do reset all of them, since different phases will use
 583   // different number of active threads. So, it's easiest to have all
 584   // of them ready.
 585   for (uint i = 0; i < _max_worker_id; ++i) {
 586     _tasks[i]->reset(_nextMarkBitMap);
 587   }
 588 
 589   // we need this to make sure that the flag is on during an evacuation
 590   // pause that has the initial-mark phase piggy-backed on it
 591   set_concurrent_marking_in_progress();
 592 }
 593 
 594 
 595 void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
 596   _global_mark_stack.set_should_expand(has_overflown());
 597   _global_mark_stack.set_empty();        // Also clears the mark stack's out-of-memory flag
 598   if (clear_overflow) {
 599     clear_has_overflown();
 600   } else {
 601     assert(has_overflown(), "pre-condition");
 602   }
 603   _finger = _heap_start;
 604 
 605   for (uint i = 0; i < _max_worker_id; ++i) {
 606     G1CMTaskQueue* queue = _task_queues->queue(i);
 607     queue->set_empty();
 608   }
 609 }
 610 
 611 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
 612   assert(active_tasks <= _max_worker_id, "we should not have more");
 613 
 614   _active_tasks = active_tasks;
 615   // Need to update the three data structures below according to the
 616   // number of active threads for this phase.
 617   _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
 618   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
 619   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 620 }
 621 
 622 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
 623   set_concurrency(active_tasks);
 624 
 625   _concurrent = concurrent;
 626   // We propagate this to all tasks, not just the active ones.
 627   for (uint i = 0; i < _max_worker_id; ++i)
 628     _tasks[i]->set_concurrent(concurrent);
 629 
 630   if (concurrent) {
 631     set_concurrent_marking_in_progress();
 632   } else {
 633     // We currently assume that the concurrent flag has been set to
 634     // false before we start remark. At this point we should also be
 635     // in a STW phase.
 636     assert(!concurrent_marking_in_progress(), "invariant");
 637     assert(out_of_regions(),
 638            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
 639            p2i(_finger), p2i(_heap_end));
 640   }
 641 }
 642 
 643 void G1ConcurrentMark::set_non_marking_state() {
 644   // We set the global marking state to some default values when we're
 645   // not doing marking.
 646   reset_marking_state();
 647   _active_tasks = 0;
 648   clear_concurrent_marking_in_progress();
 649 }
 650 
 651 G1ConcurrentMark::~G1ConcurrentMark() {
 652   // The G1ConcurrentMark instance is never freed.
 653   ShouldNotReachHere();
 654 }
 655 
 656 class G1ClearBitMapTask : public AbstractGangTask {
 657 public:
 658   static size_t chunk_size() { return M; }
 659 
 660 private:
 661   // Heap region closure used for clearing the given mark bitmap.
 662   class G1ClearBitmapHRClosure : public HeapRegionClosure {
 663   private:
 664     G1CMBitMap* _bitmap;
 665     G1ConcurrentMark* _cm;
 666   public:
 667     G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
 668     }
 669 
 670     virtual bool doHeapRegion(HeapRegion* r) {
 671       size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
 672 
 673       HeapWord* cur = r->bottom();
 674       HeapWord* const end = r->end();
 675 
 676       while (cur < end) {
 677         MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
 678         _bitmap->clear_range(mr);
 679 
 680         cur += chunk_size_in_words;
 681 
 682         // Abort iteration if after yielding the marking has been aborted.
 683         if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
 684           return true;
 685         }
 686         // Repeat the asserts from before the start of the closure. We will do them
 687         // as asserts here to minimize their overhead on the product. However, we
 688         // will have them as guarantees at the beginning / end of the bitmap
 689         // clearing to get some checking in the product.
 690         assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
 691         assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
 692       }
 693       assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
 694 
 695       return false;
 696     }
 697   };
 698 
 699   G1ClearBitmapHRClosure _cl;
 700   HeapRegionClaimer _hr_claimer;
 701   bool _suspendible; // If the task is suspendible, workers must join the STS.
 702 
 703 public:
 704   G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
 705     AbstractGangTask("G1 Clear Bitmap"),
 706     _cl(bitmap, suspendible ? cm : NULL),
 707     _hr_claimer(n_workers),
 708     _suspendible(suspendible)
 709   { }
 710 
 711   void work(uint worker_id) {
 712     SuspendibleThreadSetJoiner sts_join(_suspendible);
 713     G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
 714   }
 715 
 716   bool is_complete() {
 717     return _cl.complete();
 718   }
 719 };
 720 
 721 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
 722   assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
 723 
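       // num_bytes_to_clear is the size of the bitmap backing the committed heap
       // (heap bytes divided by heap_map_factor()), expressed below in chunk_size()
       // (1M) work units; the worker count is then capped so that every worker has at
       // least one unit's worth of bitmap to clear.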
 724   size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
 725   size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
 726 
 727   uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
 728 
 729   G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
 730 
 731   log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
 732   workers->run_task(&cl, num_workers);
 733   guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
 734 }
 735 
 736 void G1ConcurrentMark::cleanup_for_next_mark() {
 737   // Make sure that the concurrent mark thread looks to still be in
 738   // the current cycle.
 739   guarantee(cmThread()->during_cycle(), "invariant");
 740 
 741   // We are finishing up the current cycle by clearing the next
 742   // marking bitmap and getting it ready for the next cycle. During
 743   // this time no other cycle can start. So, let's make sure that this
 744   // is the case.
 745   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
 746 
 747   clear_bitmap(_nextMarkBitMap, _parallel_workers, true);
 748 
 749   // Clear the live count data. If the marking has been aborted, the abort()
 750   // call already did that.
 751   if (!has_aborted()) {
 752     clear_live_data(_parallel_workers);
 753     DEBUG_ONLY(verify_live_data_clear());
 754   }
 755 
 756   // Repeat the asserts from above.
 757   guarantee(cmThread()->during_cycle(), "invariant");
 758   guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
 759 }
 760 
 761 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
 762   assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
 763   clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
 764 }
 765 
 766 class CheckBitmapClearHRClosure : public HeapRegionClosure {
 767   G1CMBitMap* _bitmap;
 768   bool _error;
 769  public:
 770   CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
 771   }
 772 
 773   virtual bool doHeapRegion(HeapRegion* r) {
 774     // This closure can be called concurrently with the mutator, so we must compare
 775     // the result of the getNextMarkedWordAddress() call against the limit passed to
 776     // it: any result below that limit means a marked bit was found.
 777     // end never changes in G1.
 778     HeapWord* end = r->end();
 779     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
 780   }
 781 };
 782 
 783 bool G1ConcurrentMark::nextMarkBitmapIsClear() {
 784   CheckBitmapClearHRClosure cl(_nextMarkBitMap);
 785   _g1h->heap_region_iterate(&cl);
 786   return cl.complete();
 787 }
 788 
 789 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 790 public:
 791   bool doHeapRegion(HeapRegion* r) {
 792     r->note_start_of_marking();
 793     return false;
 794   }
 795 };
 796 
 797 void G1ConcurrentMark::checkpointRootsInitialPre() {
 798   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 799   G1Policy* g1p = g1h->g1_policy();
 800 
 801   _has_aborted = false;
 802 
 803   // Initialize marking structures. This has to be done in a STW phase.
 804   reset();
 805 
 806   // For each region note start of marking.
 807   NoteStartOfMarkHRClosure startcl;
 808   g1h->heap_region_iterate(&startcl);
 809 }
 810 
 811 
 812 void G1ConcurrentMark::checkpointRootsInitialPost() {
 813   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 814 
 815   // Start Concurrent Marking weak-reference discovery.
 816   ReferenceProcessor* rp = g1h->ref_processor_cm();
 817   // enable ("weak") refs discovery
 818   rp->enable_discovery();
 819   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 820 
 821   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 822   // This is the start of the marking cycle; we expect all
 823   // threads to have SATB queues with active set to false.
 824   satb_mq_set.set_active_all_threads(true, /* new active value */
 825                                      false /* expected_active */);
 826 
 827   _root_regions.prepare_for_scan();
 828 
 829   // update_g1_committed() will be called at the end of an evac pause
 830   // when marking is on. So, it's also called at the end of the
 831   // initial-mark pause to update the heap end, if the heap expands
 832   // during it. No need to call it here.
 833 }
 834 
 835 /*
 836  * Notice that in the next two methods, we actually leave the STS
 837  * during the barrier sync and join it immediately afterwards. If we
 838  * do not do this, the following deadlock can occur: one thread could
 839  * be in the barrier sync code, waiting for the other thread to also
 840  * sync up, whereas another one could be trying to yield, while also
 841  * waiting for the other threads to sync up too.
 842  *
 843  * Note, however, that this code is also used during remark and in
 844  * this case we should not attempt to leave / enter the STS, otherwise
 845  * we'll either hit an assert (debug / fastdebug) or deadlock
 846  * (product). So we should only leave / enter the STS if we are
 847  * operating concurrently.
 848  *
 849  * Because the thread that does the sync barrier has left the STS, it
 850  * is possible for it to be suspended while a Full GC or an evacuation
 851  * pause occurs. This is actually safe, since entering the sync
 852  * barrier is one of the last things do_marking_step() does, and it
 853  * doesn't manipulate any data structures afterwards.
 854  */
 855 
 856 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
 857   bool barrier_aborted;
 858   {
 859     SuspendibleThreadSetLeaver sts_leave(concurrent());
 860     barrier_aborted = !_first_overflow_barrier_sync.enter();
 861   }
 862 
 863   // at this point everyone should have synced up and not be doing any
 864   // more work
 865 
 866   if (barrier_aborted) {
 867     // If the barrier aborted we ignore the overflow condition and
 868     // just abort the whole marking phase as quickly as possible.
 869     return;
 870   }
 871 
 872   // If we're executing the concurrent phase of marking, reset the marking
 873   // state; otherwise the marking state is reset after reference processing,
 874   // during the remark pause.
 875   // If we reset here as a result of an overflow during the remark we will
 876   // see assertion failures from any subsequent set_concurrency_and_phase()
 877   // calls.
 878   if (concurrent()) {
 879     // let the task associated with worker 0 do this
 880     if (worker_id == 0) {
 881       // task 0 is responsible for clearing the global data structures
 882       // We should be here because of an overflow. During STW we should
 883       // not clear the overflow flag since we rely on it being true when
 884       // we exit this method to abort the pause and restart concurrent
 885       // marking.
 886       reset_marking_state(true /* clear_overflow */);
 887 
 888       log_info(gc, marking)("Concurrent Mark reset for overflow");
 889     }
 890   }
 891 
 892   // after this, each task should reset its own data structures and
 893   // then go into the second barrier
 894 }
 895 
 896 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 897   SuspendibleThreadSetLeaver sts_leave(concurrent());
 898   _second_overflow_barrier_sync.enter();
 899 
 900   // at this point everything should be re-initialized and ready to go
 901 }
 902 
 903 class G1CMConcurrentMarkingTask: public AbstractGangTask {
 904 private:
 905   G1ConcurrentMark*     _cm;
 906   ConcurrentMarkThread* _cmt;
 907 
 908 public:
 909   void work(uint worker_id) {
 910     assert(Thread::current()->is_ConcurrentGC_thread(),
 911            "this should only be done by a conc GC thread");
 912     ResourceMark rm;
 913 
 914     double start_vtime = os::elapsedVTime();
 915 
 916     {
 917       SuspendibleThreadSetJoiner sts_join;
 918 
 919       assert(worker_id < _cm->active_tasks(), "invariant");
 920       G1CMTask* the_task = _cm->task(worker_id);
 921       the_task->record_start_time();
 922       if (!_cm->has_aborted()) {
 923         do {
 924           double start_vtime_sec = os::elapsedVTime();
 925           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
 926 
 927           the_task->do_marking_step(mark_step_duration_ms,
 928                                     true  /* do_termination */,
 929                                     false /* is_serial*/);
 930 
 931           double end_vtime_sec = os::elapsedVTime();
 932           double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
 933           _cm->clear_has_overflown();
 934 
 935           _cm->do_yield_check();
 936 
 937           jlong sleep_time_ms;
 938           if (!_cm->has_aborted() && the_task->has_aborted()) {
 939             sleep_time_ms =
 940               (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
 941             {
 942               SuspendibleThreadSetLeaver sts_leave;
 943               os::sleep(Thread::current(), sleep_time_ms, false);
 944             }
 945           }
 946         } while (!_cm->has_aborted() && the_task->has_aborted());
 947       }
 948       the_task->record_end_time();
 949       guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
 950     }
 951 
 952     double end_vtime = os::elapsedVTime();
 953     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 954   }
 955 
 956   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
 957                             ConcurrentMarkThread* cmt) :
 958       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
 959 
 960   ~G1CMConcurrentMarkingTask() { }
 961 };
 962 
 963 // Calculates the number of active workers for a concurrent
 964 // phase.
 965 uint G1ConcurrentMark::calc_parallel_marking_threads() {
 966   uint n_conc_workers = 0;
 967   if (!UseDynamicNumberOfGCThreads ||
 968       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 969        !ForceDynamicNumberOfGCThreads)) {
 970     n_conc_workers = max_parallel_marking_threads();
 971   } else {
 972     n_conc_workers =
 973       AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
 974                                                       1, /* Minimum workers */
 975                                                       parallel_marking_threads(),
 976                                                       Threads::number_of_non_daemon_threads());
 977     // Don't scale down "n_conc_workers" by scale_parallel_threads() because
 978     // that scaling has already gone into "_max_parallel_marking_threads".
 979   }
 980   assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
 981          "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
 982          max_parallel_marking_threads(), n_conc_workers);
 983   return n_conc_workers;
 984 }
 985 
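     // Scan a single root region (currently always a survivor region of the
     // initial-mark pause, see the NTAMS assert below). Objects in a root region are
     // implicitly live and are not traced by regular marking, so this scan marks
     // everything they point to, ensuring objects reachable only through root regions
     // are not missed.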
 986 void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
 987   // Currently, only survivors can be root regions.
 988   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
 989   G1RootRegionScanClosure cl(_g1h, this);
 990 
 991   const uintx interval = PrefetchScanIntervalInBytes;
 992   HeapWord* curr = hr->bottom();
 993   const HeapWord* end = hr->top();
 994   while (curr < end) {
 995     Prefetch::read(curr, interval);
 996     oop obj = oop(curr);
 997     int size = obj->oop_iterate_size(&cl);
 998     assert(size == obj->size(), "sanity");
 999     curr += size;
1000   }
1001 }
1002 
1003 class G1CMRootRegionScanTask : public AbstractGangTask {
1004 private:
1005   G1ConcurrentMark* _cm;
1006 
1007 public:
1008   G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
1009     AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
1010 
1011   void work(uint worker_id) {
1012     assert(Thread::current()->is_ConcurrentGC_thread(),
1013            "this should only be done by a conc GC thread");
1014 
1015     G1CMRootRegions* root_regions = _cm->root_regions();
1016     HeapRegion* hr = root_regions->claim_next();
1017     while (hr != NULL) {
1018       _cm->scanRootRegion(hr);
1019       hr = root_regions->claim_next();
1020     }
1021   }
1022 };
1023 
1024 void G1ConcurrentMark::scan_root_regions() {
1025   // scan_in_progress() will have been set to true only if there was
1026   // at least one root region to scan. So, if it's false, we
1027   // should not attempt to do any further work.
1028   if (root_regions()->scan_in_progress()) {
1029     assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
1030 
1031     _parallel_marking_threads = MIN2(calc_parallel_marking_threads(),
1032                                      // We distribute work on a per-region basis, so starting
1033                                      // more threads than that is useless.
1034                                      root_regions()->num_root_regions());
1035     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1036            "Maximum number of marking threads exceeded");
1037 
1038     G1CMRootRegionScanTask task(this);
1039     log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
1040                         task.name(), _parallel_marking_threads, root_regions()->num_root_regions());
1041     _parallel_workers->run_task(&task, _parallel_marking_threads);
1042 
1043     // It's possible that has_aborted() is true here without actually
1044     // aborting the survivor scan earlier. This is OK as it's
1045     // mainly used for sanity checking.
1046     root_regions()->scan_finished();
1047   }
1048 }
1049 
1050 void G1ConcurrentMark::concurrent_cycle_start() {
1051   _gc_timer_cm->register_gc_start();
1052 
1053   _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
1054 
1055   _g1h->trace_heap_before_gc(_gc_tracer_cm);
1056 }
1057 
1058 void G1ConcurrentMark::concurrent_cycle_end() {
1059   _g1h->trace_heap_after_gc(_gc_tracer_cm);
1060 
1061   if (has_aborted()) {
1062     _gc_tracer_cm->report_concurrent_mode_failure();
1063   }
1064 
1065   _gc_timer_cm->register_gc_end();
1066 
1067   _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1068 }
1069 
1070 void G1ConcurrentMark::mark_from_roots() {
1071   // we might be tempted to assert that:
1072   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1073   //        "inconsistent argument?");
1074   // However that wouldn't be right, because it's possible that
1075   // a safepoint is indeed in progress as a younger generation
1076   // stop-the-world GC happens even as we mark in this generation.
1077 
1078   _restart_for_overflow = false;
1079 
1080   // _g1h has _n_par_threads
1081   _parallel_marking_threads = calc_parallel_marking_threads();
1082   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1083     "Maximum number of marking threads exceeded");
1084 
1085   uint active_workers = MAX2(1U, parallel_marking_threads());
1086   assert(active_workers > 0, "Should have been set");
1087 
1088   // Setting active workers is not guaranteed since fewer
1089   // worker threads may currently exist and more may not be
1090   // available.
1091   active_workers = _parallel_workers->update_active_workers(active_workers);
1092   log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());
1093 
1094   // Parallel task terminator is set in "set_concurrency_and_phase()"
1095   set_concurrency_and_phase(active_workers, true /* concurrent */);
1096 
1097   G1CMConcurrentMarkingTask markingTask(this, cmThread());
1098   _parallel_workers->run_task(&markingTask);
1099   print_stats();
1100 }
1101 
1102 void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1103   // world is stopped at this checkpoint
1104   assert(SafepointSynchronize::is_at_safepoint(),
1105          "world should be stopped");
1106 
1107   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1108 
1109   // If a full collection has happened, we shouldn't do this.
1110   if (has_aborted()) {
1111     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1112     return;
1113   }
1114 
1115   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1116 
1117   if (VerifyDuringGC) {
1118     HandleMark hm;  // handle scope
1119     g1h->prepare_for_verify();
1120     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1121   }
1122   g1h->verifier()->check_bitmaps("Remark Start");
1123 
1124   G1Policy* g1p = g1h->g1_policy();
1125   g1p->record_concurrent_mark_remark_start();
1126 
1127   double start = os::elapsedTime();
1128 
1129   checkpointRootsFinalWork();
1130 
1131   double mark_work_end = os::elapsedTime();
1132 
1133   weakRefsWork(clear_all_soft_refs);
1134 
1135   if (has_overflown()) {
1136     // We overflowed.  Restart concurrent marking.
1137     _restart_for_overflow = true;
1138 
1139     // Verify the heap w.r.t. the previous marking bitmap.
1140     if (VerifyDuringGC) {
1141       HandleMark hm;  // handle scope
1142       g1h->prepare_for_verify();
1143       Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
1144     }
1145 
1146     // Clear the marking state because we will be restarting
1147     // marking due to overflowing the global mark stack.
1148     reset_marking_state();
1149   } else {
1150     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1151     // We're done with marking.
1152     // This is the end of the marking cycle; we expect all
1153     // threads to have SATB queues with active set to true.
1154     satb_mq_set.set_active_all_threads(false, /* new active value */
1155                                        true /* expected_active */);
1156 
1157     if (VerifyDuringGC) {
1158       HandleMark hm;  // handle scope
1159       g1h->prepare_for_verify();
1160       Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
1161     }
1162     g1h->verifier()->check_bitmaps("Remark End");
1163     assert(!restart_for_overflow(), "sanity");
1164     // Completely reset the marking state since marking completed
1165     set_non_marking_state();
1166   }
1167 
1168   // Expand the marking stack, if we have to and if we can.
1169   if (_global_mark_stack.should_expand()) {
1170     _global_mark_stack.expand();
1171   }
1172 
1173   // Statistics
1174   double now = os::elapsedTime();
1175   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1176   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1177   _remark_times.add((now - start) * 1000.0);
1178 
1179   g1p->record_concurrent_mark_remark_end();
1180 
1181   G1CMIsAliveClosure is_alive(g1h);
1182   _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1183 }
1184 
1185 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1186   G1CollectedHeap* _g1;
1187   size_t _freed_bytes;
1188   FreeRegionList* _local_cleanup_list;
1189   uint _old_regions_removed;
1190   uint _humongous_regions_removed;
1191   HRRSCleanupTask* _hrrs_cleanup_task;
1192 
1193 public:
1194   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1195                              FreeRegionList* local_cleanup_list,
1196                              HRRSCleanupTask* hrrs_cleanup_task) :
1197     _g1(g1),
1198     _freed_bytes(0),
1199     _local_cleanup_list(local_cleanup_list),
1200     _old_regions_removed(0),
1201     _humongous_regions_removed(0),
1202     _hrrs_cleanup_task(hrrs_cleanup_task) { }
1203 
1204   size_t freed_bytes() { return _freed_bytes; }
1205   const uint old_regions_removed() { return _old_regions_removed; }
1206   const uint humongous_regions_removed() { return _humongous_regions_removed; }
1207 
1208   bool doHeapRegion(HeapRegion *hr) {
1209     if (hr->is_archive()) {
1210       return false;
1211     }
1212     _g1->reset_gc_time_stamps(hr);
1213     hr->note_end_of_marking();
1214 
1215     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1216       _freed_bytes += hr->used();
1217       hr->set_containing_set(NULL);
1218       if (hr->is_humongous()) {
1219         _humongous_regions_removed++;
1220         _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
1221       } else {
1222         _old_regions_removed++;
1223         _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
1224       }
1225     } else {
1226       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1227     }
1228 
1229     return false;
1230   }
1231 };
1232 
1233 class G1ParNoteEndTask: public AbstractGangTask {
1234   friend class G1NoteEndOfConcMarkClosure;
1235 
1236 protected:
1237   G1CollectedHeap* _g1h;
1238   FreeRegionList* _cleanup_list;
1239   HeapRegionClaimer _hrclaimer;
1240 
1241 public:
1242   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1243       AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1244   }
1245 
1246   void work(uint worker_id) {
1247     FreeRegionList local_cleanup_list("Local Cleanup List");
1248     HRRSCleanupTask hrrs_cleanup_task;
1249     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1250                                            &hrrs_cleanup_task);
1251     _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1252     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1253 
1254     // Now update the lists
1255     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1256     {
1257       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1258       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1259 
1260       // If we iterated over the global cleanup list at the end of
1261       // cleanup to do this printing, we could not guarantee that we only
1262       // generate output for the newly-reclaimed regions (the list
1263       // might not be empty at the beginning of cleanup; we might
1264       // still be working on its previous contents). So we do the
1265       // printing here, before we append the new regions to the global
1266       // cleanup list.
1267 
1268       G1HRPrinter* hr_printer = _g1h->hr_printer();
1269       if (hr_printer->is_active()) {
1270         FreeRegionListIterator iter(&local_cleanup_list);
1271         while (iter.more_available()) {
1272           HeapRegion* hr = iter.get_next();
1273           hr_printer->cleanup(hr);
1274         }
1275       }
1276 
1277       _cleanup_list->add_ordered(&local_cleanup_list);
1278       assert(local_cleanup_list.is_empty(), "post-condition");
1279 
1280       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1281     }
1282   }
1283 };
1284 
1285 void G1ConcurrentMark::cleanup() {
1286   // world is stopped at this checkpoint
1287   assert(SafepointSynchronize::is_at_safepoint(),
1288          "world should be stopped");
1289   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1290 
1291   // If a full collection has happened, we shouldn't do this.
1292   if (has_aborted()) {
1293     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1294     return;
1295   }
1296 
1297   g1h->verifier()->verify_region_sets_optional();
1298 
1299   if (VerifyDuringGC) {
1300     HandleMark hm;  // handle scope
1301     g1h->prepare_for_verify();
1302     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1303   }
1304   g1h->verifier()->check_bitmaps("Cleanup Start");
1305 
1306   G1Policy* g1p = g1h->g1_policy();
1307   g1p->record_concurrent_mark_cleanup_start();
1308 
1309   double start = os::elapsedTime();
1310 
1311   HeapRegionRemSet::reset_for_cleanup_tasks();
1312 
1313   {
1314     GCTraceTime(Debug, gc)("Finalize Live Data");
1315     finalize_live_data();
1316   }
1317 
1318   if (VerifyDuringGC) {
1319     GCTraceTime(Debug, gc)("Verify Live Data");
1320     verify_live_data();
1321   }
1322 
1323   g1h->collector_state()->set_mark_in_progress(false);
1324 
1325   double count_end = os::elapsedTime();
1326   double this_final_counting_time = (count_end - start);
1327   _total_counting_time += this_final_counting_time;
1328 
1329   if (log_is_enabled(Trace, gc, liveness)) {
1330     G1PrintRegionLivenessInfoClosure cl("Post-Marking");
1331     _g1h->heap_region_iterate(&cl);
1332   }
1333 
1334   // Install newly created mark bitMap as "prev".
1335   swapMarkBitMaps();
1336 
1337   g1h->reset_gc_time_stamp();
1338 
1339   uint n_workers = _g1h->workers()->active_workers();
1340 
1341   // Note end of marking in all heap regions.
1342   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1343   g1h->workers()->run_task(&g1_par_note_end_task);
1344   g1h->check_gc_time_stamps();
1345 
1346   if (!cleanup_list_is_empty()) {
1347     // The cleanup list is not empty, so we'll have to process it
1348     // concurrently. Notify anyone else that might be wanting free
1349     // regions that there will be more free regions coming soon.
1350     g1h->set_free_regions_coming();
1351   }
1352 
1353   // Remembered set scrubbing must be done before the record_concurrent_mark_cleanup_end()
1354   // call below, since it affects the metric by which we sort the heap regions.
1355   if (G1ScrubRemSets) {
1356     double rs_scrub_start = os::elapsedTime();
1357     g1h->scrub_rem_set();
1358     _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
1359   }
1360 
1361   // this will also free any regions totally full of garbage objects,
1362   // and sort the regions.
1363   g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1364 
1365   // Statistics.
1366   double end = os::elapsedTime();
1367   _cleanup_times.add((end - start) * 1000.0);
1368 
1369   // Clean up will have freed any regions completely full of garbage.
1370   // Update the soft reference policy with the new heap occupancy.
1371   Universe::update_heap_info_at_gc();
1372 
1373   if (VerifyDuringGC) {
1374     HandleMark hm;  // handle scope
1375     g1h->prepare_for_verify();
1376     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
1377   }
1378 
1379   g1h->verifier()->check_bitmaps("Cleanup End");
1380 
1381   g1h->verifier()->verify_region_sets_optional();
1382 
1383   // We need to make this be a "collection" so any collection pause that
1384   // races with it goes around and waits for completeCleanup to finish.
1385   g1h->increment_total_collections();
1386 
1387   // Clean out dead classes and update Metaspace sizes.
1388   if (ClassUnloadingWithConcurrentMark) {
1389     ClassLoaderDataGraph::purge();
1390   }
1391   MetaspaceGC::compute_new_size();
1392 
1393   // We reclaimed old regions so we should calculate the sizes to make
1394   // sure we update the old gen/space data.
1395   g1h->g1mm()->update_sizes();
1396   g1h->allocation_context_stats().update_after_mark();
1397 }
1398 
1399 void G1ConcurrentMark::complete_cleanup() {
1400   if (has_aborted()) return;
1401 
1402   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1403 
1404   _cleanup_list.verify_optional();
1405   FreeRegionList tmp_free_list("Tmp Free List");
1406 
1407   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1408                                   "cleanup list has %u entries",
1409                                   _cleanup_list.length());
1410 
1411   // No one else should be accessing the _cleanup_list at this point,
1412   // so it is not necessary to take any locks
1413   while (!_cleanup_list.is_empty()) {
1414     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1415     assert(hr != NULL, "Got NULL from a non-empty list");
1416     hr->par_clear();
1417     tmp_free_list.add_ordered(hr);
1418 
1419     // Instead of adding one region at a time to the secondary_free_list,
1420     // we accumulate them in the local list and move them a few at a
1421     // time. This also cuts down on the number of notify_all() calls
1422     // we do during this process. We'll also append the local list when
1423     // _cleanup_list is empty (which means we just removed the last
1424     // region from the _cleanup_list).
1425     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
1426         _cleanup_list.is_empty()) {
1427       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1428                                       "appending %u entries to the secondary_free_list, "
1429                                       "cleanup list still has %u entries",
1430                                       tmp_free_list.length(),
1431                                       _cleanup_list.length());
1432 
1433       {
1434         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1435         g1h->secondary_free_list_add(&tmp_free_list);
1436         SecondaryFreeList_lock->notify_all();
1437       }
1438 #ifndef PRODUCT
1439       if (G1StressConcRegionFreeing) {
1440         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
1441           os::sleep(Thread::current(), (jlong) 1, false);
1442         }
1443       }
1444 #endif
1445     }
1446   }
1447   assert(tmp_free_list.is_empty(), "post-condition");
1448 }
1449 
// Supporting Object and Oop closures for reference discovery
// and processing during marking
1452 
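// An object is considered alive if its address is non-NULL and it either
// lies outside the G1 reserved heap or is not "ill" with respect to the
// marking information.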
1453 bool G1CMIsAliveClosure::do_object_b(oop obj) {
1454   HeapWord* addr = (HeapWord*)obj;
1455   return addr != NULL &&
1456          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
1457 }
1458 
// 'Keep Alive' oop closure used by both serial and parallel reference processing.
1460 // Uses the G1CMTask associated with a worker thread (for serial reference
1461 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1462 // trace referent objects.
1463 //
1464 // Using the G1CMTask and embedded local queues avoids having the worker
1465 // threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also using the tasks' local queues removes the potential
1468 // of the workers interfering with each other that could occur if
1469 // operating on the global stack.
1470 
1471 class G1CMKeepAliveAndDrainClosure: public OopClosure {
1472   G1ConcurrentMark* _cm;
1473   G1CMTask*         _task;
1474   int               _ref_counter_limit;
1475   int               _ref_counter;
1476   bool              _is_serial;
1477  public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task),
    _ref_counter_limit(G1RefProcDrainInterval), _is_serial(is_serial) {
1481     assert(_ref_counter_limit > 0, "sanity");
1482     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1483     _ref_counter = _ref_counter_limit;
1484   }
1485 
1486   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1487   virtual void do_oop(      oop* p) { do_oop_work(p); }
1488 
1489   template <class T> void do_oop_work(T* p) {
1490     if (!_cm->has_overflown()) {
1491       oop obj = oopDesc::load_decode_heap_oop(p);
1492       _task->deal_with_reference(obj);
1493       _ref_counter--;
1494 
1495       if (_ref_counter == 0) {
1496         // We have dealt with _ref_counter_limit references, pushing them
1497         // and objects reachable from them on to the local stack (and
1498         // possibly the global stack). Call G1CMTask::do_marking_step() to
1499         // process these entries.
1500         //
1501         // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1502         // there's nothing more to do (i.e. we're done with the entries that
1503         // were pushed as a result of the G1CMTask::deal_with_reference() calls
1504         // above) or we overflow.
1505         //
1506         // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1507         // flag while there may still be some work to do. (See the comment at
1508         // the beginning of G1CMTask::do_marking_step() for those conditions -
1509         // one of which is reaching the specified time target.) It is only
1510         // when G1CMTask::do_marking_step() returns without setting the
1511         // has_aborted() flag that the marking step has completed.
1512         do {
1513           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1514           _task->do_marking_step(mark_step_duration_ms,
1515                                  false      /* do_termination */,
1516                                  _is_serial);
1517         } while (_task->has_aborted() && !_cm->has_overflown());
1518         _ref_counter = _ref_counter_limit;
1519       }
1520     }
1521   }
1522 };
1523 
1524 // 'Drain' oop closure used by both serial and parallel reference processing.
1525 // Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
1527 // do_marking_step routine, with an unbelievably large timeout value,
1528 // to drain the marking data structures of the remaining entries
1529 // added by the 'keep alive' oop closure above.
1530 
1531 class G1CMDrainMarkingStackClosure: public VoidClosure {
1532   G1ConcurrentMark* _cm;
1533   G1CMTask*         _task;
1534   bool              _is_serial;
1535  public:
1536   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1537     _cm(cm), _task(task), _is_serial(is_serial) {
1538     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1539   }
1540 
1541   void do_void() {
1542     do {
1543       // We call G1CMTask::do_marking_step() to completely drain the local
1544       // and global marking stacks of entries pushed by the 'keep alive'
1545       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1546       //
1547       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1548       // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
1550       // closure to the entries on the discovered ref lists) or we overflow
1551       // the global marking stack.
1552       //
1553       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1554       // flag while there may still be some work to do. (See the comment at
1555       // the beginning of G1CMTask::do_marking_step() for those conditions -
1556       // one of which is reaching the specified time target.) It is only
1557       // when G1CMTask::do_marking_step() returns without setting the
1558       // has_aborted() flag that the marking step has completed.
1559 
1560       _task->do_marking_step(1000000000.0 /* something very large */,
1561                              true         /* do_termination */,
1562                              _is_serial);
1563     } while (_task->has_aborted() && !_cm->has_overflown());
1564   }
1565 };
1566 
1567 // Implementation of AbstractRefProcTaskExecutor for parallel
1568 // reference processing at the end of G1 concurrent marking
1569 
1570 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
1571 private:
1572   G1CollectedHeap*  _g1h;
1573   G1ConcurrentMark* _cm;
1574   WorkGang*         _workers;
1575   uint              _active_workers;
1576 
1577 public:
1578   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1579                           G1ConcurrentMark* cm,
1580                           WorkGang* workers,
1581                           uint n_workers) :
1582     _g1h(g1h), _cm(cm),
1583     _workers(workers), _active_workers(n_workers) { }
1584 
1585   // Executes the given task using concurrent marking worker threads.
1586   virtual void execute(ProcessTask& task);
1587   virtual void execute(EnqueueTask& task);
1588 };
1589 
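// Gang task that runs a reference processing ProcessTask on the worker
// threads, supplying each worker with is-alive, keep-alive and drain
// closures wired to that worker's G1CMTask.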
1590 class G1CMRefProcTaskProxy: public AbstractGangTask {
1591   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1592   ProcessTask&      _proc_task;
1593   G1CollectedHeap*  _g1h;
1594   G1ConcurrentMark* _cm;
1595 
1596 public:
1597   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1598                        G1CollectedHeap* g1h,
1599                        G1ConcurrentMark* cm) :
1600     AbstractGangTask("Process reference objects in parallel"),
1601     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1602     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1603     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1604   }
1605 
1606   virtual void work(uint worker_id) {
1607     ResourceMark rm;
1608     HandleMark hm;
1609     G1CMTask* task = _cm->task(worker_id);
1610     G1CMIsAliveClosure g1_is_alive(_g1h);
1611     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1612     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1613 
1614     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1615   }
1616 };
1617 
1618 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
1619   assert(_workers != NULL, "Need parallel worker threads.");
1620   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1621 
1622   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1623 
1624   // We need to reset the concurrency level before each
1625   // proxy task execution, so that the termination protocol
1626   // and overflow handling in G1CMTask::do_marking_step() knows
1627   // how many workers to wait for.
1628   _cm->set_concurrency(_active_workers);
1629   _workers->run_task(&proc_task_proxy);
1630 }
1631 
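// Gang task that runs a reference EnqueueTask in parallel on the worker
// threads.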
1632 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
1633   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
1634   EnqueueTask& _enq_task;
1635 
1636 public:
1637   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
1638     AbstractGangTask("Enqueue reference objects in parallel"),
1639     _enq_task(enq_task) { }
1640 
1641   virtual void work(uint worker_id) {
1642     _enq_task.work(worker_id);
1643   }
1644 };
1645 
1646 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1647   assert(_workers != NULL, "Need parallel worker threads.");
1648   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1649 
1650   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1651 
1652   // Not strictly necessary but...
1653   //
1654   // We need to reset the concurrency level before each
1655   // proxy task execution, so that the termination protocol
1656   // and overflow handling in G1CMTask::do_marking_step() knows
1657   // how many workers to wait for.
1658   _cm->set_concurrency(_active_workers);
1659   _workers->run_task(&enq_task_proxy);
1660 }
1661 
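// Parallel part of class unloading: cleans out the code cache and the
// string and symbol tables. purged_classes tells the cleaning task whether
// the preceding serial unloading pass actually unloaded any classes.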
1662 void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
1663   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
1664 }
1665 
1666 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
1667   if (has_overflown()) {
1668     // Skip processing the discovered references if we have
1669     // overflown the global marking stack. Reference objects
1670     // only get discovered once so it is OK to not
1671     // de-populate the discovered reference lists. We could have,
1672     // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
1674     return;
1675   }
1676 
1677   ResourceMark rm;
1678   HandleMark   hm;
1679 
1680   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1681 
1682   // Is alive closure.
1683   G1CMIsAliveClosure g1_is_alive(g1h);
1684 
1685   // Inner scope to exclude the cleaning of the string and symbol
1686   // tables from the displayed time.
1687   {
1688     GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1689 
1690     ReferenceProcessor* rp = g1h->ref_processor_cm();
1691 
1692     // See the comment in G1CollectedHeap::ref_processing_init()
1693     // about how reference processing currently works in G1.
1694 
1695     // Set the soft reference policy
1696     rp->setup_policy(clear_all_soft_refs);
1697     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1698 
1699     // Instances of the 'Keep Alive' and 'Complete GC' closures used
1700     // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
1702     // JNI references during parallel reference processing.
1703     //
1704     // These closures do not need to synchronize with the worker
1705     // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
1707     // reference processing is not multi-threaded and is thus
1708     // performed by the current thread instead of a gang worker).
1709     //
1710     // The gang tasks involved in parallel reference processing create
1711     // their own instances of these closures, which do their own
1712     // synchronization among themselves.
1713     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1714     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1715 
1716     // We need at least one active thread. If reference processing
1717     // is not multi-threaded we use the current (VMThread) thread,
1718     // otherwise we use the work gang from the G1CollectedHeap and
1719     // we utilize all the worker threads we can.
1720     bool processing_is_mt = rp->processing_is_mt();
1721     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1722     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1723 
1724     // Parallel processing task executor.
1725     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1726                                               g1h->workers(), active_workers);
1727     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1728 
1729     // Set the concurrency level. The phase was already set prior to
1730     // executing the remark task.
1731     set_concurrency(active_workers);
1732 
1733     // Set the degree of MT processing here.  If the discovery was done MT,
1734     // the number of threads involved during discovery could differ from
1735     // the number of active workers.  This is OK as long as the discovered
1736     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1737     rp->set_active_mt_degree(active_workers);
1738 
1739     // Process the weak references.
1740     const ReferenceProcessorStats& stats =
1741         rp->process_discovered_references(&g1_is_alive,
1742                                           &g1_keep_alive,
1743                                           &g1_drain_mark_stack,
1744                                           executor,
1745                                           _gc_timer_cm);
1746     _gc_tracer_cm->report_gc_reference_stats(stats);
1747 
1748     // The do_oop work routines of the keep_alive and drain_marking_stack
1749     // oop closures will set the has_overflown flag if we overflow the
1750     // global marking stack.
1751 
1752     assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
1753             "Mark stack should be empty (unless it is out of memory)");
1754 
1755     if (_global_mark_stack.is_out_of_memory()) {
1756       // This should have been done already when we tried to push an
1757       // entry on to the global mark stack. But let's do it again.
1758       set_has_overflown();
1759     }
1760 
    assert(rp->num_q() == active_workers, "Reference processor queue count should match the number of active workers");
1762 
1763     rp->enqueue_discovered_references(executor);
1764 
1765     rp->verify_no_references_recorded();
1766     assert(!rp->discovery_enabled(), "Post condition");
1767   }
1768 
1769   if (has_overflown()) {
1770     // We can not trust g1_is_alive if the marking stack overflowed
1771     return;
1772   }
1773 
1774   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1775 
1776   // Unload Klasses, String, Symbols, Code Cache, etc.
1777   if (ClassUnloadingWithConcurrentMark) {
1778     bool purged_classes;
1779 
1780     {
1781       GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
1782       purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
1783     }
1784 
1785     {
1786       GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
1787       weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
1788     }
1789   }
1790 
1791   if (G1StringDedup::is_enabled()) {
1792     GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
1793     G1StringDedup::unlink(&g1_is_alive);
1794   }
1795 }
1796 
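// Swap the prev and next mark bitmap pointers so that the bitmap populated
// by the marking that just completed becomes the "prev" bitmap for the
// next cycle.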
1797 void G1ConcurrentMark::swapMarkBitMaps() {
1798   G1CMBitMapRO* temp = _prevMarkBitMap;
1799   _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
1800   _nextMarkBitMap    = (G1CMBitMap*)  temp;
1801 }
1802 
1803 // Closure for marking entries in SATB buffers.
1804 class G1CMSATBBufferClosure : public SATBBufferClosure {
1805 private:
1806   G1CMTask* _task;
1807   G1CollectedHeap* _g1h;
1808 
1809   // This is very similar to G1CMTask::deal_with_reference, but with
1810   // more relaxed requirements for the argument, so this must be more
1811   // circumspect about treating the argument as an object.
1812   void do_entry(void* entry) const {
1813     _task->increment_refs_reached();
1814     HeapRegion* hr = _g1h->heap_region_containing(entry);
1815     if (entry < hr->next_top_at_mark_start()) {
1816       // Until we get here, we don't know whether entry refers to a valid
1817       // object; it could instead have been a stale reference.
1818       oop obj = static_cast<oop>(entry);
1819       assert(obj->is_oop(true /* ignore mark word */),
1820              "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
1821       _task->make_reference_grey(obj);
1822     }
1823   }
1824 
1825 public:
1826   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1827     : _task(task), _g1h(g1h) { }
1828 
1829   virtual void do_buffer(void** buffer, size_t size) {
1830     for (size_t i = 0; i < size; ++i) {
1831       do_entry(buffer[i]);
1832     }
1833   }
1834 };
1835 
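// Per-thread closure used during remark: for each Java thread it scans the
// thread's nmethods for roots and drains the thread's SATB buffer; for the
// VM thread it drains the shared SATB queue.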
1836 class G1RemarkThreadsClosure : public ThreadClosure {
1837   G1CMSATBBufferClosure _cm_satb_cl;
1838   G1CMOopClosure _cm_cl;
1839   MarkingCodeBlobClosure _code_cl;
1840   int _thread_parity;
1841 
1842  public:
1843   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1844     _cm_satb_cl(task, g1h),
1845     _cm_cl(g1h, g1h->concurrent_mark(), task),
1846     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1847     _thread_parity(Threads::thread_claim_parity()) {}
1848 
1849   void do_thread(Thread* thread) {
1850     if (thread->is_Java_thread()) {
1851       if (thread->claim_oops_do(true, _thread_parity)) {
1852         JavaThread* jt = (JavaThread*)thread;
1853 
        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
        // however, oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1860         jt->nmethods_do(&_code_cl);
1861 
1862         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1863       }
1864     } else if (thread->is_VM_thread()) {
1865       if (thread->claim_oops_do(true, _thread_parity)) {
1866         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1867       }
1868     }
1869   }
1870 };
1871 
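// Gang task for the parallel part of remark. Each worker first applies
// G1RemarkThreadsClosure to all threads and then calls do_marking_step()
// repeatedly until marking finishes or the global mark stack overflows.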
1872 class G1CMRemarkTask: public AbstractGangTask {
1873 private:
1874   G1ConcurrentMark* _cm;
1875 public:
1876   void work(uint worker_id) {
1877     // Since all available tasks are actually started, we should
1878     // only proceed if we're supposed to be active.
1879     if (worker_id < _cm->active_tasks()) {
1880       G1CMTask* task = _cm->task(worker_id);
1881       task->record_start_time();
1882       {
1883         ResourceMark rm;
1884         HandleMark hm;
1885 
1886         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1887         Threads::threads_do(&threads_f);
1888       }
1889 
1890       do {
1891         task->do_marking_step(1000000000.0 /* something very large */,
1892                               true         /* do_termination       */,
1893                               false        /* is_serial            */);
1894       } while (task->has_aborted() && !_cm->has_overflown());
1895       // If we overflow, then we do not want to restart. We instead
1896       // want to abort remark and do concurrent marking again.
1897       task->record_end_time();
1898     }
1899   }
1900 
1901   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1902     AbstractGangTask("Par Remark"), _cm(cm) {
1903     _cm->terminator()->reset_for_reuse(active_workers);
1904   }
1905 };
1906 
1907 void G1ConcurrentMark::checkpointRootsFinalWork() {
1908   ResourceMark rm;
1909   HandleMark   hm;
1910   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1911 
1912   GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1913 
1914   g1h->ensure_parsability(false);
1915 
1916   // this is remark, so we'll use up all active threads
1917   uint active_workers = g1h->workers()->active_workers();
1918   set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
  // value originally calculated in the G1ConcurrentMark
  // constructor and pass the number of active workers
  // through the gang in the task.
1923 
1924   {
1925     StrongRootsScope srs(active_workers);
1926 
1927     G1CMRemarkTask remarkTask(this, active_workers);
1928     // We will start all available threads, even if we decide that the
1929     // active_workers will be fewer. The extra ones will just bail out
1930     // immediately.
1931     g1h->workers()->run_task(&remarkTask);
1932   }
1933 
1934   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1935   guarantee(has_overflown() ||
1936             satb_mq_set.completed_buffers_num() == 0,
1937             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1938             BOOL_TO_STR(has_overflown()),
1939             satb_mq_set.completed_buffers_num());
1940 
1941   print_stats();
1942 }
1943 
1944 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
1945   // Note we are overriding the read-only view of the prev map here, via
1946   // the cast.
1947   ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
1948 }
1949 
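// Claim the next region to be scanned by atomically advancing the global
// finger past it. Returns NULL either when there are no regions left below
// _heap_end or when the claimed region turns out to have nothing to scan,
// in which case the caller should simply call claim_region() again.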
1950 HeapRegion*
1951 G1ConcurrentMark::claim_region(uint worker_id) {
1952   // "checkpoint" the finger
1953   HeapWord* finger = _finger;
1954 
1955   // _heap_end will not change underneath our feet; it only changes at
1956   // yield points.
1957   while (finger < _heap_end) {
1958     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1959 
1960     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1961     // Make sure that the reads below do not float before loading curr_region.
1962     OrderAccess::loadload();
    // The heap_region_containing() call above may return NULL, as we always
    // claim regions up to the end of the heap. In this case, just jump to
    // the next region.
1965     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1966 
1967     // Is the gap between reading the finger and doing the CAS too long?
1968     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
1969     if (res == finger && curr_region != NULL) {
1970       // we succeeded
1971       HeapWord*   bottom        = curr_region->bottom();
1972       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1973 
      // Notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further.
1976       assert(_finger >= end, "the finger should have moved forward");
1977 
1978       if (limit > bottom) {
1979         return curr_region;
1980       } else {
1981         assert(limit == bottom,
1982                "the region limit should be at bottom");
1983         // we return NULL and the caller should try calling
1984         // claim_region() again.
1985         return NULL;
1986       }
1987     } else {
1988       assert(_finger > finger, "the finger should have moved forward");
1989       // read it again
1990       finger = _finger;
1991     }
1992   }
1993 
1994   return NULL;
1995 }
1996 
1997 #ifndef PRODUCT
1998 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
1999 private:
2000   G1CollectedHeap* _g1h;
2001   const char* _phase;
2002   int _info;
2003 
2004 public:
2005   VerifyNoCSetOops(const char* phase, int info = -1) :
2006     _g1h(G1CollectedHeap::heap()),
2007     _phase(phase),
2008     _info(info)
2009   { }
2010 
2011   void operator()(oop obj) const {
2012     guarantee(G1CMObjArrayProcessor::is_array_slice(obj) || obj->is_oop(),
2013               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
2014               p2i(obj), _phase, _info);
2015     guarantee(G1CMObjArrayProcessor::is_array_slice(obj) || !_g1h->obj_in_cs(obj),
2016               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
2017               p2i(obj), _phase, _info);
2018   }
2019 };
2020 
2021 void G1ConcurrentMark::verify_no_cset_oops() {
2022   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2023   if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2024     return;
2025   }
2026 
2027   // Verify entries on the global mark stack
2028   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
2029 
2030   // Verify entries on the task queues
2031   for (uint i = 0; i < _max_worker_id; ++i) {
2032     G1CMTaskQueue* queue = _task_queues->queue(i);
2033     queue->iterate(VerifyNoCSetOops("Queue", i));
2034   }
2035 
2036   // Verify the global finger
2037   HeapWord* global_finger = finger();
2038   if (global_finger != NULL && global_finger < _heap_end) {
2039     // Since we always iterate over all regions, we might get a NULL HeapRegion
2040     // here.
2041     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
2042     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2043               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
2044               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
2045   }
2046 
2047   // Verify the task fingers
2048   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2049   for (uint i = 0; i < parallel_marking_threads(); ++i) {
2050     G1CMTask* task = _tasks[i];
2051     HeapWord* task_finger = task->finger();
2052     if (task_finger != NULL && task_finger < _heap_end) {
2053       // See above note on the global finger verification.
2054       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
2055       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2056                 !task_hr->in_collection_set(),
2057                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2058                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
2059     }
2060   }
2061 }
#endif // PRODUCT

void G1ConcurrentMark::create_live_data() {
2064   _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap);
2065 }
2066 
2067 void G1ConcurrentMark::finalize_live_data() {
2068   _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap);
2069 }
2070 
2071 void G1ConcurrentMark::verify_live_data() {
2072   _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap);
2073 }
2074 
2075 void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
2076   _g1h->g1_rem_set()->clear_card_live_data(workers);
2077 }
2078 
2079 #ifdef ASSERT
2080 void G1ConcurrentMark::verify_live_data_clear() {
2081   _g1h->g1_rem_set()->verify_card_live_data_is_clear();
2082 }
2083 #endif
2084 
2085 void G1ConcurrentMark::print_stats() {
2086   if (!log_is_enabled(Debug, gc, stats)) {
2087     return;
2088   }
2089   log_debug(gc, stats)("---------------------------------------------------------------------");
2090   for (size_t i = 0; i < _active_tasks; ++i) {
2091     _tasks[i]->print_stats();
2092     log_debug(gc, stats)("---------------------------------------------------------------------");
2093   }
2094 }
2095 
2096 void G1ConcurrentMark::abort() {
2097   if (!cmThread()->during_cycle() || _has_aborted) {
2098     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2099     return;
2100   }
2101 
2102   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2103   // concurrent bitmap clearing.
2104   {
2105     GCTraceTime(Debug, gc)("Clear Next Bitmap");
2106     clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
2107   }
2108   // Note we cannot clear the previous marking bitmap here
2109   // since VerifyDuringGC verifies the objects marked during
2110   // a full GC against the previous bitmap.
2111 
2112   {
2113     GCTraceTime(Debug, gc)("Clear Live Data");
2114     clear_live_data(_g1h->workers());
2115   }
2116   DEBUG_ONLY({
2117     GCTraceTime(Debug, gc)("Verify Live Data Clear");
2118     verify_live_data_clear();
2119   })
2120   // Empty mark stack
2121   reset_marking_state();
2122   for (uint i = 0; i < _max_worker_id; ++i) {
2123     _tasks[i]->clear_region_fields();
2124   }
2125   _first_overflow_barrier_sync.abort();
2126   _second_overflow_barrier_sync.abort();
2127   _has_aborted = true;
2128 
2129   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2130   satb_mq_set.abandon_partial_marking();
2131   // This can be called either during or outside marking, we'll read
2132   // the expected_active value from the SATB queue set.
2133   satb_mq_set.set_active_all_threads(
2134                                  false, /* new active value */
2135                                  satb_mq_set.is_active() /* expected_active */);
2136 }
2137 
2138 static void print_ms_time_info(const char* prefix, const char* name,
2139                                NumberSeq& ns) {
2140   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2141                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2142   if (ns.num() > 0) {
2143     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2144                            prefix, ns.sd(), ns.maximum());
2145   }
2146 }
2147 
2148 void G1ConcurrentMark::print_summary_info() {
2149   Log(gc, marking) log;
2150   if (!log.is_trace()) {
2151     return;
2152   }
2153 
2154   log.trace(" Concurrent marking:");
2155   print_ms_time_info("  ", "init marks", _init_times);
2156   print_ms_time_info("  ", "remarks", _remark_times);
2157   {
2158     print_ms_time_info("     ", "final marks", _remark_mark_times);
2159     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2160 
2161   }
2162   print_ms_time_info("  ", "cleanups", _cleanup_times);
2163   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2164             _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2165   if (G1ScrubRemSets) {
2166     log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
2167               _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2168   }
2169   log.trace("  Total stop_world time = %8.2f s.",
2170             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2171   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2172             cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
2173 }
2174 
2175 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2176   _parallel_workers->print_worker_threads_on(st);
2177 }
2178 
2179 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2180   _parallel_workers->threads_do(tc);
2181 }
2182 
2183 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2184   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2185       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
2186   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
2187   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
2188 }
2189 
2190 // Closure for iteration over bitmaps
2191 class G1CMBitMapClosure : public BitMapClosure {
2192 private:
2193   // the bitmap that is being iterated over
2194   G1CMBitMap*                 _nextMarkBitMap;
2195   G1ConcurrentMark*           _cm;
2196   G1CMTask*                   _task;
2197 
2198 public:
  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) :
    _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }
2201 
2202   bool do_bit(size_t offset) {
2203     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
2204     assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert(addr < _cm->finger(), "invariant");
    assert(addr >= _task->finger(), "invariant");

    // We move this task's local finger along.
2209     _task->move_finger_to(addr);
2210 
2211     _task->scan_object(oop(addr));
2212     // we only partially drain the local queue and global stack
2213     _task->drain_local_queue(true);
2214     _task->drain_global_stack(true);
2215 
2216     // if the has_aborted flag has been raised, we need to bail out of
2217     // the iteration
2218     return !_task->has_aborted();
2219   }
2220 };
2221 
2222 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2223   ReferenceProcessor* result = g1h->ref_processor_cm();
2224   assert(result != NULL, "CM reference processor should not be NULL");
2225   return result;
2226 }
2227 
2228 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2229                                G1ConcurrentMark* cm,
2230                                G1CMTask* task)
2231   : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
2232     _g1h(g1h), _cm(cm), _task(task)
2233 { }
2234 
2235 void G1CMTask::setup_for_region(HeapRegion* hr) {
2236   assert(hr != NULL,
2237         "claim_region() should have filtered out NULL regions");
2238   _curr_region  = hr;
2239   _finger       = hr->bottom();
2240   update_region_limit();
2241 }
2242 
2243 void G1CMTask::update_region_limit() {
2244   HeapRegion* hr            = _curr_region;
2245   HeapWord* bottom          = hr->bottom();
2246   HeapWord* limit           = hr->next_top_at_mark_start();
2247 
2248   if (limit == bottom) {
2249     // The region was collected underneath our feet.
2250     // We set the finger to bottom to ensure that the bitmap
2251     // iteration that will follow this will not do anything.
2252     // (this is not a condition that holds when we set the region up,
2253     // as the region is not supposed to be empty in the first place)
2254     _finger = bottom;
2255   } else if (limit >= _region_limit) {
2256     assert(limit >= _finger, "peace of mind");
2257   } else {
2258     assert(limit < _region_limit, "only way to get here");
2259     // This can happen under some pretty unusual circumstances.  An
2260     // evacuation pause empties the region underneath our feet (NTAMS
2261     // at bottom). We then do some allocation in the region (NTAMS
2262     // stays at bottom), followed by the region being used as a GC
2263     // alloc region (NTAMS will move to top() and the objects
2264     // originally below it will be grayed). All objects now marked in
2265     // the region are explicitly grayed, if below the global finger,
2266     // and we do not need in fact to scan anything else. So, we simply
2267     // set _finger to be limit to ensure that the bitmap iteration
2268     // doesn't do anything.
2269     _finger = limit;
2270   }
2271 
2272   _region_limit = limit;
2273 }
2274 
2275 void G1CMTask::giveup_current_region() {
2276   assert(_curr_region != NULL, "invariant");
2277   clear_region_fields();
2278 }
2279 
2280 void G1CMTask::clear_region_fields() {
2281   // Values for these three fields that indicate that we're not
2282   // holding on to a region.
2283   _curr_region   = NULL;
2284   _finger        = NULL;
2285   _region_limit  = NULL;
2286 }
2287 
2288 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2289   if (cm_oop_closure == NULL) {
2290     assert(_cm_oop_closure != NULL, "invariant");
2291   } else {
2292     assert(_cm_oop_closure == NULL, "invariant");
2293   }
2294   _cm_oop_closure = cm_oop_closure;
2295 }
2296 
2297 void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
2298   guarantee(nextMarkBitMap != NULL, "invariant");
2299   _nextMarkBitMap                = nextMarkBitMap;
2300   clear_region_fields();
2301 
2302   _calls                         = 0;
2303   _elapsed_time_ms               = 0.0;
2304   _termination_time_ms           = 0.0;
2305   _termination_start_time_ms     = 0.0;
2306 }
2307 
2308 bool G1CMTask::should_exit_termination() {
2309   regular_clock_call();
2310   // This is called when we are in the termination protocol. We should
2311   // quit if, for some reason, this task wants to abort or the global
2312   // stack is not empty (this means that we can get work from it).
2313   return !_cm->mark_stack_empty() || has_aborted();
2314 }
2315 
2316 void G1CMTask::reached_limit() {
2317   assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
2319          "shouldn't have been called otherwise");
2320   regular_clock_call();
2321 }
2322 
2323 void G1CMTask::regular_clock_call() {
2324   if (has_aborted()) return;
2325 
2326   // First, we need to recalculate the words scanned and refs reached
2327   // limits for the next clock call.
2328   recalculate_limits();
2329 
2330   // During the regular clock call we do the following
2331 
2332   // (1) If an overflow has been flagged, then we abort.
2333   if (_cm->has_overflown()) {
2334     set_has_aborted();
2335     return;
2336   }
2337 
2338   // If we are not concurrent (i.e. we're doing remark) we don't need
2339   // to check anything else. The other steps are only needed during
2340   // the concurrent marking phase.
2341   if (!concurrent()) return;
2342 
2343   // (2) If marking has been aborted for Full GC, then we also abort.
2344   if (_cm->has_aborted()) {
2345     set_has_aborted();
2346     return;
2347   }
2348 
2349   double curr_time_ms = os::elapsedVTime() * 1000.0;
2350 
  // (3) We check whether we should yield. If we have to, then we abort.
2352   if (SuspendibleThreadSet::should_yield()) {
2353     // We should yield. To do this we abort the task. The caller is
2354     // responsible for yielding.
2355     set_has_aborted();
2356     return;
2357   }
2358 
  // (4) We check whether we've reached our time quota. If we have,
2360   // then we abort.
2361   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2362   if (elapsed_time_ms > _time_target_ms) {
2363     set_has_aborted();
2364     _has_timed_out = true;
2365     return;
2366   }
2367 
  // (5) Finally, we check whether there are enough completed SATB
2369   // buffers available for processing. If there are, we abort.
2370   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2371   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2372     // we do need to process SATB buffers, we'll abort and restart
2373     // the marking task to do so
2374     set_has_aborted();
2375     return;
2376   }
2377 }
2378 
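// Reset the words-scanned and refs-reached limits to one full period beyond
// the current counter values.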
2379 void G1CMTask::recalculate_limits() {
2380   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2381   _words_scanned_limit      = _real_words_scanned_limit;
2382 
2383   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2384   _refs_reached_limit       = _real_refs_reached_limit;
2385 }
2386 
2387 void G1CMTask::decrease_limits() {
2388   // This is called when we believe that we're going to do an infrequent
2389   // operation which will increase the per byte scanned cost (i.e. move
2390   // entries to/from the global stack). It basically tries to decrease the
2391   // scanning limit so that the clock is called earlier.
2392 
2393   _words_scanned_limit = _real_words_scanned_limit -
2394     3 * words_scanned_period / 4;
2395   _refs_reached_limit  = _real_refs_reached_limit -
2396     3 * refs_reached_period / 4;
2397 }
2398 
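// Move up to OopsPerChunk entries from the task's local queue onto the
// global mark stack as a single chunk; a NULL entry terminates a partially
// filled chunk. Aborts the task if the push on the global stack fails.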
2399 void G1CMTask::move_entries_to_global_stack() {
2400   // Local array where we'll store the entries that will be popped
2401   // from the local queue.
2402   oop buffer[G1CMMarkStack::OopsPerChunk];
2403 
2404   size_t n = 0;
2405   oop obj;
2406   while (n < G1CMMarkStack::OopsPerChunk && _task_queue->pop_local(obj)) {
2407     buffer[n] = obj;
2408     ++n;
2409   }
2410   if (n < G1CMMarkStack::OopsPerChunk) {
2411     buffer[n] = NULL;
2412   }
2413 
2414   if (n > 0) {
2415     if (!_cm->mark_stack_push(buffer)) {
2416       set_has_aborted();
2417     }
2418   }
2419 
2420   // This operation was quite expensive, so decrease the limits.
2421   decrease_limits();
2422 }
2423 
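// Pop one chunk of entries from the global mark stack and push its contents
// onto the local task queue. Returns false if nothing could be popped.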
2424 bool G1CMTask::get_entries_from_global_stack() {
2425   // Local array where we'll store the entries that will be popped
2426   // from the global stack.
2427   oop buffer[G1CMMarkStack::OopsPerChunk];
2428 
2429   if (!_cm->mark_stack_pop(buffer)) {
2430     return false;
2431   }
2432 
2433   // We did actually pop at least one entry.
2434   for (size_t i = 0; i < G1CMMarkStack::OopsPerChunk; ++i) {
2435     oop elem = buffer[i];
2436     if (elem == NULL) {
2437       break;
2438     }
2439     assert(G1CMObjArrayProcessor::is_array_slice(elem) || elem->is_oop(), "Element " PTR_FORMAT " must be an array slice or oop", p2i(elem));
2440     bool success = _task_queue->push(elem);
2441     // We only call this when the local queue is empty or under a
2442     // given target limit. So, we do not expect this push to fail.
2443     assert(success, "invariant");
2444   }
2445 
2446   // This operation was quite expensive, so decrease the limits
2447   decrease_limits();
2448   return true;
2449 }
2450 
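// Scan objects popped from the local task queue until its size drops to the
// target: a partial drain stops at a small non-zero target so that other
// tasks can still steal entries; a total drain empties the queue.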
2451 void G1CMTask::drain_local_queue(bool partially) {
2452   if (has_aborted()) {
2453     return;
2454   }
2455 
  // Decide what the target size is, depending on whether we're going to
2457   // drain it partially (so that other tasks can steal if they run out
2458   // of things to do) or totally (at the very end).
2459   size_t target_size;
2460   if (partially) {
2461     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2462   } else {
2463     target_size = 0;
2464   }
2465 
2466   if (_task_queue->size() > target_size) {
2467     oop obj;
2468     bool ret = _task_queue->pop_local(obj);
2469     while (ret) {
2470       scan_object(obj);
2471       if (_task_queue->size() <= target_size || has_aborted()) {
2472         ret = false;
2473       } else {
2474         ret = _task_queue->pop_local(obj);
2475       }
2476     }
2477   }
2478 }
2479 
2480 void G1CMTask::drain_global_stack(bool partially) {
2481   if (has_aborted()) return;
2482 
2483   // We have a policy to drain the local queue before we attempt to
2484   // drain the global stack.
2485   assert(partially || _task_queue->size() == 0, "invariant");
2486 
  // Decide what the target size is, depending on whether we're going to
2488   // drain it partially (so that other tasks can steal if they run out
2489   // of things to do) or totally (at the very end).
  // Notice that when draining the global mark stack partially, due to the raciness
2491   // of the mark stack size update we might in fact drop below the target. But,
2492   // this is not a problem.
2493   // In case of total draining, we simply process until the global mark stack is
2494   // totally empty, disregarding the size counter.
2495   if (partially) {
2496     size_t const target_size = _cm->partial_mark_stack_size_target();
2497     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2498       if (get_entries_from_global_stack()) {
2499         drain_local_queue(partially);
2500       }
2501     }
2502   } else {
2503     while (!has_aborted() && get_entries_from_global_stack()) {
2504       drain_local_queue(partially);
2505     }
2506   }
2507 }
2508 
// The SATB queue code makes several assumptions about whether to call the
// par or non-par versions of its methods. This is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
2513 void G1CMTask::drain_satb_buffers() {
2514   if (has_aborted()) return;
2515 
2516   // We set this so that the regular clock knows that we're in the
2517   // middle of draining buffers and doesn't set the abort flag when it
2518   // notices that SATB buffers are available for draining. It'd be
2519   // very counter productive if it did that. :-)
2520   _draining_satb_buffers = true;
2521 
2522   G1CMSATBBufferClosure satb_cl(this, _g1h);
2523   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2524 
2525   // This keeps claiming and applying the closure to completed buffers
2526   // until we run out of buffers or we need to abort.
2527   while (!has_aborted() &&
2528          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2529     regular_clock_call();
2530   }
2531 
2532   _draining_satb_buffers = false;
2533 
2534   assert(has_aborted() ||
2535          concurrent() ||
2536          satb_mq_set.completed_buffers_num() == 0, "invariant");
2537 
  // Again, this was a potentially expensive operation; decrease the
  // limits to get the regular clock call early.
2540   decrease_limits();
2541 }
2542 
2543 void G1CMTask::print_stats() {
2544   log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
2545                        _worker_id, _calls);
2546   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2547                        _elapsed_time_ms, _termination_time_ms);
2548   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
2549                        _step_times_ms.num(), _step_times_ms.avg(),
2550                        _step_times_ms.sd());
2551   log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
2552                        _step_times_ms.maximum(), _step_times_ms.sum());
2553 }
2554 
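// Attempt to steal a marking entry from the local queue of another task.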
2555 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
2556   return _task_queues->steal(worker_id, hash_seed, obj);
2557 }
2558 
2559 /*****************************************************************************
2560 
2561     The do_marking_step(time_target_ms, ...) method is the building
2562     block of the parallel marking framework. It can be called in parallel
2563     with other invocations of do_marking_step() on different tasks
2564     (but only one per task, obviously) and concurrently with the
2565     mutator threads, or during remark, hence it eliminates the need
2566     for two versions of the code. When called during remark, it will
2567     pick up from where the task left off during the concurrent marking
2568     phase. Interestingly, tasks are also claimable during evacuation
2569     pauses too, since do_marking_step() ensures that it aborts before
2570     it needs to yield.
2571 
2572     The data structures that it uses to do marking work are the
2573     following:
2574 
2575       (1) Marking Bitmap. If there are gray objects that appear only
2576       on the bitmap (this happens either when dealing with an overflow
2577       or when the initial marking phase has simply marked the roots
2578       and didn't push them on the stack), then tasks claim heap
2579       regions whose bitmap they then scan to find gray objects. A
2580       global finger indicates where the end of the last claimed region
2581       is. A local finger indicates how far into the region a task has
2582       scanned. The two fingers are used to determine how to gray an
2583       object (i.e. whether simply marking it is OK, as it will be
2584       visited by a task in the future, or whether it needs to be also
2585       pushed on a stack).
2586 
2587       (2) Local Queue. The local queue of the task which is accessed
2588       reasonably efficiently by the task. Other tasks can steal from
2589       it when they run out of work. Throughout the marking phase, a
2590       task attempts to keep its local queue short but not totally
2591       empty, so that entries are available for stealing by other
2592       tasks. Only when there is no more work, a task will totally
2593       drain its local queue.
2594 
2595       (3) Global Mark Stack. This handles local queue overflow. During
2596       marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and more fine-grained
      interaction with it, which might cause contention. If it
2599       overflows, then the marking phase should restart and iterate
2600       over the bitmap to identify gray objects. Throughout the marking
2601       phase, tasks attempt to keep the global mark stack at a small
2602       length but not totally empty, so that entries are available for
2603       popping by other tasks. Only when there is no more work, tasks
2604       will totally drain the global mark stack.
2605 
2606       (4) SATB Buffer Queue. This is where completed SATB buffers are
2607       made available. Buffers are regularly removed from this queue
2608       and scanned for roots, so that the queue doesn't get too
2609       long. During remark, all completed buffers are processed, as
2610       well as the filled in parts of any uncompleted buffers.
2611 
2612     The do_marking_step() method tries to abort when the time target
2613     has been reached. There are a few other cases when the
2614     do_marking_step() method also aborts:
2615 
2616       (1) When the marking phase has been aborted (after a Full GC).
2617 
2618       (2) When a global overflow (on the global stack) has been
2619       triggered. Before the task aborts, it will actually sync up with
2620       the other tasks to ensure that all the marking data structures
2621       (local queues, stacks, fingers etc.)  are re-initialized so that
2622       when do_marking_step() completes, the marking phase can
2623       immediately restart.
2624 
2625       (3) When enough completed SATB buffers are available. The
2626       do_marking_step() method only tries to drain SATB buffers right
2627       at the beginning. So, if enough buffers are available, the
2628       marking step aborts and the SATB buffers are processed at
2629       the beginning of the next invocation.
2630 
      (4) To yield. When we have to yield, we abort and yield
2632       right at the end of do_marking_step(). This saves us from a lot
2633       of hassle as, by yielding we might allow a Full GC. If this
2634       happens then objects will be compacted underneath our feet, the
2635       heap might shrink, etc. We save checking for this by just
2636       aborting and doing the yield right at the end.
2637 
2638     From the above it follows that the do_marking_step() method should
2639     be called in a loop (or, otherwise, regularly) until it completes.
2640 
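    A typical calling pattern, mirroring the remark task and the
    reference processing closures above, is:

      do {
        task->do_marking_step(target_ms, do_termination, is_serial);
      } while (task->has_aborted() && !cm->has_overflown());
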
2641     If a marking step completes without its has_aborted() flag being
2642     true, it means it has completed the current marking phase (and
2643     also all other marking tasks have done so and have all synced up).
2644 
2645     A method called regular_clock_call() is invoked "regularly" (in
2646     sub ms intervals) throughout marking. It is this clock method that
2647     checks all the abort conditions which were mentioned above and
2648     decides when the task should abort. A work-based scheme is used to
2649     trigger this clock method: when the number of object words the
2650     marking phase has scanned or the number of references the marking
    phase has visited reaches a given limit. Additional invocations of
    the clock method have been planted in a few other strategic places
2653     too. The initial reason for the clock method was to avoid calling
2654     vtime too regularly, as it is quite expensive. So, once it was in
2655     place, it was natural to piggy-back all the other conditions on it
2656     too and not constantly check them throughout the code.
2657 
2658     If do_termination is true then do_marking_step will enter its
2659     termination protocol.
2660 
2661     The value of is_serial must be true when do_marking_step is being
2662     called serially (i.e. by the VMThread) and do_marking_step should
2663     skip any synchronization in the termination and overflow code.
2664     Examples include the serial remark code and the serial reference
2665     processing closures.
2666 
2667     The value of is_serial must be false when do_marking_step is
2668     being called by any of the worker threads in a work gang.
2669     Examples include the concurrent marking code (CMMarkingTask),
2670     the MT remark code, and the MT reference processing closures.
2671 
2672  *****************************************************************************/
2673 
2674 void G1CMTask::do_marking_step(double time_target_ms,
2675                                bool do_termination,
2676                                bool is_serial) {
2677   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2678   assert(concurrent() == _cm->concurrent(), "they should be the same");
2679 
2680   G1Policy* g1_policy = _g1h->g1_policy();
2681   assert(_task_queues != NULL, "invariant");
2682   assert(_task_queue != NULL, "invariant");
2683   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
2684 
2685   assert(!_claimed,
2686          "only one thread should claim this task at any one time");
2687 
  // OK, this doesn't safeguard against all possible scenarios, as it is
2689   // possible for two threads to set the _claimed flag at the same
2690   // time. But it is only for debugging purposes anyway and it will
2691   // catch most problems.
2692   _claimed = true;
2693 
2694   _start_time_ms = os::elapsedVTime() * 1000.0;
2695 
2696   // If do_stealing is true then do_marking_step will attempt to
2697   // steal work from the other G1CMTasks. It only makes sense to
2698   // enable stealing when the termination protocol is enabled
2699   // and do_marking_step() is not being called serially.
2700   bool do_stealing = do_termination && !is_serial;
2701 
2702   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2703   _time_target_ms = time_target_ms - diff_prediction_ms;
2704 
2705   // set up the variables that are used in the work-based scheme to
2706   // call the regular clock method
2707   _words_scanned = 0;
2708   _refs_reached  = 0;
2709   recalculate_limits();
2710 
2711   // clear all flags
2712   clear_has_aborted();
2713   _has_timed_out = false;
2714   _draining_satb_buffers = false;
2715 
2716   ++_calls;
2717 
  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // locally (on the stack).
2721   G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
2722   G1CMOopClosure    cm_oop_closure(_g1h, _cm, this);
2723   set_cm_oop_closure(&cm_oop_closure);
2724 
  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
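        // iterate() walked the rest of mr without do_bit() asking to abort,
        // so we have finished scanning this region and can give it up.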
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.

    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method about why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region  == NULL, "invariant");
      assert(_finger       == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");
    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt to steal work from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");
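    // Keep stealing entries from other tasks' queues until either we abort
    // or there is nothing left to steal.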
    while (!has_aborted()) {
      oop obj;
      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // If we still haven't aborted, try to enter the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The G1CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
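    // In the parallel case offer_termination() only returns true once every
    // worker in the gang has offered termination; until then it spins and
    // periodically consults should_exit_termination() so that this task can
    // leave the protocol again if it has to abort or more work shows up.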
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
    } else {
      // Apparently there's more work to do. Let's abort this task. The
      // caller will restart it and we can hopefully find more things to do.
      set_has_aborted();
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was stack-allocated in this frame doesn't escape it
  // by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.
    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
  }

  _claimed = false;
}

G1CMTask::G1CMTask(uint worker_id,
                   G1ConcurrentMark* cm,
                   G1CMTaskQueue* task_queue,
                   G1CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _objArray_processor(this),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

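  // Seed the step-time diff history so that the very first call to
  // do_marking_step() has a (small) prediction to work with.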
  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX            "###"

#define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT            "   %-4s"
#define G1PPRL_TYPE_H_FORMAT          "   %4s"
#define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT          "  %9s"
#define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT        "  %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
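
// For example (with made-up, illustrative values), the summary macros
// compose into a line resembling:
//   ### SUMMARY  capacity: 2048.00 MB  used: 1024.00 MB / 50.00 %  ...
// (see the destructor below for the actual composition).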

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(const char* phase_name)
  : _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "(bytes)");
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type       = r->get_type_str();
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, strong_code_roots_bytes);

  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // add static memory usages to remembered set sizes
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                         " SUMMARY"
                         G1PPRL_SUM_MB_FORMAT("capacity")
                         G1PPRL_SUM_MB_PERC_FORMAT("used")
                         G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                         G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                         G1PPRL_SUM_MB_FORMAT("remset")
                         G1PPRL_SUM_MB_FORMAT("code-roots"),
                         bytes_to_mb(_total_capacity_bytes),
                         bytes_to_mb(_total_used_bytes),
                         perc(_total_used_bytes, _total_capacity_bytes),
                         bytes_to_mb(_total_prev_live_bytes),
                         perc(_total_prev_live_bytes, _total_capacity_bytes),
                         bytes_to_mb(_total_next_live_bytes),
                         perc(_total_next_live_bytes, _total_capacity_bytes),
                         bytes_to_mb(_total_remset_bytes),
                         bytes_to_mb(_total_strong_code_roots_bytes));
}