/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/growableArray.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}
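
// Example: with the common 64-bit values MinObjAlignmentInBytes = 8 and
// BitsPerByte = 8, mark_distance() is 64, so compute_size() reserves roughly
// 1/64th of the heap size for the bitmap (e.g. 16 MB for a 1 GB heap).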

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clear_range(mr);
}

void G1CMBitMap::clear_range(MemRegion mr) {
  // MemRegion::intersection() returns the clipped region; it does not modify
  // the receiver, so the result must be assigned back.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0),
  _out_of_memory(false),
  _should_expand(false) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();
  _should_expand = false;

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(OopChunk)) / sizeof(void*);
}
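
// Example: assuming a 4096 byte os::vm_allocation_granularity() and an
// 8192 byte OopChunk on a 64-bit build, capacity_alignment() is
// lcm(4096, 8192) / 8 = 1024 oop-sized slots.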

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const OopChunkSizeInVoidStar = sizeof(OopChunk) / sizeof(void*);

  _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;
  size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / OopChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  // Clear expansion flag
  _should_expand = false;

  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  elem->next = *list;
  *list = elem;
}

G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);

  OopChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read is okay because we only ever increase the _hwm in parallel code.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
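  // Several threads may pass the dirty check above concurrently, so the
  // index claimed by the atomic increment must be validated again.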
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  OopChunk* result = ::new (&_base[cur_idx]) OopChunk;
  result->next = NULL;
  return result;
}

void G1CMMarkStack::par_push_chunk(oop* ptr_arr) {
  // Get a new chunk.
  OopChunk* new_chunk = remove_chunk_from_list(&_free_list);

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();
  }

  if (new_chunk == NULL) {
    _out_of_memory = true;
    return;
  }

  for (size_t i = 0; i < OopsPerChunk; i++) {
    new_chunk->data[i] = ptr_arr[i];
  }

  add_chunk_to_list(&_chunk_list, new_chunk);
  Atomic::inc(&_chunks_in_chunk_list);
}

bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) {
  OopChunk* cur = remove_chunk_from_list(&_chunk_list);

  if (cur == NULL) {
    return false;
  }

  Atomic::dec(&_chunks_in_chunk_list);

  for (size_t i = 0; i < OopsPerChunk; i++) {
    ptr_arr[i] = (oop)cur->data[i];
  }

  add_chunk_to_list(&_free_list, cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  clear_out_of_memory();
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}
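
// Example: scale_parallel_threads(8) yields (8 + 2) / 4 = 2 marking threads
// (integer division), while the MAX2 keeps a minimum of one thread for very
// small values of ParallelGCThreads.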

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _global_mark_stack(),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / os::initial_active_processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
    double sleep_factor =
                       (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor             = sleep_factor;
    _marking_task_overhead    = marking_task_overhead;
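    // Example: G1MarkingOverheadPercent = 50 with the default
    // MaxGCPauseMillis = 200, GCPauseIntervalMillis = 201 and 8 active
    // processors gives an overall_cm_overhead of roughly 0.5, i.e.
    // ceil(0.5 * 8) = 4 marking threads, each busy about 99.5% of the
    // time with a correspondingly tiny sleep factor.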
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor             = 0.0;
    _marking_task_overhead    = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
       _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
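    // Example: with 4 marking threads and the usual 64-bit TASKQUEUE_SIZE of
    // 1 << 17, this picks max(MarkStackSize, 4 * 131072) entries, capped at
    // MarkStackSizeMax.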
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}

void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _global_mark_stack.set_should_expand(has_overflown());
  _global_mark_stack.set_empty();        // Also clears the overflow stack's overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

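  // Example: clearing the bitmap for a 1 GB heap with the usual 64-bit
  // heap_map_factor() of 64 means clearing 16 MB of bitmap, i.e. 16 one-MB
  // work chunks to distribute among the workers.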
  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_nextMarkBitMap, _parallel_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_parallel_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently with the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}

void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for a Full GC or an evacuation pause to occur while it
 * waits. This is actually safe, since entering the sync barrier is
 * one of the last things do_marking_step() does, and it doesn't
 * manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures then
  // go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark*     _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      G1CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check();

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
         "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
         max_parallel_marking_threads(), n_conc_workers);
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _parallel_marking_threads = MIN2(calc_parallel_marking_threads(),
                                     // We distribute work on a per-region basis, so starting
                                     // more threads than that is useless.
                                     root_regions()->num_root_regions());
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _parallel_marking_threads, root_regions()->num_root_regions());
    _parallel_workers->run_task(&task, _parallel_marking_threads);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _parallel_workers->update_active_workers(active_workers);
  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_global_mark_stack.should_expand()) {
    _global_mark_stack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
      AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc)("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc)("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitMap as "prev".
  swapMarkBitMaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Do remembered set scrubbing before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking
1431 
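// An entry is alive here if it lies outside the G1 reserved heap (such
// addresses are not subject to G1 marking) or if it is not "ill"
// (roughly: it is marked on the next bitmap or was allocated since
// marking started).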
1432 bool G1CMIsAliveClosure::do_object_b(oop obj) {
1433   HeapWord* addr = (HeapWord*)obj;
1434   return addr != NULL &&
1435          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
1436 }
1437 
// 'Keep Alive' oop closure used by both serial and parallel reference
// processing. Uses the G1CMTask associated with a worker thread (for serial
// reference processing the G1CMTask for worker 0 is used) to preserve (mark)
// and trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// stage. Also, using the tasks' local queues removes the potential
// for interference between the workers that could occur if they
// operated on the global stack.
1449 
1450 class G1CMKeepAliveAndDrainClosure: public OopClosure {
1451   G1ConcurrentMark* _cm;
1452   G1CMTask*         _task;
1453   int               _ref_counter_limit;
1454   int               _ref_counter;
1455   bool              _is_serial;
1456  public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task),
    _ref_counter_limit(G1RefProcDrainInterval),
    _ref_counter(G1RefProcDrainInterval),
    _is_serial(is_serial) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }
1464 
1465   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1466   virtual void do_oop(      oop* p) { do_oop_work(p); }
1467 
1468   template <class T> void do_oop_work(T* p) {
1469     if (!_cm->has_overflown()) {
1470       oop obj = oopDesc::load_decode_heap_oop(p);
1471       _task->deal_with_reference(obj);
1472       _ref_counter--;
1473 
1474       if (_ref_counter == 0) {
1475         // We have dealt with _ref_counter_limit references, pushing them
1476         // and objects reachable from them on to the local stack (and
1477         // possibly the global stack). Call G1CMTask::do_marking_step() to
1478         // process these entries.
1479         //
1480         // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1481         // there's nothing more to do (i.e. we're done with the entries that
1482         // were pushed as a result of the G1CMTask::deal_with_reference() calls
1483         // above) or we overflow.
1484         //
1485         // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1486         // flag while there may still be some work to do. (See the comment at
1487         // the beginning of G1CMTask::do_marking_step() for those conditions -
1488         // one of which is reaching the specified time target.) It is only
1489         // when G1CMTask::do_marking_step() returns without setting the
1490         // has_aborted() flag that the marking step has completed.
1491         do {
1492           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1493           _task->do_marking_step(mark_step_duration_ms,
1494                                  false      /* do_termination */,
1495                                  _is_serial);
1496         } while (_task->has_aborted() && !_cm->has_overflown());
1497         _ref_counter = _ref_counter_limit;
1498       }
1499     }
1500   }
1501 };
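// Usage note (illustrative; the actual interval is flag-dependent): with
// G1RefProcDrainInterval set to, say, 1000, every 1000th
// deal_with_reference() call above triggers a drain of the entries pushed
// so far, looping on do_marking_step() until the drain completes or the
// global mark stack overflows.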
1502 
1503 // 'Drain' oop closure used by both serial and parallel reference processing.
1504 // Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
1506 // do_marking_step routine, with an unbelievably large timeout value,
1507 // to drain the marking data structures of the remaining entries
1508 // added by the 'keep alive' oop closure above.
1509 
1510 class G1CMDrainMarkingStackClosure: public VoidClosure {
1511   G1ConcurrentMark* _cm;
1512   G1CMTask*         _task;
1513   bool              _is_serial;
1514  public:
1515   G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1516     _cm(cm), _task(task), _is_serial(is_serial) {
1517     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1518   }
1519 
1520   void do_void() {
1521     do {
1522       // We call G1CMTask::do_marking_step() to completely drain the local
1523       // and global marking stacks of entries pushed by the 'keep alive'
1524       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1525       //
1526       // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1527       // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
1529       // closure to the entries on the discovered ref lists) or we overflow
1530       // the global marking stack.
1531       //
1532       // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1533       // flag while there may still be some work to do. (See the comment at
1534       // the beginning of G1CMTask::do_marking_step() for those conditions -
1535       // one of which is reaching the specified time target.) It is only
1536       // when G1CMTask::do_marking_step() returns without setting the
1537       // has_aborted() flag that the marking step has completed.
1538 
1539       _task->do_marking_step(1000000000.0 /* something very large */,
1540                              true         /* do_termination */,
1541                              _is_serial);
1542     } while (_task->has_aborted() && !_cm->has_overflown());
1543   }
1544 };
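// Note: the very large time target passed above effectively disables
// do_marking_step()'s time-based abort, so the loop exits only once the
// marking data structures have been fully drained or the task aborts for
// another reason (e.g. a global mark stack overflow).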
1545 
1546 // Implementation of AbstractRefProcTaskExecutor for parallel
1547 // reference processing at the end of G1 concurrent marking
1548 
1549 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
1550 private:
1551   G1CollectedHeap*  _g1h;
1552   G1ConcurrentMark* _cm;
1553   WorkGang*         _workers;
1554   uint              _active_workers;
1555 
1556 public:
1557   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1558                           G1ConcurrentMark* cm,
1559                           WorkGang* workers,
1560                           uint n_workers) :
1561     _g1h(g1h), _cm(cm),
1562     _workers(workers), _active_workers(n_workers) { }
1563 
1564   // Executes the given task using concurrent marking worker threads.
1565   virtual void execute(ProcessTask& task);
1566   virtual void execute(EnqueueTask& task);
1567 };
1568 
1569 class G1CMRefProcTaskProxy: public AbstractGangTask {
1570   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1571   ProcessTask&      _proc_task;
1572   G1CollectedHeap*  _g1h;
1573   G1ConcurrentMark* _cm;
1574 
1575 public:
1576   G1CMRefProcTaskProxy(ProcessTask& proc_task,
1577                        G1CollectedHeap* g1h,
1578                        G1ConcurrentMark* cm) :
1579     AbstractGangTask("Process reference objects in parallel"),
1580     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1581     ReferenceProcessor* rp = _g1h->ref_processor_cm();
1582     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1583   }
1584 
1585   virtual void work(uint worker_id) {
1586     ResourceMark rm;
1587     HandleMark hm;
1588     G1CMTask* task = _cm->task(worker_id);
1589     G1CMIsAliveClosure g1_is_alive(_g1h);
1590     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1591     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1592 
1593     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1594   }
1595 };
1596 
1597 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
1598   assert(_workers != NULL, "Need parallel worker threads.");
1599   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1600 
1601   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1602 
1603   // We need to reset the concurrency level before each
1604   // proxy task execution, so that the termination protocol
1605   // and overflow handling in G1CMTask::do_marking_step() knows
1606   // how many workers to wait for.
1607   _cm->set_concurrency(_active_workers);
1608   _workers->run_task(&proc_task_proxy);
1609 }
1610 
1611 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
1612   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
1613   EnqueueTask& _enq_task;
1614 
1615 public:
1616   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
1617     AbstractGangTask("Enqueue reference objects in parallel"),
1618     _enq_task(enq_task) { }
1619 
1620   virtual void work(uint worker_id) {
1621     _enq_task.work(worker_id);
1622   }
1623 };
1624 
1625 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1626   assert(_workers != NULL, "Need parallel worker threads.");
1627   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1628 
1629   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1630 
1631   // Not strictly necessary but...
1632   //
1633   // We need to reset the concurrency level before each
1634   // proxy task execution, so that the termination protocol
1635   // and overflow handling in G1CMTask::do_marking_step() knows
1636   // how many workers to wait for.
1637   _cm->set_concurrency(_active_workers);
1638   _workers->run_task(&enq_task_proxy);
1639 }
1640 
void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true /* process_strings */, true /* process_symbols */, purged_classes);
}
1644 
1645 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
1646   if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists. We could have
    // done so, but the only benefit would be that, when marking
    // restarts, fewer reference objects are discovered.
1653     return;
1654   }
1655 
1656   ResourceMark rm;
1657   HandleMark   hm;
1658 
1659   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1660 
1661   // Is alive closure.
1662   G1CMIsAliveClosure g1_is_alive(g1h);
1663 
1664   // Inner scope to exclude the cleaning of the string and symbol
1665   // tables from the displayed time.
1666   {
1667     GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1668 
1669     ReferenceProcessor* rp = g1h->ref_processor_cm();
1670 
1671     // See the comment in G1CollectedHeap::ref_processing_init()
1672     // about how reference processing currently works in G1.
1673 
1674     // Set the soft reference policy
1675     rp->setup_policy(clear_all_soft_refs);
1676     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1677 
    // Instances of the 'Keep Alive' and 'Complete GC' closures used
    // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
    // JNI references during parallel reference processing.
    //
    // These closures do not need to synchronize with the worker
    // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
    // when reference processing is not multi-threaded it is
    // performed by the current thread instead of a gang worker).
1688     //
1689     // The gang tasks involved in parallel reference processing create
1690     // their own instances of these closures, which do their own
1691     // synchronization among themselves.
1692     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1693     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1694 
1695     // We need at least one active thread. If reference processing
1696     // is not multi-threaded we use the current (VMThread) thread,
1697     // otherwise we use the work gang from the G1CollectedHeap and
1698     // we utilize all the worker threads we can.
1699     bool processing_is_mt = rp->processing_is_mt();
1700     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1701     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1702 
1703     // Parallel processing task executor.
1704     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1705                                               g1h->workers(), active_workers);
1706     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1707 
1708     // Set the concurrency level. The phase was already set prior to
1709     // executing the remark task.
1710     set_concurrency(active_workers);
1711 
1712     // Set the degree of MT processing here.  If the discovery was done MT,
1713     // the number of threads involved during discovery could differ from
1714     // the number of active workers.  This is OK as long as the discovered
1715     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1716     rp->set_active_mt_degree(active_workers);
1717 
1718     // Process the weak references.
1719     const ReferenceProcessorStats& stats =
1720         rp->process_discovered_references(&g1_is_alive,
1721                                           &g1_keep_alive,
1722                                           &g1_drain_mark_stack,
1723                                           executor,
1724                                           _gc_timer_cm);
1725     _gc_tracer_cm->report_gc_reference_stats(stats);
1726 
1727     // The do_oop work routines of the keep_alive and drain_marking_stack
1728     // oop closures will set the has_overflown flag if we overflow the
1729     // global marking stack.
1730 
1731     assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
1732             "mark stack should be empty (unless it overflowed)");
1733 
1734     if (_global_mark_stack.is_out_of_memory()) {
1735       // This should have been done already when we tried to push an
1736       // entry on to the global mark stack. But let's do it again.
1737       set_has_overflown();
1738     }
1739 
    assert(rp->num_q() == active_workers, "Reference queue count should match active worker count");
1741 
1742     rp->enqueue_discovered_references(executor);
1743 
1744     rp->verify_no_references_recorded();
1745     assert(!rp->discovery_enabled(), "Post condition");
1746   }
1747 
1748   if (has_overflown()) {
1749     // We can not trust g1_is_alive if the marking stack overflowed
1750     return;
1751   }
1752 
1753   assert(_global_mark_stack.is_empty(), "Marking should have completed");
1754 
1755   // Unload Klasses, String, Symbols, Code Cache, etc.
1756   if (ClassUnloadingWithConcurrentMark) {
1757     bool purged_classes;
1758 
1759     {
1760       GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
1761       purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
1762     }
1763 
1764     {
1765       GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
1766       weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
1767     }
1768   }
1769 
1770   if (G1StringDedup::is_enabled()) {
1771     GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
1772     G1StringDedup::unlink(&g1_is_alive);
1773   }
1774 }
1775 
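// The "prev" bitmap is only ever read through the G1CMBitMapRO view, so
// swapping the writable "next" map into that role (and vice versa)
// requires the casts below.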
1776 void G1ConcurrentMark::swapMarkBitMaps() {
1777   G1CMBitMapRO* temp = _prevMarkBitMap;
1778   _prevMarkBitMap    = (G1CMBitMapRO*)_nextMarkBitMap;
1779   _nextMarkBitMap    = (G1CMBitMap*)  temp;
1780 }
1781 
1782 // Closure for marking entries in SATB buffers.
1783 class G1CMSATBBufferClosure : public SATBBufferClosure {
1784 private:
1785   G1CMTask* _task;
1786   G1CollectedHeap* _g1h;
1787 
1788   // This is very similar to G1CMTask::deal_with_reference, but with
1789   // more relaxed requirements for the argument, so this must be more
1790   // circumspect about treating the argument as an object.
1791   void do_entry(void* entry) const {
1792     _task->increment_refs_reached();
1793     HeapRegion* hr = _g1h->heap_region_containing(entry);
1794     if (entry < hr->next_top_at_mark_start()) {
1795       // Until we get here, we don't know whether entry refers to a valid
1796       // object; it could instead have been a stale reference.
1797       oop obj = static_cast<oop>(entry);
1798       assert(obj->is_oop(true /* ignore mark word */),
1799              "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
1800       _task->make_reference_grey(obj);
1801     }
1802   }
1803 
1804 public:
1805   G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1806     : _task(task), _g1h(g1h) { }
1807 
1808   virtual void do_buffer(void** buffer, size_t size) {
1809     for (size_t i = 0; i < size; ++i) {
1810       do_entry(buffer[i]);
1811     }
1812   }
1813 };
1814 
1815 class G1RemarkThreadsClosure : public ThreadClosure {
1816   G1CMSATBBufferClosure _cm_satb_cl;
1817   G1CMOopClosure _cm_cl;
1818   MarkingCodeBlobClosure _code_cl;
1819   int _thread_parity;
1820 
1821  public:
1822   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1823     _cm_satb_cl(task, g1h),
1824     _cm_cl(g1h, g1h->concurrent_mark(), task),
1825     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1826     _thread_parity(Threads::thread_claim_parity()) {}
1827 
1828   void do_thread(Thread* thread) {
1829     if (thread->is_Java_thread()) {
1830       if (thread->claim_oops_do(true, _thread_parity)) {
1831         JavaThread* jt = (JavaThread*)thread;
1832 
        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking,
        // however oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1839         jt->nmethods_do(&_code_cl);
1840 
1841         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1842       }
1843     } else if (thread->is_VM_thread()) {
1844       if (thread->claim_oops_do(true, _thread_parity)) {
1845         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1846       }
1847     }
1848   }
1849 };
1850 
1851 class G1CMRemarkTask: public AbstractGangTask {
1852 private:
1853   G1ConcurrentMark* _cm;
1854 public:
1855   void work(uint worker_id) {
1856     // Since all available tasks are actually started, we should
1857     // only proceed if we're supposed to be active.
1858     if (worker_id < _cm->active_tasks()) {
1859       G1CMTask* task = _cm->task(worker_id);
1860       task->record_start_time();
1861       {
1862         ResourceMark rm;
1863         HandleMark hm;
1864 
1865         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1866         Threads::threads_do(&threads_f);
1867       }
1868 
1869       do {
1870         task->do_marking_step(1000000000.0 /* something very large */,
1871                               true         /* do_termination       */,
1872                               false        /* is_serial            */);
1873       } while (task->has_aborted() && !_cm->has_overflown());
1874       // If we overflow, then we do not want to restart. We instead
1875       // want to abort remark and do concurrent marking again.
1876       task->record_end_time();
1877     }
1878   }
1879 
1880   G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1881     AbstractGangTask("Par Remark"), _cm(cm) {
1882     _cm->terminator()->reset_for_reuse(active_workers);
1883   }
1884 };
1885 
1886 void G1ConcurrentMark::checkpointRootsFinalWork() {
1887   ResourceMark rm;
1888   HandleMark   hm;
1889   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1890 
1891   GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1892 
1893   g1h->ensure_parsability(false);
1894 
1895   // this is remark, so we'll use up all active threads
1896   uint active_workers = g1h->workers()->active_workers();
1897   set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
1899   // value originally calculated in the G1ConcurrentMark
1900   // constructor and pass values of the active workers
1901   // through the gang in the task.
1902 
1903   {
1904     StrongRootsScope srs(active_workers);
1905 
1906     G1CMRemarkTask remarkTask(this, active_workers);
1907     // We will start all available threads, even if we decide that the
1908     // active_workers will be fewer. The extra ones will just bail out
1909     // immediately.
1910     g1h->workers()->run_task(&remarkTask);
1911   }
1912 
1913   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1914   guarantee(has_overflown() ||
1915             satb_mq_set.completed_buffers_num() == 0,
1916             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1917             BOOL_TO_STR(has_overflown()),
1918             satb_mq_set.completed_buffers_num());
1919 
1920   print_stats();
1921 }
1922 
1923 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
1924   // Note we are overriding the read-only view of the prev map here, via
1925   // the cast.
1926   ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
1927 }
1928 
1929 HeapRegion*
1930 G1ConcurrentMark::claim_region(uint worker_id) {
1931   // "checkpoint" the finger
1932   HeapWord* finger = _finger;
1933 
1934   // _heap_end will not change underneath our feet; it only changes at
1935   // yield points.
1936   while (finger < _heap_end) {
1937     assert(_g1h->is_in_g1_reserved(finger), "invariant");
1938 
1939     HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1940 
    // The above heap_region_containing() may return NULL as we always scan
    // and claim regions up to the end of the heap. In this case, just jump
    // to the next region.
1943     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1944 
1945     // Is the gap between reading the finger and doing the CAS too long?
1946     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
1947     if (res == finger && curr_region != NULL) {
1948       // we succeeded
1949       HeapWord*   bottom        = curr_region->bottom();
1950       HeapWord*   limit         = curr_region->next_top_at_mark_start();
1951 
      // Notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further.
1954       assert(_finger >= end, "the finger should have moved forward");
1955 
1956       if (limit > bottom) {
1957         return curr_region;
1958       } else {
1959         assert(limit == bottom,
1960                "the region limit should be at bottom");
1961         // we return NULL and the caller should try calling
1962         // claim_region() again.
1963         return NULL;
1964       }
1965     } else {
1966       assert(_finger > finger, "the finger should have moved forward");
1967       // read it again
1968       finger = _finger;
1969     }
1970   }
1971 
1972   return NULL;
1973 }
1974 
1975 #ifndef PRODUCT
1976 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
1977 private:
1978   G1CollectedHeap* _g1h;
1979   const char* _phase;
1980   int _info;
1981 
1982 public:
1983   VerifyNoCSetOops(const char* phase, int info = -1) :
1984     _g1h(G1CollectedHeap::heap()),
1985     _phase(phase),
1986     _info(info)
1987   { }
1988 
1989   void operator()(oop obj) const {
1990     guarantee(obj->is_oop(),
1991               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
1992               p2i(obj), _phase, _info);
1993     guarantee(!_g1h->obj_in_cs(obj),
1994               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
1995               p2i(obj), _phase, _info);
1996   }
1997 };
1998 
1999 void G1ConcurrentMark::verify_no_cset_oops() {
2000   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2001   if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2002     return;
2003   }
2004 
2005   // Verify entries on the global mark stack
2006   _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
2007 
2008   // Verify entries on the task queues
2009   for (uint i = 0; i < _max_worker_id; ++i) {
2010     G1CMTaskQueue* queue = _task_queues->queue(i);
2011     queue->iterate(VerifyNoCSetOops("Queue", i));
2012   }
2013 
2014   // Verify the global finger
2015   HeapWord* global_finger = finger();
2016   if (global_finger != NULL && global_finger < _heap_end) {
2017     // Since we always iterate over all regions, we might get a NULL HeapRegion
2018     // here.
2019     HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
2020     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2021               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
2022               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
2023   }
2024 
2025   // Verify the task fingers
2026   assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2027   for (uint i = 0; i < parallel_marking_threads(); ++i) {
2028     G1CMTask* task = _tasks[i];
2029     HeapWord* task_finger = task->finger();
2030     if (task_finger != NULL && task_finger < _heap_end) {
2031       // See above note on the global finger verification.
2032       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
2033       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2034                 !task_hr->in_collection_set(),
2035                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2036                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
2037     }
2038   }
2039 }
#endif // PRODUCT

void G1ConcurrentMark::create_live_data() {
2042   _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap);
2043 }
2044 
2045 void G1ConcurrentMark::finalize_live_data() {
2046   _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap);
2047 }
2048 
2049 void G1ConcurrentMark::verify_live_data() {
2050   _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap);
2051 }
2052 
2053 void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
2054   _g1h->g1_rem_set()->clear_card_live_data(workers);
2055 }
2056 
2057 #ifdef ASSERT
2058 void G1ConcurrentMark::verify_live_data_clear() {
2059   _g1h->g1_rem_set()->verify_card_live_data_is_clear();
2060 }
2061 #endif
2062 
2063 void G1ConcurrentMark::print_stats() {
2064   if (!log_is_enabled(Debug, gc, stats)) {
2065     return;
2066   }
2067   log_debug(gc, stats)("---------------------------------------------------------------------");
2068   for (size_t i = 0; i < _active_tasks; ++i) {
2069     _tasks[i]->print_stats();
2070     log_debug(gc, stats)("---------------------------------------------------------------------");
2071   }
2072 }
2073 
2074 void G1ConcurrentMark::abort() {
2075   if (!cmThread()->during_cycle() || _has_aborted) {
2076     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2077     return;
2078   }
2079 
2080   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2081   // concurrent bitmap clearing.
  {
    // Name the GCTraceTime so that its scope (and thus the reported
    // timing) covers the whole block; an unnamed temporary would be
    // destroyed at the end of its own statement.
    GCTraceTime(Debug, gc) trace("Clear Next Bitmap");
    clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
  }
  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  {
    GCTraceTime(Debug, gc) trace("Clear Live Data");
    clear_live_data(_g1h->workers());
  }
  DEBUG_ONLY({
    GCTraceTime(Debug, gc) trace("Verify Live Data Clear");
    verify_live_data_clear();
  })
2098   // Empty mark stack
2099   reset_marking_state();
2100   for (uint i = 0; i < _max_worker_id; ++i) {
2101     _tasks[i]->clear_region_fields();
2102   }
2103   _first_overflow_barrier_sync.abort();
2104   _second_overflow_barrier_sync.abort();
2105   _has_aborted = true;
2106 
2107   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2108   satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking; we'll read
  // the expected_active value from the SATB queue set.
2111   satb_mq_set.set_active_all_threads(
2112                                  false, /* new active value */
2113                                  satb_mq_set.is_active() /* expected_active */);
2114 }
2115 
2116 static void print_ms_time_info(const char* prefix, const char* name,
2117                                NumberSeq& ns) {
2118   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2119                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2120   if (ns.num() > 0) {
2121     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2122                            prefix, ns.sd(), ns.maximum());
2123   }
2124 }
2125 
2126 void G1ConcurrentMark::print_summary_info() {
2127   Log(gc, marking) log;
2128   if (!log.is_trace()) {
2129     return;
2130   }
2131 
2132   log.trace(" Concurrent marking:");
2133   print_ms_time_info("  ", "init marks", _init_times);
2134   print_ms_time_info("  ", "remarks", _remark_times);
  {
    print_ms_time_info("     ", "final marks", _remark_mark_times);
    print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
  }
2140   print_ms_time_info("  ", "cleanups", _cleanup_times);
2141   log.trace("    Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2142             _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2143   if (G1ScrubRemSets) {
2144     log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
2145               _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2146   }
2147   log.trace("  Total stop_world time = %8.2f s.",
2148             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2149   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2150             cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
2151 }
2152 
2153 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2154   _parallel_workers->print_worker_threads_on(st);
2155 }
2156 
2157 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2158   _parallel_workers->threads_do(tc);
2159 }
2160 
2161 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2162   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2163       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
2164   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
2165   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
2166 }
2167 
2168 // Closure for iteration over bitmaps
2169 class G1CMBitMapClosure : public BitMapClosure {
2170 private:
2171   // the bitmap that is being iterated over
2172   G1CMBitMap*                 _nextMarkBitMap;
2173   G1ConcurrentMark*           _cm;
2174   G1CMTask*                   _task;
2175 
2176 public:
  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) :
    _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }
2179 
2180   bool do_bit(size_t offset) {
2181     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
2182     assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert(addr < _cm->finger(), "invariant");
2184     assert(addr >= _task->finger(), "invariant");
2185 
    // We move the task's local finger along.
2187     _task->move_finger_to(addr);
2188 
2189     _task->scan_object(oop(addr));
2190     // we only partially drain the local queue and global stack
2191     _task->drain_local_queue(true);
2192     _task->drain_global_stack(true);
2193 
2194     // if the has_aborted flag has been raised, we need to bail out of
2195     // the iteration
2196     return !_task->has_aborted();
2197   }
2198 };
2199 
2200 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2201   ReferenceProcessor* result = g1h->ref_processor_cm();
2202   assert(result != NULL, "CM reference processor should not be NULL");
2203   return result;
2204 }
2205 
2206 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2207                                G1ConcurrentMark* cm,
2208                                G1CMTask* task)
2209   : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
2210     _g1h(g1h), _cm(cm), _task(task)
2211 { }
2212 
2213 void G1CMTask::setup_for_region(HeapRegion* hr) {
  assert(hr != NULL,
         "claim_region() should have filtered out NULL regions");
2216   _curr_region  = hr;
2217   _finger       = hr->bottom();
2218   update_region_limit();
2219 }
2220 
2221 void G1CMTask::update_region_limit() {
2222   HeapRegion* hr            = _curr_region;
2223   HeapWord* bottom          = hr->bottom();
2224   HeapWord* limit           = hr->next_top_at_mark_start();
2225 
2226   if (limit == bottom) {
2227     // The region was collected underneath our feet.
2228     // We set the finger to bottom to ensure that the bitmap
2229     // iteration that will follow this will not do anything.
2230     // (this is not a condition that holds when we set the region up,
2231     // as the region is not supposed to be empty in the first place)
2232     _finger = bottom;
2233   } else if (limit >= _region_limit) {
2234     assert(limit >= _finger, "peace of mind");
2235   } else {
2236     assert(limit < _region_limit, "only way to get here");
2237     // This can happen under some pretty unusual circumstances.  An
2238     // evacuation pause empties the region underneath our feet (NTAMS
2239     // at bottom). We then do some allocation in the region (NTAMS
2240     // stays at bottom), followed by the region being used as a GC
2241     // alloc region (NTAMS will move to top() and the objects
2242     // originally below it will be grayed). All objects now marked in
2243     // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
2245     // set _finger to be limit to ensure that the bitmap iteration
2246     // doesn't do anything.
2247     _finger = limit;
2248   }
2249 
2250   _region_limit = limit;
2251 }
2252 
2253 void G1CMTask::giveup_current_region() {
2254   assert(_curr_region != NULL, "invariant");
2255   clear_region_fields();
2256 }
2257 
2258 void G1CMTask::clear_region_fields() {
2259   // Values for these three fields that indicate that we're not
2260   // holding on to a region.
2261   _curr_region   = NULL;
2262   _finger        = NULL;
2263   _region_limit  = NULL;
2264 }
2265 
2266 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2267   if (cm_oop_closure == NULL) {
2268     assert(_cm_oop_closure != NULL, "invariant");
2269   } else {
2270     assert(_cm_oop_closure == NULL, "invariant");
2271   }
2272   _cm_oop_closure = cm_oop_closure;
2273 }
2274 
2275 void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
2276   guarantee(nextMarkBitMap != NULL, "invariant");
2277   _nextMarkBitMap                = nextMarkBitMap;
2278   clear_region_fields();
2279 
2280   _calls                         = 0;
2281   _elapsed_time_ms               = 0.0;
2282   _termination_time_ms           = 0.0;
2283   _termination_start_time_ms     = 0.0;
2284 }
2285 
2286 bool G1CMTask::should_exit_termination() {
2287   regular_clock_call();
2288   // This is called when we are in the termination protocol. We should
2289   // quit if, for some reason, this task wants to abort or the global
2290   // stack is not empty (this means that we can get work from it).
2291   return !_cm->mark_stack_empty() || has_aborted();
2292 }
2293 
2294 void G1CMTask::reached_limit() {
  assert(_words_scanned >= _words_scanned_limit ||
         _refs_reached >= _refs_reached_limit,
2297          "shouldn't have been called otherwise");
2298   regular_clock_call();
2299 }
2300 
2301 void G1CMTask::regular_clock_call() {
2302   if (has_aborted()) return;
2303 
2304   // First, we need to recalculate the words scanned and refs reached
2305   // limits for the next clock call.
2306   recalculate_limits();
2307 
  // During the regular clock call we do the following:
2309 
2310   // (1) If an overflow has been flagged, then we abort.
2311   if (_cm->has_overflown()) {
2312     set_has_aborted();
2313     return;
2314   }
2315 
2316   // If we are not concurrent (i.e. we're doing remark) we don't need
2317   // to check anything else. The other steps are only needed during
2318   // the concurrent marking phase.
2319   if (!concurrent()) return;
2320 
2321   // (2) If marking has been aborted for Full GC, then we also abort.
2322   if (_cm->has_aborted()) {
2323     set_has_aborted();
2324     return;
2325   }
2326 
2327   double curr_time_ms = os::elapsedVTime() * 1000.0;
2328 
  // (3) We check whether we should yield. If we have to, then we abort.
2330   if (SuspendibleThreadSet::should_yield()) {
2331     // We should yield. To do this we abort the task. The caller is
2332     // responsible for yielding.
2333     set_has_aborted();
2334     return;
2335   }
2336 
  // (4) We check whether we've reached our time quota. If we have,
2338   // then we abort.
2339   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2340   if (elapsed_time_ms > _time_target_ms) {
2341     set_has_aborted();
2342     _has_timed_out = true;
2343     return;
2344   }
2345 
  // (5) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
2348   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2349   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
    // We do need to process SATB buffers; we'll abort and restart
    // the marking task to do so.
2352     set_has_aborted();
2353     return;
2354   }
2355 }
2356 
2357 void G1CMTask::recalculate_limits() {
2358   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2359   _words_scanned_limit      = _real_words_scanned_limit;
2360 
2361   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2362   _refs_reached_limit       = _real_refs_reached_limit;
2363 }
2364 
2365 void G1CMTask::decrease_limits() {
2366   // This is called when we believe that we're going to do an infrequent
2367   // operation which will increase the per byte scanned cost (i.e. move
2368   // entries to/from the global stack). It basically tries to decrease the
2369   // scanning limit so that the clock is called earlier.
2370 
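  // Since the "real" limits were set to the counts at the last
  // recalculate_limits() call plus one full period, pulling them back by
  // three quarters of a period leaves about a quarter period of further
  // work before the next regular_clock_call().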
2371   _words_scanned_limit = _real_words_scanned_limit -
2372     3 * words_scanned_period / 4;
2373   _refs_reached_limit  = _real_refs_reached_limit -
2374     3 * refs_reached_period / 4;
2375 }
2376 
2377 void G1CMTask::move_entries_to_global_stack() {
2378   // Local array where we'll store the entries that will be popped
2379   // from the local queue.
2380   oop buffer[G1CMMarkStack::OopsPerChunk];
2381 
2382   size_t n = 0;
2383   oop obj;
2384   while (n < G1CMMarkStack::OopsPerChunk && _task_queue->pop_local(obj)) {
2385     buffer[n] = obj;
2386     ++n;
2387   }
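  // NULL-terminate a partially filled chunk; get_entries_from_global_stack()
  // relies on this marker to find the end of such a chunk.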
2388   if (n < G1CMMarkStack::OopsPerChunk) {
2389     buffer[n] = NULL;
2390   }
2391 
2392   if (n > 0) {
2393     if (!_cm->mark_stack_push(buffer)) {
2394       set_has_aborted();
2395     }
2396   }
2397 
2398   // This operation was quite expensive, so decrease the limits.
2399   decrease_limits();
2400 }
2401 
2402 bool G1CMTask::get_entries_from_global_stack() {
2403   // Local array where we'll store the entries that will be popped
2404   // from the global stack.
2405   oop buffer[G1CMMarkStack::OopsPerChunk];
2406 
2407   if (!_cm->mark_stack_pop(buffer)) {
2408     return false;
2409   }
2410 
2411   // We did actually pop at least one entry.
2412   for (size_t i = 0; i < G1CMMarkStack::OopsPerChunk; ++i) {
2413     oop elem = buffer[i];
2414     if (elem == NULL) {
2415       break;
2416     }
2417     bool success = _task_queue->push(elem);
2418     // We only call this when the local queue is empty or under a
2419     // given target limit. So, we do not expect this push to fail.
2420     assert(success, "invariant");
2421   }
2422 
  // This operation was quite expensive, so decrease the limits.
2424   decrease_limits();
2425   return true;
2426 }
2427 
2428 void G1CMTask::drain_local_queue(bool partially) {
2429   if (has_aborted()) return;
2430 
  // Decide what the target size is, depending on whether we're going to
2432   // drain it partially (so that other tasks can steal if they run out
2433   // of things to do) or totally (at the very end).
2434   size_t target_size;
2435   if (partially) {
2436     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2437   } else {
2438     target_size = 0;
2439   }
2440 
2441   if (_task_queue->size() > target_size) {
2442     oop obj;
2443     bool ret = _task_queue->pop_local(obj);
2444     while (ret) {
2445       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
2446       assert(!_g1h->is_on_master_free_list(
2447                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
2448 
2449       scan_object(obj);
2450 
2451       if (_task_queue->size() <= target_size || has_aborted()) {
2452         ret = false;
2453       } else {
2454         ret = _task_queue->pop_local(obj);
2455       }
2456     }
2457   }
2458 }
2459 
2460 void G1CMTask::drain_global_stack(bool partially) {
2461   if (has_aborted()) return;
2462 
2463   // We have a policy to drain the local queue before we attempt to
2464   // drain the global stack.
2465   assert(partially || _task_queue->size() == 0, "invariant");
2466 
  // Decide what the target size is, depending on whether we're going to
2468   // drain it partially (so that other tasks can steal if they run out
2469   // of things to do) or totally (at the very end).
  // Notice that when draining the global mark stack partially, due to the raciness
2471   // of the mark stack size update we might in fact drop below the target. But,
2472   // this is not a problem.
2473   // In case of total draining, we simply process until the global mark stack is
2474   // totally empty, disregarding the size counter.
2475   if (partially) {
2476     size_t const target_size = _cm->partial_mark_stack_size_target();
2477     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2478       if (get_entries_from_global_stack()) {
2479         drain_local_queue(partially);
2480       }
2481     }
2482   } else {
2483     while (!has_aborted() && get_entries_from_global_stack()) {
2484       drain_local_queue(partially);
2485     }
2486   }
2487 }
2488 
// The SATB queue has several assumptions on whether to call the par or
// non-par versions of the methods. This is why some of the code is
// replicated. We should really get rid of the single-threaded version
// of the code to simplify things.
2493 void G1CMTask::drain_satb_buffers() {
2494   if (has_aborted()) return;
2495 
2496   // We set this so that the regular clock knows that we're in the
2497   // middle of draining buffers and doesn't set the abort flag when it
2498   // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
2500   _draining_satb_buffers = true;
2501 
2502   G1CMSATBBufferClosure satb_cl(this, _g1h);
2503   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2504 
2505   // This keeps claiming and applying the closure to completed buffers
2506   // until we run out of buffers or we need to abort.
2507   while (!has_aborted() &&
2508          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2509     regular_clock_call();
2510   }
2511 
2512   _draining_satb_buffers = false;
2513 
2514   assert(has_aborted() ||
2515          concurrent() ||
2516          satb_mq_set.completed_buffers_num() == 0, "invariant");
2517 
  // Again, this was a potentially expensive operation; decrease the
  // limits to get the regular clock call early.
2520   decrease_limits();
2521 }
2522 
2523 void G1CMTask::print_stats() {
2524   log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
2525                        _worker_id, _calls);
2526   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2527                        _elapsed_time_ms, _termination_time_ms);
2528   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
2529                        _step_times_ms.num(), _step_times_ms.avg(),
2530                        _step_times_ms.sd());
2531   log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
2532                        _step_times_ms.maximum(), _step_times_ms.sum());
2533 }
2534 
2535 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
2536   return _task_queues->steal(worker_id, hash_seed, obj);
2537 }
2538 
2539 /*****************************************************************************
2540 
2541     The do_marking_step(time_target_ms, ...) method is the building
2542     block of the parallel marking framework. It can be called in parallel
2543     with other invocations of do_marking_step() on different tasks
2544     (but only one per task, obviously) and concurrently with the
2545     mutator threads, or during remark, hence it eliminates the need
2546     for two versions of the code. When called during remark, it will
2547     pick up from where the task left off during the concurrent marking
    phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
    it needs to yield.
2551 
2552     The data structures that it uses to do marking work are the
2553     following:
2554 
2555       (1) Marking Bitmap. If there are gray objects that appear only
2556       on the bitmap (this happens either when dealing with an overflow
2557       or when the initial marking phase has simply marked the roots
2558       and didn't push them on the stack), then tasks claim heap
2559       regions whose bitmap they then scan to find gray objects. A
2560       global finger indicates where the end of the last claimed region
2561       is. A local finger indicates how far into the region a task has
2562       scanned. The two fingers are used to determine how to gray an
2563       object (i.e. whether simply marking it is OK, as it will be
2564       visited by a task in the future, or whether it needs to be also
2565       pushed on a stack).
2566 
2567       (2) Local Queue. The local queue of the task which is accessed
2568       reasonably efficiently by the task. Other tasks can steal from
2569       it when they run out of work. Throughout the marking phase, a
2570       task attempts to keep its local queue short but not totally
2571       empty, so that entries are available for stealing by other
      tasks. Only when there is no more work will a task totally
      drain its local queue.
2574 
2575       (3) Global Mark Stack. This handles local queue overflow. During
      marking only sets of entries are moved between it and the local
      queues, as access to it requires a mutex and more fine-grained
      interaction with it would cause contention. If it
2579       overflows, then the marking phase should restart and iterate
2580       over the bitmap to identify gray objects. Throughout the marking
2581       phase, tasks attempt to keep the global mark stack at a small
2582       length but not totally empty, so that entries are available for
      popping by other tasks. Only when there is no more work will
      tasks totally drain the global mark stack.
2585 
2586       (4) SATB Buffer Queue. This is where completed SATB buffers are
2587       made available. Buffers are regularly removed from this queue
2588       and scanned for roots, so that the queue doesn't get too
2589       long. During remark, all completed buffers are processed, as
      well as the filled-in parts of any uncompleted buffers.
2591 
2592     The do_marking_step() method tries to abort when the time target
2593     has been reached. There are a few other cases when the
2594     do_marking_step() method also aborts:
2595 
2596       (1) When the marking phase has been aborted (after a Full GC).
2597 
2598       (2) When a global overflow (on the global stack) has been
2599       triggered. Before the task aborts, it will actually sync up with
2600       the other tasks to ensure that all the marking data structures
2601       (local queues, stacks, fingers etc.)  are re-initialized so that
2602       when do_marking_step() completes, the marking phase can
2603       immediately restart.
2604 
2605       (3) When enough completed SATB buffers are available. The
2606       do_marking_step() method only tries to drain SATB buffers right
2607       at the beginning. So, if enough buffers are available, the
2608       marking step aborts and the SATB buffers are processed at
2609       the beginning of the next invocation.
2610 
      (4) To yield. When we have to yield, we abort and yield
2612       right at the end of do_marking_step(). This saves us from a lot
2613       of hassle as, by yielding we might allow a Full GC. If this
2614       happens then objects will be compacted underneath our feet, the
2615       heap might shrink, etc. We save checking for this by just
2616       aborting and doing the yield right at the end.
2617 
2618     From the above it follows that the do_marking_step() method should
2619     be called in a loop (or, otherwise, regularly) until it completes.
2620 
2621     If a marking step completes without its has_aborted() flag being
2622     true, it means it has completed the current marking phase (and
2623     also all other marking tasks have done so and have all synced up).
2624 
2625     A method called regular_clock_call() is invoked "regularly" (in
2626     sub ms intervals) throughout marking. It is this clock method that
2627     checks all the abort conditions which were mentioned above and
2628     decides when the task should abort. A work-based scheme is used to
2629     trigger this clock method: when the number of object words the
2630     marking phase has scanned or the number of references the marking
    phase has visited reach a given limit. Additional invocations of
    the clock method have been planted in a few other strategic places
2633     too. The initial reason for the clock method was to avoid calling
2634     vtime too regularly, as it is quite expensive. So, once it was in
2635     place, it was natural to piggy-back all the other conditions on it
2636     too and not constantly check them throughout the code.
2637 
2638     If do_termination is true then do_marking_step will enter its
2639     termination protocol.
2640 
2641     The value of is_serial must be true when do_marking_step is being
2642     called serially (i.e. by the VMThread) and do_marking_step should
2643     skip any synchronization in the termination and overflow code.
2644     Examples include the serial remark code and the serial reference
2645     processing closures.
2646 
2647     The value of is_serial must be false when do_marking_step is
2648     being called by any of the worker threads in a work gang.
2649     Examples include the concurrent marking code (CMMarkingTask),
2650     the MT remark code, and the MT reference processing closures.
2651 
2652  *****************************************************************************/
2653 
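// Typical invocation pattern (an illustrative sketch, mirroring the remark
// task and reference processing closures above):
//
//   do {
//     task->do_marking_step(target_ms,
//                           true  /* do_termination */,
//                           false /* is_serial */);
//   } while (task->has_aborted() && !cm->has_overflown());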
2654 void G1CMTask::do_marking_step(double time_target_ms,
2655                                bool do_termination,
2656                                bool is_serial) {
2657   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2658   assert(concurrent() == _cm->concurrent(), "they should be the same");
2659 
2660   G1Policy* g1_policy = _g1h->g1_policy();
2661   assert(_task_queues != NULL, "invariant");
2662   assert(_task_queue != NULL, "invariant");
2663   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
2664 
2665   assert(!_claimed,
2666          "only one thread should claim this task at any one time");
2667 
  // OK, this doesn't safeguard against all possible scenarios, as it is
2669   // possible for two threads to set the _claimed flag at the same
2670   // time. But it is only for debugging purposes anyway and it will
2671   // catch most problems.
2672   _claimed = true;
2673 
2674   _start_time_ms = os::elapsedVTime() * 1000.0;
2675 
2676   // If do_stealing is true then do_marking_step will attempt to
2677   // steal work from the other G1CMTasks. It only makes sense to
2678   // enable stealing when the termination protocol is enabled
2679   // and do_marking_step() is not being called serially.
2680   bool do_stealing = do_termination && !is_serial;
2681 
2682   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2683   _time_target_ms = time_target_ms - diff_prediction_ms;
2684 
2685   // set up the variables that are used in the work-based scheme to
2686   // call the regular clock method
2687   _words_scanned = 0;
2688   _refs_reached  = 0;
2689   recalculate_limits();
2690 
2691   // clear all flags
2692   clear_has_aborted();
2693   _has_timed_out = false;
2694   _draining_satb_buffers = false;
2695 
2696   ++_calls;
2697 
  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // locally, on this method's stack.
2701   G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
2702   G1CMOopClosure    cm_oop_closure(_g1h, _cm, this);
2703   set_cm_oop_closure(&cm_oop_closure);
2704 
2705   if (_cm->has_overflown()) {
2706     // This can happen if the mark stack overflows during a GC pause
2707     // and this task, after a yield point, restarts. We have to abort
2708     // as we need to get into the overflow protocol which happens
2709     // right at the end of this task.
2710     set_has_aborted();
2711   }
2712 
2713   // First drain any available SATB buffers. After this, we will not
2714   // look at SATB buffers before the next invocation of this method.
2715   // If enough completed SATB buffers are queued up, the regular clock
2716   // will abort this task so that it restarts.
2717   drain_satb_buffers();
2718   // ...then partially drain the local queue and the global stack
2719   drain_local_queue(true);
2720   drain_global_stack(true);
2721 
2722   do {
2723     if (!has_aborted() && _curr_region != NULL) {
2724       // This means that we're already holding on to a region.
2725       assert(_finger != NULL, "if region is not NULL, then the finger "
2726              "should not be NULL either");
2727 
2728       // We might have restarted this task after an evacuation pause
2729       // which might have evacuated the region we're holding on to
2730       // underneath our feet. Let's read its limit again to make sure
2731       // that we do not iterate over a region of the heap that
2732       // contains garbage (update_region_limit() will also move
2733       // _finger to the start of the region if it is found empty).
2734       update_region_limit();
2735       // We will start from _finger not from the start of the region,
2736       // as we might be restarting this task after aborting half-way
2737       // through scanning this region. In this case, _finger points to
2738       // the address where we last found a marked object. If this is a
2739       // fresh region, _finger points to start().
2740       MemRegion mr = MemRegion(_finger, _region_limit);
2741 
2742       assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2743              "humongous regions should go around loop once only");
2744 
2745       // Some special cases:
2746       // - If the memory region is empty, we can just give up the region.
2747       // - If the current region is humongous then we only need to check
2748       //   the bitmap for the bit associated with the start of the object,
2749       //   scan the object if it's live, and give up the region.
2750       // - Otherwise, iterate over the bitmap of the part of the region
2751       //   that is left. If the iteration completes without aborting,
2752       //   give up the region.
2753       if (mr.is_empty()) {
2754         giveup_current_region();
2755         regular_clock_call();
2756       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2757         if (_nextMarkBitMap->isMarked(mr.start())) {
2758           // The object is marked - apply the closure
2759           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
2760           bitmap_closure.do_bit(offset);
2761         }
2762         // Even if this task aborted while scanning the humongous object
2763         // we can (and should) give up the current region.
2764         giveup_current_region();
2765         regular_clock_call();
2766       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
2767         giveup_current_region();
2768         regular_clock_call();
2769       } else {
2770         assert(has_aborted(), "currently the only way to do so");
2771         // The only way to abort the bitmap iteration is to return
2772         // false from the do_bit() method. However, inside the
2773         // do_bit() method we move the _finger to point to the
2774         // object currently being looked at. So, if we bail out, we
2775         // have definitely set _finger to something non-null.
2776         assert(_finger != NULL, "invariant");
2777 
2778         // Region iteration was actually aborted. So now _finger
2779         // points to the address of the object we last scanned. If we
2780         // leave it there, when we restart this task, we will rescan
2781         // the object. It is easy to avoid this. We move the finger by
2782         // enough to point to the next possible object header (the
2783         // bitmap knows by how much we need to move it as it knows its
2784         // granularity).
2785         assert(_finger < _region_limit, "invariant");
2786         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
2787         // Check if bitmap iteration was aborted while scanning the last object
2788         if (new_finger >= _region_limit) {
2789           giveup_current_region();
2790         } else {
2791           move_finger_to(new_finger);
2792         }
2793       }
2794     }
2795     // At this point we have either completed iterating over the
2796     // region we were holding on to, or we have aborted.
2797 
2798     // We then partially drain the local queue and the global stack.
2799     // (Do we really need this?)
2800     drain_local_queue(true);
2801     drain_global_stack(true);
2802 
2803     // Read the note on the claim_region() method for why it might
2804     // return NULL with potentially more regions available for
2805     // claiming and why we have to check out_of_regions() to determine
2806     // whether we're done or not.
2807     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2808       // We are going to try to claim a new region. We should have
2809       // given up on the previous one.
2810       // Separated the asserts so that we know which one fires.
2811       assert(_curr_region  == NULL, "invariant");
2812       assert(_finger       == NULL, "invariant");
2813       assert(_region_limit == NULL, "invariant");
2814       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2815       if (claimed_region != NULL) {
2816         // Yes, we managed to claim one
2817         setup_for_region(claimed_region);
2818         assert(_curr_region == claimed_region, "invariant");
2819       }
2820       // It is important to call the regular clock here. It might take
2821       // a while to claim a region if, for example, we hit a large
2822       // block of empty regions. So we need to call the regular clock
2823       // method once round the loop to make sure it's called
2824       // frequently enough.
2825       regular_clock_call();
2826     }
2827 
2828     if (!has_aborted() && _curr_region == NULL) {
2829       assert(_cm->out_of_regions(),
2830              "at this point we should be out of regions");
2831     }
2832   } while (_curr_region != NULL && !has_aborted());
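       // We exit the loop above either because we have run out of
       // regions to claim, or because this task has aborted (e.g. the
       // time budget was exceeded, the global mark stack overflowed,
       // or the regular clock noticed pending SATB buffers or a
       // safepoint request).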
2833 
2834   if (!has_aborted()) {
2835     // We cannot check whether the global stack is empty, since other
2836     // tasks might be pushing objects to it concurrently.
2837     assert(_cm->out_of_regions(),
2838            "at this point we should be out of regions");
2839     // Try to reduce the number of available SATB buffers so that
2840     // remark has less work to do.
2841     drain_satb_buffers();
2842   }
2843 
2844   // Since we've done everything else, we can now totally drain the
2845   // local queue and global stack.
2846   drain_local_queue(false);
2847   drain_global_stack(false);
2848 
2849   // Attempt to steal work from other tasks' queues.
2850   if (do_stealing && !has_aborted()) {
2851     // We have not aborted. This means that we have finished all that
2852     // we could. Let's try to do some stealing...
2853 
2854     // We cannot check whether the global stack is empty, since other
2855     // tasks might be pushing objects to it concurrently.
2856     assert(_cm->out_of_regions() && _task_queue->size() == 0,
2857            "only way to reach here");
2858     while (!has_aborted()) {
2859       oop obj;
2860       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
2861         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
2862                "any stolen object should be marked");
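             // scan_object() applies the marking oop closure to all the
             // references in the stolen object, which may in turn push
             // more entries onto our local queue.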
2863         scan_object(obj);
2864 
2865         // And since we're towards the end, let's totally drain the
2866         // local queue and global stack.
2867         drain_local_queue(false);
2868         drain_global_stack(false);
2869       } else {
2870         break;
2871       }
2872     }
2873   }
2874 
2875   // If we have not aborted by this point, try to enter the
2876   // termination protocol.
2877   if (do_termination && !has_aborted()) {
2878     // We cannot check whether the global stack is empty, since other
2879     // tasks might be concurrently pushing objects on it.
2880     // Separated the asserts so that we know which one fires.
2881     assert(_cm->out_of_regions(), "only way to reach here");
2882     assert(_task_queue->size() == 0, "only way to reach here");
2883     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2884 
2885     // The G1CMTask class also extends the TerminatorTerminator class,
2886     // hence its should_exit_termination() method also decides
2887     // whether to exit the termination protocol.
2888     bool finished = (is_serial ||
2889                      _cm->terminator()->offer_termination(this));
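         // In the serial case there are no other tasks to synchronize
         // with, so the expression above considers us finished straight
         // away.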
2890     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2891     _termination_time_ms +=
2892       termination_end_time_ms - _termination_start_time_ms;
2893 
2894     if (finished) {
2895       // We're all done.
2896 
2897       if (_worker_id == 0) {
2898         // let's allow task 0 to do this
2899         if (concurrent()) {
2900           assert(_cm->concurrent_marking_in_progress(), "invariant");
2901           // we need to set this to false before the next
2902           // safepoint. This way we ensure that the marking phase
2903           // doesn't observe any more heap expansions.
2904           _cm->clear_concurrent_marking_in_progress();
2905         }
2906       }
2907 
2908       // We can now guarantee that the global stack is empty, since
2909       // all other tasks have finished. We separated the guarantees so
2910       // that, if a condition is false, we can immediately find out
2911       // which one.
2912       guarantee(_cm->out_of_regions(), "only way to reach here");
2913       guarantee(_cm->mark_stack_empty(), "only way to reach here");
2914       guarantee(_task_queue->size() == 0, "only way to reach here");
2915       guarantee(!_cm->has_overflown(), "only way to reach here");
2916       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
2917     } else {
2918       // Apparently there's more work to do. Let's abort this task. Our
2919       // caller will restart it and hopefully we can find more things to do.
2920       set_has_aborted();
2921     }
2922   }
2923 
2924   // Mainly for debugging purposes to make sure that a pointer to the
2925   // closure which was stack-allocated in this frame doesn't
2926   // escape it by accident.
2927   set_cm_oop_closure(NULL);
2928   double end_time_ms = os::elapsedVTime() * 1000.0;
2929   double elapsed_time_ms = end_time_ms - _start_time_ms;
2930   // Update the step history.
2931   _step_times_ms.add(elapsed_time_ms);
2932 
2933   if (has_aborted()) {
2934     // The task was aborted for some reason.
2935     if (_has_timed_out) {
2936       double diff_ms = elapsed_time_ms - _time_target_ms;
2937       // Keep statistics of how well we did with respect to hitting
2938       // our target only if we actually timed out (if we aborted for
2939       // other reasons, then the results might get skewed).
2940       _marking_step_diffs_ms.add(diff_ms);
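           // These samples feed the predictor consulted at the start of
           // this method, which shrinks future time targets accordingly.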
2941     }
2942 
2943     if (_cm->has_overflown()) {
2944       // This is the interesting one. We aborted because a global
2945       // overflow was raised. This means we have to restart the
2946       // marking phase and start iterating over regions. However, in
2947       // order to do this we have to make sure that all tasks stop
2948       // what they are doing and re-initialize in a safe manner. We
2949       // will achieve this with the use of two barrier sync points.
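           // In outline: the first barrier guarantees that no task is
           // still marking; each task then clears its own region fields;
           // the second barrier guarantees that every task has done so
           // before any of them returns and restarts.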
2950 
2951       if (!is_serial) {
2952         // We only need to enter the sync barrier if being called
2953         // from a parallel context
2954         _cm->enter_first_sync_barrier(_worker_id);
2955 
2956         // When we exit this sync barrier we know that all tasks have
2957         // stopped doing marking work. So, it's now safe to
2958         // re-initialize our data structures. At the end of this method,
2959         // task 0 will clear the global data structures.
2960       }
2961 
2962       // We clear the local state of this task...
2963       clear_region_fields();
2964 
2965       if (!is_serial) {
2966         // ...and enter the second barrier.
2967         _cm->enter_second_sync_barrier(_worker_id);
2968       }
2969       // At this point, if we're in the concurrent phase of
2970       // marking, everything has been re-initialized and we're
2971       // ready to restart.
2972     }
2973   }
2974 
2975   _claimed = false;
2976 }
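
     // A rough sketch of how the concurrent phase typically drives
     // do_marking_step() (the actual caller appears earlier in this
     // file; the names below are illustrative): keep invoking the step,
     // yielding between steps, until either the task stops aborting or
     // marking as a whole has been aborted.
     //
     //   do {
     //     task->do_marking_step(mark_step_duration_ms,
     //                           true  /* do_termination */,
     //                           false /* is_serial */);
     //     cm->do_yield_check();
     //   } while (!cm->has_aborted() && task->has_aborted());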
2977 
2978 G1CMTask::G1CMTask(uint worker_id,
2979                    G1ConcurrentMark* cm,
2980                    G1CMTaskQueue* task_queue,
2981                    G1CMTaskQueueSet* task_queues)
2982   : _g1h(G1CollectedHeap::heap()),
2983     _worker_id(worker_id), _cm(cm),
2984     _claimed(false),
2985     _nextMarkBitMap(NULL), _hash_seed(17),
2986     _task_queue(task_queue),
2987     _task_queues(task_queues),
2988     _cm_oop_closure(NULL) {
2989   guarantee(task_queue != NULL, "invariant");
2990   guarantee(task_queues != NULL, "invariant");
2991 
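       // Seed the step-time predictor with a small initial sample so
       // that the very first prediction is not computed from an empty
       // sequence (0.5ms appears to be an arbitrary, conservative
       // starting value).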
2992   _marking_step_diffs_ms.add(0.5);
2993 }
2994 
2995 // These are formatting macros that are used below to ensure
2996 // consistent formatting. The *_H_* versions are used to format the
2997 // header for a particular value and should be kept consistent with
2998 // the corresponding macro. Also note that most of the macros add
2999 // the necessary white space as a prefix, which makes them easier
3000 // to compose.
3001 
3002 // All the output lines are prefixed with this string to be able to
3003 // identify them easily in a large log file.
3004 #define G1PPRL_LINE_PREFIX            "###"
3005 
3006 #define G1PPRL_ADDR_BASE_FORMAT    " " PTR_FORMAT "-" PTR_FORMAT
3007 #ifdef _LP64
3008 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
3009 #else // _LP64
3010 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
3011 #endif // _LP64
3012 
3013 // For per-region info
3014 #define G1PPRL_TYPE_FORMAT            "   %-4s"
3015 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
3016 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
3017 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
3018 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
3019 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
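
     // Put together with G1PPRL_LINE_PREFIX, a per-region line printed
     // with these macros looks roughly like this (values illustrative):
     //
     //   ###  OLD 0x00000000f0000000-0x00000000f0100000    1048576 ...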
3020 
3021 // For summary info
3022 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
3023 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
3024 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
3025 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
3026 
3027 G1PrintRegionLivenessInfoClosure::
3028 G1PrintRegionLivenessInfoClosure(const char* phase_name)
3029   : _total_used_bytes(0), _total_capacity_bytes(0),
3030     _total_prev_live_bytes(0), _total_next_live_bytes(0),
3031     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
3032   G1CollectedHeap* g1h = G1CollectedHeap::heap();
3033   MemRegion g1_reserved = g1h->g1_reserved();
3034   double now = os::elapsedTime();
3035 
3036   // Print the header of the output.
3037   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX " PHASE %s @ %1.3f", phase_name, now);
3038   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
3039                           G1PPRL_SUM_ADDR_FORMAT("reserved")
3040                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
3041                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
3042                           HeapRegion::GrainBytes);
3043   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3044   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3045                           G1PPRL_TYPE_H_FORMAT
3046                           G1PPRL_ADDR_BASE_H_FORMAT
3047                           G1PPRL_BYTE_H_FORMAT
3048                           G1PPRL_BYTE_H_FORMAT
3049                           G1PPRL_BYTE_H_FORMAT
3050                           G1PPRL_DOUBLE_H_FORMAT
3051                           G1PPRL_BYTE_H_FORMAT
3052                           G1PPRL_BYTE_H_FORMAT,
3053                           "type", "address-range",
3054                           "used", "prev-live", "next-live", "gc-eff",
3055                           "remset", "code-roots");
3056   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3057                           G1PPRL_TYPE_H_FORMAT
3058                           G1PPRL_ADDR_BASE_H_FORMAT
3059                           G1PPRL_BYTE_H_FORMAT
3060                           G1PPRL_BYTE_H_FORMAT
3061                           G1PPRL_BYTE_H_FORMAT
3062                           G1PPRL_DOUBLE_H_FORMAT
3063                           G1PPRL_BYTE_H_FORMAT
3064                           G1PPRL_BYTE_H_FORMAT,
3065                           "", "",
3066                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3067                           "(bytes)", "(bytes)");
3068 }
3069 
3070 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
3071   const char* type       = r->get_type_str();
3072   HeapWord* bottom       = r->bottom();
3073   HeapWord* end          = r->end();
3074   size_t capacity_bytes  = r->capacity();
3075   size_t used_bytes      = r->used();
3076   size_t prev_live_bytes = r->live_bytes();
3077   size_t next_live_bytes = r->next_live_bytes();
3078   double gc_eff          = r->gc_efficiency();
3079   size_t remset_bytes    = r->rem_set()->mem_size();
3080   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
3081 
3082   _total_used_bytes      += used_bytes;
3083   _total_capacity_bytes  += capacity_bytes;
3084   _total_prev_live_bytes += prev_live_bytes;
3085   _total_next_live_bytes += next_live_bytes;
3086   _total_remset_bytes    += remset_bytes;
3087   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3088 
3089   // Print a line for this particular region.
3090   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3091                           G1PPRL_TYPE_FORMAT
3092                           G1PPRL_ADDR_BASE_FORMAT
3093                           G1PPRL_BYTE_FORMAT
3094                           G1PPRL_BYTE_FORMAT
3095                           G1PPRL_BYTE_FORMAT
3096                           G1PPRL_DOUBLE_FORMAT
3097                           G1PPRL_BYTE_FORMAT
3098                           G1PPRL_BYTE_FORMAT,
3099                           type, p2i(bottom), p2i(end),
3100                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3101                           remset_bytes, strong_code_roots_bytes);
3102 
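       // Returning false tells the region iterator to keep going; this
       // closure never terminates the iteration early.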
3103   return false;
3104 }
3105 
3106 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3107   // Add the free-list and static memory overheads to the remembered set total.
3108   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3109   // Print the footer of the output.
3110   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3111   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3112                          " SUMMARY"
3113                          G1PPRL_SUM_MB_FORMAT("capacity")
3114                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3115                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3116                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3117                          G1PPRL_SUM_MB_FORMAT("remset")
3118                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3119                          bytes_to_mb(_total_capacity_bytes),
3120                          bytes_to_mb(_total_used_bytes),
3121                          perc(_total_used_bytes, _total_capacity_bytes),
3122                          bytes_to_mb(_total_prev_live_bytes),
3123                          perc(_total_prev_live_bytes, _total_capacity_bytes),
3124                          bytes_to_mb(_total_next_live_bytes),
3125                          perc(_total_next_live_bytes, _total_capacity_bytes),
3126                          bytes_to_mb(_total_remset_bytes),
3127                          bytes_to_mb(_total_strong_code_roots_bytes));
3128 }