1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "gc/g1/g1Allocator.inline.hpp"
  31 #include "gc/g1/g1BarrierSet.hpp"
  32 #include "gc/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc/g1/g1CollectionSet.hpp"
  34 #include "gc/g1/g1CollectorPolicy.hpp"
  35 #include "gc/g1/g1CollectorState.hpp"
  36 #include "gc/g1/g1ConcurrentRefine.hpp"
  37 #include "gc/g1/g1ConcurrentRefineThread.hpp"
  38 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  39 #include "gc/g1/g1EvacStats.inline.hpp"
  40 #include "gc/g1/g1FullCollector.hpp"
  41 #include "gc/g1/g1GCPhaseTimes.hpp"
  42 #include "gc/g1/g1HeapSizingPolicy.hpp"
  43 #include "gc/g1/g1HeapTransition.hpp"
  44 #include "gc/g1/g1HeapVerifier.hpp"
  45 #include "gc/g1/g1HotCardCache.hpp"
  46 #include "gc/g1/g1MemoryPool.hpp"
  47 #include "gc/g1/g1OopClosures.inline.hpp"
  48 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  49 #include "gc/g1/g1Policy.hpp"
  50 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  51 #include "gc/g1/g1RemSet.hpp"
  52 #include "gc/g1/g1RootClosures.hpp"
  53 #include "gc/g1/g1RootProcessor.hpp"
  54 #include "gc/g1/g1SATBMarkQueueSet.hpp"
  55 #include "gc/g1/g1StringDedup.hpp"
  56 #include "gc/g1/g1ThreadLocalData.hpp"
  57 #include "gc/g1/g1YCTypes.hpp"
  58 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
  59 #include "gc/g1/heapRegion.inline.hpp"
  60 #include "gc/g1/heapRegionRemSet.hpp"
  61 #include "gc/g1/heapRegionSet.inline.hpp"
  62 #include "gc/g1/vm_operations_g1.hpp"
  63 #include "gc/shared/adaptiveSizePolicy.hpp"
  64 #include "gc/shared/gcHeapSummary.hpp"
  65 #include "gc/shared/gcId.hpp"
  66 #include "gc/shared/gcLocker.hpp"
  67 #include "gc/shared/gcTimer.hpp"
  68 #include "gc/shared/gcTrace.hpp"
  69 #include "gc/shared/gcTraceTime.inline.hpp"
  70 #include "gc/shared/generationSpec.hpp"
  71 #include "gc/shared/isGCActiveMark.hpp"
  72 #include "gc/shared/oopStorageParState.hpp"
  73 #include "gc/shared/parallelCleaning.hpp"
  74 #include "gc/shared/preservedMarks.inline.hpp"
  75 #include "gc/shared/suspendibleThreadSet.hpp"
  76 #include "gc/shared/referenceProcessor.inline.hpp"
  77 #include "gc/shared/taskqueue.inline.hpp"
  78 #include "gc/shared/weakProcessor.inline.hpp"
  79 #include "logging/log.hpp"
  80 #include "memory/allocation.hpp"
  81 #include "memory/iterator.hpp"
  82 #include "memory/metaspaceShared.hpp"
  83 #include "memory/resourceArea.hpp"
  84 #include "oops/access.inline.hpp"
  85 #include "oops/compressedOops.inline.hpp"
  86 #include "oops/oop.inline.hpp"
  87 #include "runtime/atomic.hpp"
  88 #include "runtime/flags/flagSetting.hpp"
  89 #include "runtime/handles.inline.hpp"
  90 #include "runtime/init.hpp"
  91 #include "runtime/orderAccess.hpp"
  92 #include "runtime/threadSMR.hpp"
  93 #include "runtime/vmThread.hpp"
  94 #include "utilities/align.hpp"
  95 #include "utilities/globalDefinitions.hpp"
  96 #include "utilities/stack.inline.hpp"
  97 
  98 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  99 
 100 // INVARIANTS/NOTES
 101 //
 102 // All allocation activity covered by the G1CollectedHeap interface is
 103 // serialized by acquiring the HeapLock.  This happens in mem_allocate
 104 // and allocate_new_tlab, which are the "entry" points to the
 105 // allocation code from the rest of the JVM.  (Note that this does not
 106 // apply to TLAB allocation, which is not part of this interface: it
 107 // is done by clients of this interface.)
 108 
 109 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 110  private:
 111   size_t _num_dirtied;
 112   G1CollectedHeap* _g1h;
 113   G1CardTable* _g1_ct;
 114 
 115   HeapRegion* region_for_card(jbyte* card_ptr) const {
 116     return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
 117   }
 118 
 119   bool will_become_free(HeapRegion* hr) const {
 120     // A region will be freed by free_collection_set if the region is in the
 121     // collection set and has not had an evacuation failure.
 122     return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
 123   }
 124 
 125  public:
 126   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
 127     _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
 128 
 129   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 130     HeapRegion* hr = region_for_card(card_ptr);
 131 
 132     // Should only dirty cards in regions that won't be freed.
 133     if (!will_become_free(hr)) {
 134       *card_ptr = G1CardTable::dirty_card_val();
 135       _num_dirtied++;
 136     }
 137 
 138     return true;
 139   }
 140 
 141   size_t num_dirtied()   const { return _num_dirtied; }
 142 };
 143 
 144 
 145 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
 146   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 147 }
 148 
 149 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 150   // The from card cache is not the memory that is actually committed. So we cannot
 151   // take advantage of the zero_filled parameter.
 152   reset_from_card_cache(start_idx, num_regions);
 153 }
 154 
 155 
 156 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
 157                                              MemRegion mr) {
 158   return new HeapRegion(hrs_index, bot(), mr);
 159 }
 160 
 161 // Private methods.
 162 
 163 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 164   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 165          "the only time we use this to allocate a humongous region is "
 166          "when we are allocating a single humongous region");
 167 
 168   HeapRegion* res = _hrm.allocate_free_region(is_old);
 169 
 170   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 171     // Currently, only attempts to allocate GC alloc regions set
 172     // do_expand to true. So, we should only reach here during a
 173     // safepoint. If this assumption changes we might have to
 174     // reconsider the use of _expand_heap_after_alloc_failure.
 175     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 176 
 177     log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
 178                               word_size * HeapWordSize);
 179 
 180     if (expand(word_size * HeapWordSize)) {
 181       // Given that expand() succeeded in expanding the heap, and we
 182       // always expand the heap by an amount aligned to the heap
 183       // region size, the free list should in theory not be empty.
 184       // In either case allocate_free_region() will check for NULL.
 185       res = _hrm.allocate_free_region(is_old);
 186     } else {
 187       _expand_heap_after_alloc_failure = false;
 188     }
 189   }
 190   return res;
 191 }
 192 
 193 HeapWord*
 194 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 195                                                            uint num_regions,
 196                                                            size_t word_size) {
 197   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 198   assert(is_humongous(word_size), "word_size should be humongous");
 199   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 200 
 201   // Index of last region in the series.
 202   uint last = first + num_regions - 1;
 203 
 204   // We need to initialize the region(s) we just discovered. This is
 205   // a bit tricky given that it can happen concurrently with
 206   // refinement threads refining cards on these regions and
 207   // potentially wanting to refine the BOT as they are scanning
 208   // those cards (this can happen shortly after a cleanup; see CR
 209   // 6991377). So we have to set up the region(s) carefully and in
 210   // a specific order.
 211 
 212   // The word size sum of all the regions we will allocate.
 213   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 214   assert(word_size <= word_size_sum, "sanity");
 215 
 216   // This will be the "starts humongous" region.
 217   HeapRegion* first_hr = region_at(first);
 218   // The header of the new object will be placed at the bottom of
 219   // the first region.
 220   HeapWord* new_obj = first_hr->bottom();
 221   // This will be the new top of the new object.
 222   HeapWord* obj_top = new_obj + word_size;
 223 
 224   // First, we need to zero the header of the space that we will be
 225   // allocating. When we update top further down, some refinement
 226   // threads might try to scan the region. By zeroing the header we
 227   // ensure that any thread that will try to scan the region will
 228   // come across the zero klass word and bail out.
 229   //
 230   // NOTE: It would not have been correct to have used
 231   // CollectedHeap::fill_with_object() and make the space look like
 232   // an int array. The thread that is doing the allocation will
 233   // later update the object header to a potentially different array
 234   // type and, for a very short period of time, the klass and length
 235   // fields will be inconsistent. This could cause a refinement
 236   // thread to calculate the object size incorrectly.
 237   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 238 
 239   // Next, pad out the unused tail of the last region with filler
 240   // objects, for improved usage accounting.
 241   // How many words we use for filler objects.
 242   size_t word_fill_size = word_size_sum - word_size;
 243 
 244   // How many words memory we "waste" which cannot hold a filler object.
 245   size_t words_not_fillable = 0;
 246 
 247   if (word_fill_size >= min_fill_size()) {
 248     fill_with_objects(obj_top, word_fill_size);
 249   } else if (word_fill_size > 0) {
 250     // We have space to fill, but we cannot fit an object there.
 251     words_not_fillable = word_fill_size;
 252     word_fill_size = 0;
 253   }
 254 
 255   // We will set up the first region as "starts humongous". This
 256   // will also update the BOT covering all the regions to reflect
 257   // that there is a single object that starts at the bottom of the
 258   // first region.
 259   first_hr->set_starts_humongous(obj_top, word_fill_size);
 260   _g1_policy->remset_tracker()->update_at_allocate(first_hr);
 261   // Then, if there are any, we will set up the "continues
 262   // humongous" regions.
 263   HeapRegion* hr = NULL;
 264   for (uint i = first + 1; i <= last; ++i) {
 265     hr = region_at(i);
 266     hr->set_continues_humongous(first_hr);
 267     _g1_policy->remset_tracker()->update_at_allocate(hr);
 268   }
 269 
 270   // Up to this point no concurrent thread would have been able to
 271   // do any scanning on any region in this series. All the top
 272   // fields still point to bottom, so the intersection between
 273   // [bottom,top] and [card_start,card_end] will be empty. Before we
 274   // update the top fields, we'll do a storestore to make sure that
 275   // no thread sees the update to top before the zeroing of the
 276   // object header and the BOT initialization.
 277   OrderAccess::storestore();
 278 
 279   // Now, we will update the top fields of the "continues humongous"
 280   // regions except the last one.
 281   for (uint i = first; i < last; ++i) {
 282     hr = region_at(i);
 283     hr->set_top(hr->end());
 284   }
 285 
 286   hr = region_at(last);
 287   // If we cannot fit a filler object, we must set top to the end
 288   // of the humongous object, otherwise we cannot iterate the heap
 289   // and the BOT will not be complete.
 290   hr->set_top(hr->end() - words_not_fillable);
 291 
 292   assert(hr->bottom() < obj_top && obj_top <= hr->end(),
 293          "obj_top should be in last region");
 294 
 295   _verifier->check_bitmaps("Humongous Region Allocation", first_hr);
 296 
 297   assert(words_not_fillable == 0 ||
 298          first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
 299          "Miscalculation in humongous allocation");
 300 
 301   increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
 302 
 303   for (uint i = first; i <= last; ++i) {
 304     hr = region_at(i);
 305     _humongous_set.add(hr);
 306     _hr_printer.alloc(hr);
 307   }
 308 
 309   return new_obj;
 310 }
 311 
 312 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
 313   assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
 314   return align_up(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
 315 }
 316 
 317 // If could fit into free regions w/o expansion, try.
 318 // Otherwise, if can expand, do so.
 319 // Otherwise, if using ex regions might help, try with ex given back.
 320 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
 321   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 322 
 323   _verifier->verify_region_sets_optional();
 324 
 325   uint first = G1_NO_HRM_INDEX;
 326   uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
 327 
 328   if (obj_regions == 1) {
 329     // Only one region to allocate, try to use a fast path by directly allocating
 330     // from the free lists. Do not try to expand here, we will potentially do that
 331     // later.
 332     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 333     if (hr != NULL) {
 334       first = hr->hrm_index();
 335     }
 336   } else {
 337     // Policy: Try only empty regions (i.e. already committed first). Maybe we
 338     // are lucky enough to find some.
 339     first = _hrm.find_contiguous_only_empty(obj_regions);
 340     if (first != G1_NO_HRM_INDEX) {
 341       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 342     }
 343   }
 344 
 345   if (first == G1_NO_HRM_INDEX) {
 346     // Policy: We could not find enough regions for the humongous object in the
 347     // free list. Look through the heap to find a mix of free and uncommitted regions.
 348     // If so, try expansion.
 349     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 350     if (first != G1_NO_HRM_INDEX) {
 351       // We found something. Make sure these regions are committed, i.e. expand
 352       // the heap. Alternatively we could do a defragmentation GC.
 353       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
 354                                     word_size * HeapWordSize);
 355 
 356       _hrm.expand_at(first, obj_regions, workers());
 357       g1_policy()->record_new_heap_size(num_regions());
 358 
 359 #ifdef ASSERT
 360       for (uint i = first; i < first + obj_regions; ++i) {
 361         HeapRegion* hr = region_at(i);
 362         assert(hr->is_free(), "sanity");
 363         assert(hr->is_empty(), "sanity");
 364         assert(is_on_master_free_list(hr), "sanity");
 365       }
 366 #endif
 367       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 368     } else {
 369       // Policy: Potentially trigger a defragmentation GC.
 370     }
 371   }
 372 
 373   HeapWord* result = NULL;
 374   if (first != G1_NO_HRM_INDEX) {
 375     result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
 376     assert(result != NULL, "it should always return a valid result");
 377 
 378     // A successful humongous object allocation changes the used space
 379     // information of the old generation so we need to recalculate the
 380     // sizes and update the jstat counters here.
 381     g1mm()->update_sizes();
 382   }
 383 
 384   _verifier->verify_region_sets_optional();
 385 
 386   return result;
 387 }
 388 
 389 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_size,
 390                                              size_t requested_size,
 391                                              size_t* actual_size) {
 392   assert_heap_not_locked_and_not_at_safepoint();
 393   assert(!is_humongous(requested_size), "we do not allow humongous TLABs");
 394 
 395   return attempt_allocation(min_size, requested_size, actual_size);
 396 }
 397 
 398 HeapWord*
 399 G1CollectedHeap::mem_allocate(size_t word_size,
 400                               bool*  gc_overhead_limit_was_exceeded) {
 401   assert_heap_not_locked_and_not_at_safepoint();
 402 
 403   if (is_humongous(word_size)) {
 404     return attempt_allocation_humongous(word_size);
 405   }
 406   size_t dummy = 0;
 407   return attempt_allocation(word_size, word_size, &dummy);
 408 }
 409 
 410 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
 411   ResourceMark rm; // For retrieving the thread names in log messages.
 412 
 413   // Make sure you read the note in attempt_allocation_humongous().
 414 
 415   assert_heap_not_locked_and_not_at_safepoint();
 416   assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
 417          "be called for humongous allocation requests");
 418 
 419   // We should only get here after the first-level allocation attempt
 420   // (attempt_allocation()) failed to allocate.
 421 
 422   // We will loop until a) we manage to successfully perform the
 423   // allocation or b) we successfully schedule a collection which
 424   // fails to perform the allocation. b) is the only case when we'll
 425   // return NULL.
 426   HeapWord* result = NULL;
 427   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
 428     bool should_try_gc;
 429     uint gc_count_before;
 430 
 431     {
 432       MutexLockerEx x(Heap_lock);
 433       result = _allocator->attempt_allocation_locked(word_size);
 434       if (result != NULL) {
 435         return result;
 436       }
 437 
 438       // If the GCLocker is active and we are bound for a GC, try expanding young gen.
 439       // This is different to when only GCLocker::needs_gc() is set: try to avoid
 440       // waiting because the GCLocker is active to not wait too long.
 441       if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
 442         // No need for an ergo message here, can_expand_young_list() does this when
 443         // it returns true.
 444         result = _allocator->attempt_allocation_force(word_size);
 445         if (result != NULL) {
 446           return result;
 447         }
 448       }
 449       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
 450       // the GCLocker initiated GC has been performed and then retry. This includes
 451       // the case when the GC Locker is not active but has not been performed.
 452       should_try_gc = !GCLocker::needs_gc();
 453       // Read the GC count while still holding the Heap_lock.
 454       gc_count_before = total_collections();
 455     }
 456 
 457     if (should_try_gc) {
 458       bool succeeded;
 459       result = do_collection_pause(word_size, gc_count_before, &succeeded,
 460                                    GCCause::_g1_inc_collection_pause);
 461       if (result != NULL) {
 462         assert(succeeded, "only way to get back a non-NULL result");
 463         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
 464                              Thread::current()->name(), p2i(result));
 465         return result;
 466       }
 467 
 468       if (succeeded) {
 469         // We successfully scheduled a collection which failed to allocate. No
 470         // point in trying to allocate further. We'll just return NULL.
 471         log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
 472                              SIZE_FORMAT " words", Thread::current()->name(), word_size);
 473         return NULL;
 474       }
 475       log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
 476                            Thread::current()->name(), word_size);
 477     } else {
 478       // Failed to schedule a collection.
 479       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
 480         log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
 481                                SIZE_FORMAT " words", Thread::current()->name(), word_size);
 482         return NULL;
 483       }
 484       log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
 485       // The GCLocker is either active or the GCLocker initiated
 486       // GC has not yet been performed. Stall until it is and
 487       // then retry the allocation.
 488       GCLocker::stall_until_clear();
 489       gclocker_retry_count += 1;
 490     }
 491 
 492     // We can reach here if we were unsuccessful in scheduling a
 493     // collection (because another thread beat us to it) or if we were
 494     // stalled due to the GC locker. In either can we should retry the
 495     // allocation attempt in case another thread successfully
 496     // performed a collection and reclaimed enough space. We do the
 497     // first attempt (without holding the Heap_lock) here and the
 498     // follow-on attempt will be at the start of the next loop
 499     // iteration (after taking the Heap_lock).
 500     size_t dummy = 0;
 501     result = _allocator->attempt_allocation(word_size, word_size, &dummy);
 502     if (result != NULL) {
 503       return result;
 504     }
 505 
 506     // Give a warning if we seem to be looping forever.
 507     if ((QueuedAllocationWarningCount > 0) &&
 508         (try_count % QueuedAllocationWarningCount == 0)) {
 509       log_warning(gc, alloc)("%s:  Retried allocation %u times for " SIZE_FORMAT " words",
 510                              Thread::current()->name(), try_count, word_size);
 511     }
 512   }
 513 
 514   ShouldNotReachHere();
 515   return NULL;
 516 }
 517 
 518 void G1CollectedHeap::begin_archive_alloc_range(bool open) {
 519   assert_at_safepoint_on_vm_thread();
 520   if (_archive_allocator == NULL) {
 521     _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
 522   }
 523 }
 524 
 525 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
 526   // Allocations in archive regions cannot be of a size that would be considered
 527   // humongous even for a minimum-sized region, because G1 region sizes/boundaries
 528   // may be different at archive-restore time.
 529   return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
 530 }
 531 
 532 HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
 533   assert_at_safepoint_on_vm_thread();
 534   assert(_archive_allocator != NULL, "_archive_allocator not initialized");
 535   if (is_archive_alloc_too_large(word_size)) {
 536     return NULL;
 537   }
 538   return _archive_allocator->archive_mem_allocate(word_size);
 539 }
 540 
 541 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 542                                               size_t end_alignment_in_bytes) {
 543   assert_at_safepoint_on_vm_thread();
 544   assert(_archive_allocator != NULL, "_archive_allocator not initialized");
 545 
 546   // Call complete_archive to do the real work, filling in the MemRegion
 547   // array with the archive regions.
 548   _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
 549   delete _archive_allocator;
 550   _archive_allocator = NULL;
 551 }
 552 
 553 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
 554   assert(ranges != NULL, "MemRegion array NULL");
 555   assert(count != 0, "No MemRegions provided");
 556   MemRegion reserved = _hrm.reserved();
 557   for (size_t i = 0; i < count; i++) {
 558     if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
 559       return false;
 560     }
 561   }
 562   return true;
 563 }
 564 
 565 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
 566                                             size_t count,
 567                                             bool open) {
 568   assert(!is_init_completed(), "Expect to be called at JVM init time");
 569   assert(ranges != NULL, "MemRegion array NULL");
 570   assert(count != 0, "No MemRegions provided");
 571   MutexLockerEx x(Heap_lock);
 572 
 573   MemRegion reserved = _hrm.reserved();
 574   HeapWord* prev_last_addr = NULL;
 575   HeapRegion* prev_last_region = NULL;
 576 
 577   // Temporarily disable pretouching of heap pages. This interface is used
 578   // when mmap'ing archived heap data in, so pre-touching is wasted.
 579   FlagSetting fs(AlwaysPreTouch, false);
 580 
 581   // Enable archive object checking used by G1MarkSweep. We have to let it know
 582   // about each archive range, so that objects in those ranges aren't marked.
 583   G1ArchiveAllocator::enable_archive_object_check();
 584 
 585   // For each specified MemRegion range, allocate the corresponding G1
 586   // regions and mark them as archive regions. We expect the ranges
 587   // in ascending starting address order, without overlap.
 588   for (size_t i = 0; i < count; i++) {
 589     MemRegion curr_range = ranges[i];
 590     HeapWord* start_address = curr_range.start();
 591     size_t word_size = curr_range.word_size();
 592     HeapWord* last_address = curr_range.last();
 593     size_t commits = 0;
 594 
 595     guarantee(reserved.contains(start_address) && reserved.contains(last_address),
 596               "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
 597               p2i(start_address), p2i(last_address));
 598     guarantee(start_address > prev_last_addr,
 599               "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
 600               p2i(start_address), p2i(prev_last_addr));
 601     prev_last_addr = last_address;
 602 
 603     // Check for ranges that start in the same G1 region in which the previous
 604     // range ended, and adjust the start address so we don't try to allocate
 605     // the same region again. If the current range is entirely within that
 606     // region, skip it, just adjusting the recorded top.
 607     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 608     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
 609       start_address = start_region->end();
 610       if (start_address > last_address) {
 611         increase_used(word_size * HeapWordSize);
 612         start_region->set_top(last_address + 1);
 613         continue;
 614       }
 615       start_region->set_top(start_address);
 616       curr_range = MemRegion(start_address, last_address + 1);
 617       start_region = _hrm.addr_to_region(start_address);
 618     }
 619 
 620     // Perform the actual region allocation, exiting if it fails.
 621     // Then note how much new space we have allocated.
 622     if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
 623       return false;
 624     }
 625     increase_used(word_size * HeapWordSize);
 626     if (commits != 0) {
 627       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
 628                                 HeapRegion::GrainWords * HeapWordSize * commits);
 629 
 630     }
 631 
 632     // Mark each G1 region touched by the range as archive, add it to
 633     // the old set, and set top.
 634     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 635     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 636     prev_last_region = last_region;
 637 
 638     while (curr_region != NULL) {
 639       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 640              "Region already in use (index %u)", curr_region->hrm_index());
 641       if (open) {
 642         curr_region->set_open_archive();
 643       } else {
 644         curr_region->set_closed_archive();
 645       }
 646       _hr_printer.alloc(curr_region);
 647       _archive_set.add(curr_region);
 648       HeapWord* top;
 649       HeapRegion* next_region;
 650       if (curr_region != last_region) {
 651         top = curr_region->end();
 652         next_region = _hrm.next_region_in_heap(curr_region);
 653       } else {
 654         top = last_address + 1;
 655         next_region = NULL;
 656       }
 657       curr_region->set_top(top);
 658       curr_region->set_first_dead(top);
 659       curr_region->set_end_of_live(top);
 660       curr_region = next_region;
 661     }
 662 
 663     // Notify mark-sweep of the archive
 664     G1ArchiveAllocator::set_range_archive(curr_range, open);
 665   }
 666   return true;
 667 }
 668 
 669 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
 670   assert(!is_init_completed(), "Expect to be called at JVM init time");
 671   assert(ranges != NULL, "MemRegion array NULL");
 672   assert(count != 0, "No MemRegions provided");
 673   MemRegion reserved = _hrm.reserved();
 674   HeapWord *prev_last_addr = NULL;
 675   HeapRegion* prev_last_region = NULL;
 676 
 677   // For each MemRegion, create filler objects, if needed, in the G1 regions
 678   // that contain the address range. The address range actually within the
 679   // MemRegion will not be modified. That is assumed to have been initialized
 680   // elsewhere, probably via an mmap of archived heap data.
 681   MutexLockerEx x(Heap_lock);
 682   for (size_t i = 0; i < count; i++) {
 683     HeapWord* start_address = ranges[i].start();
 684     HeapWord* last_address = ranges[i].last();
 685 
 686     assert(reserved.contains(start_address) && reserved.contains(last_address),
 687            "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
 688            p2i(start_address), p2i(last_address));
 689     assert(start_address > prev_last_addr,
 690            "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
 691            p2i(start_address), p2i(prev_last_addr));
 692 
 693     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 694     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 695     HeapWord* bottom_address = start_region->bottom();
 696 
 697     // Check for a range beginning in the same region in which the
 698     // previous one ended.
 699     if (start_region == prev_last_region) {
 700       bottom_address = prev_last_addr + 1;
 701     }
 702 
 703     // Verify that the regions were all marked as archive regions by
 704     // alloc_archive_regions.
 705     HeapRegion* curr_region = start_region;
 706     while (curr_region != NULL) {
 707       guarantee(curr_region->is_archive(),
 708                 "Expected archive region at index %u", curr_region->hrm_index());
 709       if (curr_region != last_region) {
 710         curr_region = _hrm.next_region_in_heap(curr_region);
 711       } else {
 712         curr_region = NULL;
 713       }
 714     }
 715 
 716     prev_last_addr = last_address;
 717     prev_last_region = last_region;
 718 
 719     // Fill the memory below the allocated range with dummy object(s),
 720     // if the region bottom does not match the range start, or if the previous
 721     // range ended within the same G1 region, and there is a gap.
 722     if (start_address != bottom_address) {
 723       size_t fill_size = pointer_delta(start_address, bottom_address);
 724       G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
 725       increase_used(fill_size * HeapWordSize);
 726     }
 727   }
 728 }
 729 
 730 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
 731                                                      size_t desired_word_size,
 732                                                      size_t* actual_word_size) {
 733   assert_heap_not_locked_and_not_at_safepoint();
 734   assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
 735          "be called for humongous allocation requests");
 736 
 737   HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
 738 
 739   if (result == NULL) {
 740     *actual_word_size = desired_word_size;
 741     result = attempt_allocation_slow(desired_word_size);
 742   }
 743 
 744   assert_heap_not_locked();
 745   if (result != NULL) {
 746     assert(*actual_word_size != 0, "Actual size must have been set here");
 747     dirty_young_block(result, *actual_word_size);
 748   } else {
 749     *actual_word_size = 0;
 750   }
 751 
 752   return result;
 753 }
 754 
 755 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
 756   assert(!is_init_completed(), "Expect to be called at JVM init time");
 757   assert(ranges != NULL, "MemRegion array NULL");
 758   assert(count != 0, "No MemRegions provided");
 759   MemRegion reserved = _hrm.reserved();
 760   HeapWord* prev_last_addr = NULL;
 761   HeapRegion* prev_last_region = NULL;
 762   size_t size_used = 0;
 763   size_t uncommitted_regions = 0;
 764 
 765   // For each Memregion, free the G1 regions that constitute it, and
 766   // notify mark-sweep that the range is no longer to be considered 'archive.'
 767   MutexLockerEx x(Heap_lock);
 768   for (size_t i = 0; i < count; i++) {
 769     HeapWord* start_address = ranges[i].start();
 770     HeapWord* last_address = ranges[i].last();
 771 
 772     assert(reserved.contains(start_address) && reserved.contains(last_address),
 773            "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
 774            p2i(start_address), p2i(last_address));
 775     assert(start_address > prev_last_addr,
 776            "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
 777            p2i(start_address), p2i(prev_last_addr));
 778     size_used += ranges[i].byte_size();
 779     prev_last_addr = last_address;
 780 
 781     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 782     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 783 
 784     // Check for ranges that start in the same G1 region in which the previous
 785     // range ended, and adjust the start address so we don't try to free
 786     // the same region again. If the current range is entirely within that
 787     // region, skip it.
 788     if (start_region == prev_last_region) {
 789       start_address = start_region->end();
 790       if (start_address > last_address) {
 791         continue;
 792       }
 793       start_region = _hrm.addr_to_region(start_address);
 794     }
 795     prev_last_region = last_region;
 796 
 797     // After verifying that each region was marked as an archive region by
 798     // alloc_archive_regions, set it free and empty and uncommit it.
 799     HeapRegion* curr_region = start_region;
 800     while (curr_region != NULL) {
 801       guarantee(curr_region->is_archive(),
 802                 "Expected archive region at index %u", curr_region->hrm_index());
 803       uint curr_index = curr_region->hrm_index();
 804       _archive_set.remove(curr_region);
 805       curr_region->set_free();
 806       curr_region->set_top(curr_region->bottom());
 807       if (curr_region != last_region) {
 808         curr_region = _hrm.next_region_in_heap(curr_region);
 809       } else {
 810         curr_region = NULL;
 811       }
 812       _hrm.shrink_at(curr_index, 1);
 813       uncommitted_regions++;
 814     }
 815 
 816     // Notify mark-sweep that this is no longer an archive range.
 817     G1ArchiveAllocator::set_range_archive(ranges[i], false);
 818   }
 819 
 820   if (uncommitted_regions != 0) {
 821     log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
 822                               HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
 823   }
 824   decrease_used(size_used);
 825 }
 826 
 827 oop G1CollectedHeap::materialize_archived_object(oop obj) {
 828   assert(obj != NULL, "archived obj is NULL");
 829   assert(MetaspaceShared::is_archive_object(obj), "must be archived object");
 830 
 831   // Loading an archived object makes it strongly reachable. If it is
 832   // loaded during concurrent marking, it must be enqueued to the SATB
 833   // queue, shading the previously white object gray.
 834   G1BarrierSet::enqueue(obj);
 835 
 836   return obj;
 837 }
 838 
 839 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
 840   ResourceMark rm; // For retrieving the thread names in log messages.
 841 
 842   // The structure of this method has a lot of similarities to
 843   // attempt_allocation_slow(). The reason these two were not merged
 844   // into a single one is that such a method would require several "if
 845   // allocation is not humongous do this, otherwise do that"
 846   // conditional paths which would obscure its flow. In fact, an early
 847   // version of this code did use a unified method which was harder to
 848   // follow and, as a result, it had subtle bugs that were hard to
 849   // track down. So keeping these two methods separate allows each to
 850   // be more readable. It will be good to keep these two in sync as
 851   // much as possible.
 852 
 853   assert_heap_not_locked_and_not_at_safepoint();
 854   assert(is_humongous(word_size), "attempt_allocation_humongous() "
 855          "should only be called for humongous allocations");
 856 
 857   // Humongous objects can exhaust the heap quickly, so we should check if we
 858   // need to start a marking cycle at each humongous object allocation. We do
 859   // the check before we do the actual allocation. The reason for doing it
 860   // before the allocation is that we avoid having to keep track of the newly
 861   // allocated memory while we do a GC.
 862   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
 863                                            word_size)) {
 864     collect(GCCause::_g1_humongous_allocation);
 865   }
 866 
 867   // We will loop until a) we manage to successfully perform the
 868   // allocation or b) we successfully schedule a collection which
 869   // fails to perform the allocation. b) is the only case when we'll
 870   // return NULL.
 871   HeapWord* result = NULL;
 872   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
 873     bool should_try_gc;
 874     uint gc_count_before;
 875 
 876 
 877     {
 878       MutexLockerEx x(Heap_lock);
 879 
 880       // Given that humongous objects are not allocated in young
 881       // regions, we'll first try to do the allocation without doing a
 882       // collection hoping that there's enough space in the heap.
 883       result = humongous_obj_allocate(word_size);
 884       if (result != NULL) {
 885         size_t size_in_regions = humongous_obj_size_in_regions(word_size);
 886         g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
 887         return result;
 888       }
 889 
 890       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
 891       // the GCLocker initiated GC has been performed and then retry. This includes
 892       // the case when the GC Locker is not active but has not been performed.
 893       should_try_gc = !GCLocker::needs_gc();
 894       // Read the GC count while still holding the Heap_lock.
 895       gc_count_before = total_collections();
 896     }
 897 
 898     if (should_try_gc) {
 899       bool succeeded;
 900       result = do_collection_pause(word_size, gc_count_before, &succeeded,
 901                                    GCCause::_g1_humongous_allocation);
 902       if (result != NULL) {
 903         assert(succeeded, "only way to get back a non-NULL result");
 904         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
 905                              Thread::current()->name(), p2i(result));
 906         return result;
 907       }
 908 
 909       if (succeeded) {
 910         // We successfully scheduled a collection which failed to allocate. No
 911         // point in trying to allocate further. We'll just return NULL.
 912         log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
 913                              SIZE_FORMAT " words", Thread::current()->name(), word_size);
 914         return NULL;
 915       }
 916       log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
 917                            Thread::current()->name(), word_size);
 918     } else {
 919       // Failed to schedule a collection.
 920       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
 921         log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
 922                                SIZE_FORMAT " words", Thread::current()->name(), word_size);
 923         return NULL;
 924       }
 925       log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
 926       // The GCLocker is either active or the GCLocker initiated
 927       // GC has not yet been performed. Stall until it is and
 928       // then retry the allocation.
 929       GCLocker::stall_until_clear();
 930       gclocker_retry_count += 1;
 931     }
 932 
 933 
 934     // We can reach here if we were unsuccessful in scheduling a
 935     // collection (because another thread beat us to it) or if we were
 936     // stalled due to the GC locker. In either can we should retry the
 937     // allocation attempt in case another thread successfully
 938     // performed a collection and reclaimed enough space.
 939     // Humongous object allocation always needs a lock, so we wait for the retry
 940     // in the next iteration of the loop, unlike for the regular iteration case.
 941     // Give a warning if we seem to be looping forever.
 942 
 943     if ((QueuedAllocationWarningCount > 0) &&
 944         (try_count % QueuedAllocationWarningCount == 0)) {
 945       log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
 946                              Thread::current()->name(), try_count, word_size);
 947     }
 948   }
 949 
 950   ShouldNotReachHere();
 951   return NULL;
 952 }
 953 
 954 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
 955                                                            bool expect_null_mutator_alloc_region) {
 956   assert_at_safepoint_on_vm_thread();
 957   assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
 958          "the current alloc region was unexpectedly found to be non-NULL");
 959 
 960   if (!is_humongous(word_size)) {
 961     return _allocator->attempt_allocation_locked(word_size);
 962   } else {
 963     HeapWord* result = humongous_obj_allocate(word_size);
 964     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
 965       collector_state()->set_initiate_conc_mark_if_possible(true);
 966     }
 967     return result;
 968   }
 969 
 970   ShouldNotReachHere();
 971 }
 972 
 973 class PostCompactionPrinterClosure: public HeapRegionClosure {
 974 private:
 975   G1HRPrinter* _hr_printer;
 976 public:
 977   bool do_heap_region(HeapRegion* hr) {
 978     assert(!hr->is_young(), "not expecting to find young regions");
 979     _hr_printer->post_compaction(hr);
 980     return false;
 981   }
 982 
 983   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
 984     : _hr_printer(hr_printer) { }
 985 };
 986 
 987 void G1CollectedHeap::print_hrm_post_compaction() {
 988   if (_hr_printer.is_active()) {
 989     PostCompactionPrinterClosure cl(hr_printer());
 990     heap_region_iterate(&cl);
 991   }
 992 }
 993 
 994 void G1CollectedHeap::abort_concurrent_cycle() {
 995   // If we start the compaction before the CM threads finish
 996   // scanning the root regions we might trip them over as we'll
 997   // be moving objects / updating references. So let's wait until
 998   // they are done. By telling them to abort, they should complete
 999   // early.
1000   _cm->root_regions()->abort();
1001   _cm->root_regions()->wait_until_scan_finished();
1002 
1003   // Disable discovery and empty the discovered lists
1004   // for the CM ref processor.
1005   _ref_processor_cm->disable_discovery();
1006   _ref_processor_cm->abandon_partial_discovery();
1007   _ref_processor_cm->verify_no_references_recorded();
1008 
1009   // Abandon current iterations of concurrent marking and concurrent
1010   // refinement, if any are in progress.
1011   concurrent_mark()->concurrent_cycle_abort();
1012 }
1013 
1014 void G1CollectedHeap::prepare_heap_for_full_collection() {
1015   // Make sure we'll choose a new allocation region afterwards.
1016   _allocator->release_mutator_alloc_region();
1017   _allocator->abandon_gc_alloc_regions();
1018   g1_rem_set()->cleanupHRRS();
1019 
1020   // We may have added regions to the current incremental collection
1021   // set between the last GC or pause and now. We need to clear the
1022   // incremental collection set and then start rebuilding it afresh
1023   // after this full GC.
1024   abandon_collection_set(collection_set());
1025 
1026   tear_down_region_sets(false /* free_list_only */);
1027 }
1028 
1029 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
1030   assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1031   assert(used() == recalculate_used(), "Should be equal");
1032   _verifier->verify_region_sets_optional();
1033   _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
1034   _verifier->check_bitmaps("Full GC Start");
1035 }
1036 
1037 void G1CollectedHeap::prepare_heap_for_mutators() {
1038   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1039   ClassLoaderDataGraph::purge();
1040   MetaspaceUtils::verify_metrics();
1041 
1042   // Prepare heap for normal collections.
1043   assert(num_free_regions() == 0, "we should not have added any free regions");
1044   rebuild_region_sets(false /* free_list_only */);
1045   abort_refinement();
1046   resize_if_necessary_after_full_collection();
1047 
1048   // Rebuild the strong code root lists for each region
1049   rebuild_strong_code_roots();
1050 
1051   // Start a new incremental collection set for the next pause
1052   start_new_collection_set();
1053 
1054   _allocator->init_mutator_alloc_region();
1055 
1056   // Post collection state updates.
1057   MetaspaceGC::compute_new_size();
1058 }
1059 
1060 void G1CollectedHeap::abort_refinement() {
1061   if (_hot_card_cache->use_cache()) {
1062     _hot_card_cache->reset_hot_cache();
1063   }
1064 
1065   // Discard all remembered set updates.
1066   G1BarrierSet::dirty_card_queue_set().abandon_logs();
1067   assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1068 }
1069 
1070 void G1CollectedHeap::verify_after_full_collection() {
1071   _hrm.verify_optional();
1072   _verifier->verify_region_sets_optional();
1073   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
1074   // Clear the previous marking bitmap, if needed for bitmap verification.
1075   // Note we cannot do this when we clear the next marking bitmap in
1076   // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1077   // objects marked during a full GC against the previous bitmap.
1078   // But we need to clear it before calling check_bitmaps below since
1079   // the full GC has compacted objects and updated TAMS but not updated
1080   // the prev bitmap.
1081   if (G1VerifyBitmaps) {
1082     GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1083     _cm->clear_prev_bitmap(workers());
1084   }
1085   _verifier->check_bitmaps("Full GC End");
1086 
1087   // At this point there should be no regions in the
1088   // entire heap tagged as young.
1089   assert(check_young_list_empty(), "young list should be empty at this point");
1090 
1091   // Note: since we've just done a full GC, concurrent
1092   // marking is no longer active. Therefore we need not
1093   // re-enable reference discovery for the CM ref processor.
1094   // That will be done at the start of the next marking cycle.
1095   // We also know that the STW processor should no longer
1096   // discover any new references.
1097   assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
1098   assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");
1099   _ref_processor_stw->verify_no_references_recorded();
1100   _ref_processor_cm->verify_no_references_recorded();
1101 }
1102 
1103 void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
1104   // Post collection logging.
1105   // We should do this after we potentially resize the heap so
1106   // that all the COMMIT / UNCOMMIT events are generated before
1107   // the compaction events.
1108   print_hrm_post_compaction();
1109   heap_transition->print();
1110   print_heap_after_gc();
1111   print_heap_regions();
1112 #ifdef TRACESPINNING
1113   ParallelTaskTerminator::print_termination_counts();
1114 #endif
1115 }
1116 
1117 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1118                                          bool clear_all_soft_refs) {
1119   assert_at_safepoint_on_vm_thread();
1120 
1121   if (GCLocker::check_active_before_gc()) {
1122     // Full GC was not completed.
1123     return false;
1124   }
1125 
1126   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1127       soft_ref_policy()->should_clear_all_soft_refs();
1128 
1129   G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
1130   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1131 
1132   collector.prepare_collection();
1133   collector.collect();
1134   collector.complete_collection();
1135 
1136   // Full collection was successfully completed.
1137   return true;
1138 }
1139 
1140 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1141   // Currently, there is no facility in the do_full_collection(bool) API to notify
1142   // the caller that the collection did not succeed (e.g., because it was locked
1143   // out by the GC locker). So, right now, we'll ignore the return value.
1144   bool dummy = do_full_collection(true,                /* explicit_gc */
1145                                   clear_all_soft_refs);
1146 }
1147 
1148 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1149   // Capacity, free and used after the GC counted as full regions to
1150   // include the waste in the following calculations.
1151   const size_t capacity_after_gc = capacity();
1152   const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
1153 
1154   // This is enforced in arguments.cpp.
1155   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1156          "otherwise the code below doesn't make sense");
1157 
1158   // We don't have floating point command-line arguments
1159   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1160   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1161   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1162   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1163 
1164   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1165   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1166 
1167   // We have to be careful here as these two calculations can overflow
1168   // 32-bit size_t's.
1169   double used_after_gc_d = (double) used_after_gc;
1170   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1171   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1172 
1173   // Let's make sure that they are both under the max heap size, which
1174   // by default will make them fit into a size_t.
1175   double desired_capacity_upper_bound = (double) max_heap_size;
1176   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1177                                     desired_capacity_upper_bound);
1178   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1179                                     desired_capacity_upper_bound);
1180 
1181   // We can now safely turn them into size_t's.
1182   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1183   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1184 
1185   // This assert only makes sense here, before we adjust them
1186   // with respect to the min and max heap size.
1187   assert(minimum_desired_capacity <= maximum_desired_capacity,
1188          "minimum_desired_capacity = " SIZE_FORMAT ", "
1189          "maximum_desired_capacity = " SIZE_FORMAT,
1190          minimum_desired_capacity, maximum_desired_capacity);
1191 
1192   // Should not be greater than the heap max size. No need to adjust
1193   // it with respect to the heap min size as it's a lower bound (i.e.,
1194   // we'll try to make the capacity larger than it, not smaller).
1195   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1196   // Should not be less than the heap min size. No need to adjust it
1197   // with respect to the heap max size as it's an upper bound (i.e.,
1198   // we'll try to make the capacity smaller than it, not greater).
1199   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1200 
1201   if (capacity_after_gc < minimum_desired_capacity) {
1202     // Don't expand unless it's significant
1203     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1204 
1205     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1206                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
1207                               "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1208                               capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);
1209 
1210     expand(expand_bytes, _workers);
1211 
1212     // No expansion, now see if we want to shrink
1213   } else if (capacity_after_gc > maximum_desired_capacity) {
1214     // Capacity too large, compute shrinking size
1215     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1216 
1217     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1218                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
1219                               "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1220                               capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
1221 
1222     shrink(shrink_bytes);
1223   }
1224 }
1225 
1226 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1227                                                             bool do_gc,
1228                                                             bool clear_all_soft_refs,
1229                                                             bool expect_null_mutator_alloc_region,
1230                                                             bool* gc_succeeded) {
1231   *gc_succeeded = true;
1232   // Let's attempt the allocation first.
1233   HeapWord* result =
1234     attempt_allocation_at_safepoint(word_size,
1235                                     expect_null_mutator_alloc_region);
1236   if (result != NULL) {
1237     return result;
1238   }
1239 
1240   // In a G1 heap, we're supposed to keep allocation from failing by
1241   // incremental pauses.  Therefore, at least for now, we'll favor
1242   // expansion over collection.  (This might change in the future if we can
1243   // do something smarter than full collection to satisfy a failed alloc.)
1244   result = expand_and_allocate(word_size);
1245   if (result != NULL) {
1246     return result;
1247   }
1248 
1249   if (do_gc) {
1250     // Expansion didn't work, we'll try to do a Full GC.
1251     *gc_succeeded = do_full_collection(false, /* explicit_gc */
1252                                        clear_all_soft_refs);
1253   }
1254 
1255   return NULL;
1256 }
1257 
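     // satisfy_failed_allocation() escalates through three attempts using the
     // helper above: (1) allocate / expand, then a regular Full GC; (2) allocate /
     // expand, then a Full GC that also clears all soft references; (3) a final
     // allocation attempt with no GC. Each step is only tried if the previous one
     // neither returned a result nor failed to run its GC.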
1258 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1259                                                      bool* succeeded) {
1260   assert_at_safepoint_on_vm_thread();
1261 
1262   // Attempts to allocate followed by Full GC.
1263   HeapWord* result =
1264     satisfy_failed_allocation_helper(word_size,
1265                                      true,  /* do_gc */
1266                                      false, /* clear_all_soft_refs */
1267                                      false, /* expect_null_mutator_alloc_region */
1268                                      succeeded);
1269 
1270   if (result != NULL || !*succeeded) {
1271     return result;
1272   }
1273 
1274   // Attempts to allocate followed by Full GC that will collect all soft references.
1275   result = satisfy_failed_allocation_helper(word_size,
1276                                             true, /* do_gc */
1277                                             true, /* clear_all_soft_refs */
1278                                             true, /* expect_null_mutator_alloc_region */
1279                                             succeeded);
1280 
1281   if (result != NULL || !*succeeded) {
1282     return result;
1283   }
1284 
1285   // Attempts to allocate, no GC
1286   result = satisfy_failed_allocation_helper(word_size,
1287                                             false, /* do_gc */
1288                                             false, /* clear_all_soft_refs */
1289                                             true,  /* expect_null_mutator_alloc_region */
1290                                             succeeded);
1291 
1292   if (result != NULL) {
1293     return result;
1294   }
1295 
1296   assert(!soft_ref_policy()->should_clear_all_soft_refs(),
1297          "Flag should have been handled and cleared prior to this point");
1298 
1299   // What else?  We might try synchronous finalization later.  If the total
1300   // space available is large enough for the allocation, then a more
1301   // complete compaction phase than we've tried so far might be
1302   // appropriate.
1303   return NULL;
1304 }
1305 
1306 // Attempt to expand the heap sufficiently to support an allocation
1307 // of the given "word_size".  If successful, perform the allocation
1308 // and return the address of the allocated block, or else return
1309 // "NULL".
1310 
1311 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1312   assert_at_safepoint_on_vm_thread();
1313 
1314   _verifier->verify_region_sets_optional();
1315 
1316   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1317   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1318                             word_size * HeapWordSize);
1319 
1320 
1321   if (expand(expand_bytes, _workers)) {
1322     _hrm.verify_optional();
1323     _verifier->verify_region_sets_optional();
1324     return attempt_allocation_at_safepoint(word_size,
1325                                            false /* expect_null_mutator_alloc_region */);
1326   }
1327   return NULL;
1328 }
1329 
1330 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1331   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1332   aligned_expand_bytes = align_up(aligned_expand_bytes,
1333                                        HeapRegion::GrainBytes);
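       // For example, assuming 4 MB regions for illustration: a 5 MB request is
       // rounded up to 8 MB, i.e. two regions.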
1334 
1335   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1336                             expand_bytes, aligned_expand_bytes);
1337 
1338   if (is_maximal_no_gc()) {
1339     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1340     return false;
1341   }
1342 
1343   double expand_heap_start_time_sec = os::elapsedTime();
1344   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1345   assert(regions_to_expand > 0, "Must expand by at least one region");
1346 
1347   uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
1348   if (expand_time_ms != NULL) {
1349     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1350   }
1351 
1352   if (expanded_by > 0) {
1353     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1354     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1355     g1_policy()->record_new_heap_size(num_regions());
1356   } else {
1357     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1358 
1359     // The expansion of the virtual storage space was unsuccessful.
1360     // Let's see if it was because we ran out of swap.
1361     if (G1ExitOnExpansionFailure &&
1362         _hrm.available() >= regions_to_expand) {
1363       // We had head room...
1364       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1365     }
1366   }
1367   return regions_to_expand > 0;
1368 }
1369 
1370 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1371   size_t aligned_shrink_bytes =
1372     ReservedSpace::page_align_size_down(shrink_bytes);
1373   aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1374                                          HeapRegion::GrainBytes);
1375   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1376 
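       // For example, assuming 4 MB regions for illustration: a 7 MB request
       // removes at most one region. _hrm.shrink_by() may uncommit fewer regions
       // than requested if fewer free regions are available at the end of the
       // committed heap.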
1377   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1378   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1379 
1380 
1381   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1382                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1383   if (num_regions_removed > 0) {
1384     g1_policy()->record_new_heap_size(num_regions());
1385   } else {
1386     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1387   }
1388 }
1389 
1390 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1391   _verifier->verify_region_sets_optional();
1392 
1393   // We should only reach here at the end of a Full GC which means we
1394   // should not be holding on to any GC alloc regions. The method
1395   // below will make sure of that and do any remaining clean up.
1396   _allocator->abandon_gc_alloc_regions();
1397 
1398   // Instead of tearing down / rebuilding the free lists here, we
1399   // could instead use the remove_all_pending() method on free_list to
1400   // remove only the ones that we need to remove.
1401   tear_down_region_sets(true /* free_list_only */);
1402   shrink_helper(shrink_bytes);
1403   rebuild_region_sets(true /* free_list_only */);
1404 
1405   _hrm.verify_optional();
1406   _verifier->verify_region_sets_optional();
1407 }
1408 
1409 class OldRegionSetChecker : public HeapRegionSetChecker {
1410 public:
1411   void check_mt_safety() {
1412     // Master Old Set MT safety protocol:
1413     // (a) If we're at a safepoint, operations on the master old set
1414     // should be invoked:
1415     // - by the VM thread (which will serialize them), or
1416     // - by the GC workers while holding the FreeList_lock, if we're
1417     //   at a safepoint for an evacuation pause (this lock is taken
1418     //   anyway when a GC alloc region is retired so that a new one
1419     //   is allocated from the free list), or
1420     // - by the GC workers while holding the OldSets_lock, if we're at a
1421     //   safepoint for a cleanup pause.
1422     // (b) If we're not at a safepoint, operations on the master old set
1423     // should be invoked while holding the Heap_lock.
1424 
1425     if (SafepointSynchronize::is_at_safepoint()) {
1426       guarantee(Thread::current()->is_VM_thread() ||
1427                 FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
1428                 "master old set MT safety protocol at a safepoint");
1429     } else {
1430       guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
1431     }
1432   }
1433   bool is_correct_type(HeapRegion* hr) { return hr->is_old(); }
1434   const char* get_description() { return "Old Regions"; }
1435 };
1436 
1437 class ArchiveRegionSetChecker : public HeapRegionSetChecker {
1438 public:
1439   void check_mt_safety() {
1440     guarantee(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(),
1441               "May only change archive regions during initialization or safepoint.");
1442   }
1443   bool is_correct_type(HeapRegion* hr) { return hr->is_archive(); }
1444   const char* get_description() { return "Archive Regions"; }
1445 };
1446 
1447 class HumongousRegionSetChecker : public HeapRegionSetChecker {
1448 public:
1449   void check_mt_safety() {
1450     // Humongous Set MT safety protocol:
1451     // (a) If we're at a safepoint, operations on the master humongous
1452     // set should be invoked by either the VM thread (which will
1453     // serialize them) or by the GC workers while holding the
1454     // OldSets_lock.
1455     // (b) If we're not at a safepoint, operations on the master
1456     // humongous set should be invoked while holding the Heap_lock.
1457 
1458     if (SafepointSynchronize::is_at_safepoint()) {
1459       guarantee(Thread::current()->is_VM_thread() ||
1460                 OldSets_lock->owned_by_self(),
1461                 "master humongous set MT safety protocol at a safepoint");
1462     } else {
1463       guarantee(Heap_lock->owned_by_self(),
1464                 "master humongous set MT safety protocol outside a safepoint");
1465     }
1466   }
1467   bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
1468   const char* get_description() { return "Humongous Regions"; }
1469 };
1470 
1471 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1472   CollectedHeap(),
1473   _young_gen_sampling_thread(NULL),
1474   _workers(NULL),
1475   _collector_policy(collector_policy),
1476   _card_table(NULL),
1477   _soft_ref_policy(),
1478   _old_set("Old Region Set", new OldRegionSetChecker()),
1479   _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1480   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1481   _bot(NULL),
1482   _listener(),
1483   _hrm(),
1484   _allocator(NULL),
1485   _verifier(NULL),
1486   _summary_bytes_used(0),
1487   _archive_allocator(NULL),
1488   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1489   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1490   _expand_heap_after_alloc_failure(true),
1491   _g1mm(NULL),
1492   _humongous_reclaim_candidates(),
1493   _has_humongous_reclaim_candidates(false),
1494   _hr_printer(),
1495   _collector_state(),
1496   _old_marking_cycles_started(0),
1497   _old_marking_cycles_completed(0),
1498   _eden(),
1499   _survivor(),
1500   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1501   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1502   _g1_policy(new G1Policy(_gc_timer_stw)),
1503   _heap_sizing_policy(NULL),
1504   _collection_set(this, _g1_policy),
1505   _hot_card_cache(NULL),
1506   _g1_rem_set(NULL),
1507   _dirty_card_queue_set(false),
1508   _cm(NULL),
1509   _cm_thread(NULL),
1510   _cr(NULL),
1511   _task_queues(NULL),
1512   _evacuation_failed(false),
1513   _evacuation_failed_info_array(NULL),
1514   _preserved_marks_set(true /* in_c_heap */),
1515 #ifndef PRODUCT
1516   _evacuation_failure_alot_for_current_gc(false),
1517   _evacuation_failure_alot_gc_number(0),
1518   _evacuation_failure_alot_count(0),
1519 #endif
1520   _ref_processor_stw(NULL),
1521   _is_alive_closure_stw(this),
1522   _is_subject_to_discovery_stw(this),
1523   _ref_processor_cm(NULL),
1524   _is_alive_closure_cm(this),
1525   _is_subject_to_discovery_cm(this),
1526   _in_cset_fast_test() {
1527 
1528   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1529                           true /* are_GC_task_threads */,
1530                           false /* are_ConcurrentGC_threads */);
1531   _workers->initialize_workers();
1532   _verifier = new G1HeapVerifier(this);
1533 
1534   _allocator = new G1Allocator(this);
1535 
1536   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1537 
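       // humongous_threshold_for() returns half the given region size, i.e.
       // objects larger than half a region are allocated as humongous objects.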
1538   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1539 
1540   // Override the default _filler_array_max_size so that no humongous filler
1541   // objects are created.
1542   _filler_array_max_size = _humongous_object_threshold_in_words;
1543 
1544   uint n_queues = ParallelGCThreads;
1545   _task_queues = new RefToScanQueueSet(n_queues);
1546 
1547   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1548 
1549   for (uint i = 0; i < n_queues; i++) {
1550     RefToScanQueue* q = new RefToScanQueue();
1551     q->initialize();
1552     _task_queues->register_queue(i, q);
1553     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1554   }
1555 
1556   // Initialize the G1EvacuationFailureALot counters and flags.
1557   NOT_PRODUCT(reset_evacuation_should_fail();)
1558 
1559   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1560 }
1561 
1562 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1563                                                                  size_t size,
1564                                                                  size_t translation_factor) {
1565   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1566   // Allocate a new reserved space, preferring to use large pages.
1567   ReservedSpace rs(size, preferred_page_size);
1568   G1RegionToSpaceMapper* result  =
1569     G1RegionToSpaceMapper::create_mapper(rs,
1570                                          size,
1571                                          rs.alignment(),
1572                                          HeapRegion::GrainBytes,
1573                                          translation_factor,
1574                                          mtGC);
1575 
1576   os::trace_page_sizes_for_requested_size(description,
1577                                           size,
1578                                           preferred_page_size,
1579                                           rs.alignment(),
1580                                           rs.base(),
1581                                           rs.size());
1582 
1583   return result;
1584 }
1585 
1586 jint G1CollectedHeap::initialize_concurrent_refinement() {
1587   jint ecode = JNI_OK;
1588   _cr = G1ConcurrentRefine::create(&ecode);
1589   return ecode;
1590 }
1591 
1592 jint G1CollectedHeap::initialize_young_gen_sampling_thread() {
1593   _young_gen_sampling_thread = new G1YoungRemSetSamplingThread();
1594   if (_young_gen_sampling_thread->osthread() == NULL) {
1595     vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
1596     return JNI_ENOMEM;
1597   }
1598   return JNI_OK;
1599 }
1600 
1601 jint G1CollectedHeap::initialize() {
1602   os::enable_vtime();
1603 
1604   // Necessary to satisfy locking discipline assertions.
1605 
1606   MutexLocker x(Heap_lock);
1607 
1608   // While there are no constraints in the GC code that HeapWordSize
1609   // be any particular value, there are multiple other areas in the
1610   // system which believe this to be true (e.g. oop->object_size in some
1611   // cases incorrectly returns the size in wordSize units rather than
1612   // HeapWordSize).
1613   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1614 
1615   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1616   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1617   size_t heap_alignment = collector_policy()->heap_alignment();
1618 
1619   // Ensure that the sizes are properly aligned.
1620   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1621   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1622   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1623 
1624   // Reserve the maximum.
1625 
1626   // When compressed oops are enabled, the preferred heap base
1627   // is calculated by subtracting the requested size from the
1628   // 32Gb boundary and using the result as the base address for
1629   // heap reservation. If the requested size is not aligned to
1630   // HeapRegion::GrainBytes (i.e. the alignment that is passed
1631   // into the ReservedHeapSpace constructor) then the actual
1632   // base of the reserved heap may end up differing from the
1633   // address that was requested (i.e. the preferred heap base).
1634   // If this happens then we could end up using a non-optimal
1635   // compressed oops mode.
1636 
1637   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1638                                                  heap_alignment);
1639 
1640   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1641 
1642   // Create the barrier set for the entire reserved region.
1643   G1CardTable* ct = new G1CardTable(reserved_region());
1644   ct->initialize();
1645   G1BarrierSet* bs = new G1BarrierSet(ct);
1646   bs->initialize();
1647   assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1648   BarrierSet::set_barrier_set(bs);
1649   _card_table = ct;
1650 
1651   // Create the hot card cache.
1652   _hot_card_cache = new G1HotCardCache(this);
1653 
1654   // Carve out the G1 part of the heap.
1655   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1656   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1657   G1RegionToSpaceMapper* heap_storage =
1658     G1RegionToSpaceMapper::create_mapper(g1_rs,
1659                                          g1_rs.size(),
1660                                          page_size,
1661                                          HeapRegion::GrainBytes,
1662                                          1,
1663                                          mtJavaHeap);
1664   os::trace_page_sizes("Heap",
1665                        collector_policy()->min_heap_byte_size(),
1666                        max_byte_size,
1667                        page_size,
1668                        heap_rs.base(),
1669                        heap_rs.size());
1670   heap_storage->set_mapping_changed_listener(&_listener);
1671 
1672   // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1673   G1RegionToSpaceMapper* bot_storage =
1674     create_aux_memory_mapper("Block Offset Table",
1675                              G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1676                              G1BlockOffsetTable::heap_map_factor());
1677 
1678   G1RegionToSpaceMapper* cardtable_storage =
1679     create_aux_memory_mapper("Card Table",
1680                              G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
1681                              G1CardTable::heap_map_factor());
1682 
1683   G1RegionToSpaceMapper* card_counts_storage =
1684     create_aux_memory_mapper("Card Counts Table",
1685                              G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1686                              G1CardCounts::heap_map_factor());
1687 
1688   size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1689   G1RegionToSpaceMapper* prev_bitmap_storage =
1690     create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1691   G1RegionToSpaceMapper* next_bitmap_storage =
1692     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1693 
1694   _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1695   _card_table->initialize(cardtable_storage);
1696   // Do later initialization work for concurrent refinement.
1697   _hot_card_cache->initialize(card_counts_storage);
1698 
1699   // 6843694 - ensure that the maximum region index can fit
1700   // in the remembered set structures.
1701   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1702   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
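       // For example, with a 16-bit RegionIdx_t this limits the heap to
       // (1 << 15) - 1 = 32767 regions.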
1703 
1704   // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
1705   // start within the first card.
1706   guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
1707   // Also create a G1 rem set.
1708   _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
1709   _g1_rem_set->initialize(max_capacity(), max_regions());
1710 
1711   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1712   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1713   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1714             "too many cards per region");
1715 
1716   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1717 
1718   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1719 
1720   {
1721     HeapWord* start = _hrm.reserved().start();
1722     HeapWord* end = _hrm.reserved().end();
1723     size_t granularity = HeapRegion::GrainBytes;
1724 
1725     _in_cset_fast_test.initialize(start, end, granularity);
1726     _humongous_reclaim_candidates.initialize(start, end, granularity);
1727   }
1728 
1729   // Create the G1ConcurrentMark data structure and thread.
1730   // (Must do this late, so that "max_regions" is defined.)
1731   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1732   if (_cm == NULL || !_cm->completed_initialization()) {
1733     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1734     return JNI_ENOMEM;
1735   }
1736   _cm_thread = _cm->cm_thread();
1737 
1738   // Now expand into the initial heap size.
1739   if (!expand(init_byte_size, _workers)) {
1740     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1741     return JNI_ENOMEM;
1742   }
1743 
1744   // Perform any initialization actions delegated to the policy.
1745   g1_policy()->init(this, &_collection_set);
1746 
1747   G1BarrierSet::satb_mark_queue_set().initialize(this,
1748                                                  SATB_Q_CBL_mon,
1749                                                  SATB_Q_FL_lock,
1750                                                  G1SATBProcessCompletedThreshold,
1751                                                  G1SATBBufferEnqueueingThresholdPercent,
1752                                                  Shared_SATB_Q_lock);
1753 
1754   jint ecode = initialize_concurrent_refinement();
1755   if (ecode != JNI_OK) {
1756     return ecode;
1757   }
1758 
1759   ecode = initialize_young_gen_sampling_thread();
1760   if (ecode != JNI_OK) {
1761     return ecode;
1762   }
1763 
1764   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1765                                                   DirtyCardQ_FL_lock,
1766                                                   (int)concurrent_refine()->yellow_zone(),
1767                                                   (int)concurrent_refine()->red_zone(),
1768                                                   Shared_DirtyCardQ_lock,
1769                                                   NULL,  // fl_owner
1770                                                   true); // init_free_ids
1771 
1772   dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1773                                     DirtyCardQ_FL_lock,
1774                                     -1, // never trigger processing
1775                                     -1, // no limit on length
1776                                     Shared_DirtyCardQ_lock,
1777                                     &G1BarrierSet::dirty_card_queue_set());
1778 
1779   // Here we allocate the dummy HeapRegion that is required by the
1780   // G1AllocRegion class.
1781   HeapRegion* dummy_region = _hrm.get_dummy_region();
1782 
1783   // We'll re-use the same region whether the alloc region requires
1784   // BOT updates or not. If it doesn't, a non-young region would
1785   // complain that it cannot support allocations without BOT updates,
1786   // so we tag the dummy region as eden to avoid that.
1787   dummy_region->set_eden();
1788   // Make sure it's full.
1789   dummy_region->set_top(dummy_region->end());
1790   G1AllocRegion::setup(this, dummy_region);
1791 
1792   _allocator->init_mutator_alloc_region();
1793 
1794   // Create the monitoring and management support so that values
1795   // in the heap have been properly initialized.
1796   _g1mm = new G1MonitoringSupport(this);
1797 
1798   G1StringDedup::initialize();
1799 
1800   _preserved_marks_set.init(ParallelGCThreads);
1801 
1802   _collection_set.initialize(max_regions());
1803 
1804   return JNI_OK;
1805 }
1806 
1807 void G1CollectedHeap::stop() {
1808   // Stop all concurrent threads. We do this to make sure these threads
1809   // do not continue to execute and access resources (e.g. logging)
1810   // that are destroyed during shutdown.
1811   _cr->stop();
1812   _young_gen_sampling_thread->stop();
1813   _cm_thread->stop();
1814   if (G1StringDedup::is_enabled()) {
1815     G1StringDedup::stop();
1816   }
1817 }
1818 
1819 void G1CollectedHeap::safepoint_synchronize_begin() {
1820   SuspendibleThreadSet::synchronize();
1821 }
1822 
1823 void G1CollectedHeap::safepoint_synchronize_end() {
1824   SuspendibleThreadSet::desynchronize();
1825 }
1826 
1827 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1828   return HeapRegion::max_region_size();
1829 }
1830 
1831 void G1CollectedHeap::post_initialize() {
1832   CollectedHeap::post_initialize();
1833   ref_processing_init();
1834 }
1835 
1836 void G1CollectedHeap::ref_processing_init() {
1837   // Reference processing in G1 currently works as follows:
1838   //
1839   // * There are two reference processor instances. One is
1840   //   used to record and process discovered references
1841   //   during concurrent marking; the other is used to
1842   //   record and process references during STW pauses
1843   //   (both full and incremental).
1844   // * Both ref processors need to 'span' the entire heap as
1845   //   the regions in the collection set may be dotted around.
1846   //
1847   // * For the concurrent marking ref processor:
1848   //   * Reference discovery is enabled at initial marking.
1849   //   * Reference discovery is disabled and the discovered
1850   //     references are processed etc. during remarking.
1851   //   * Reference discovery is MT (see below).
1852   //   * Reference discovery requires a barrier (see below).
1853   //   * Reference processing may or may not be MT
1854   //     (depending on the value of ParallelRefProcEnabled
1855   //     and ParallelGCThreads).
1856   //   * A full GC disables reference discovery by the CM
1857   //     ref processor and abandons any entries on its
1858   //     discovered lists.
1859   //
1860   // * For the STW processor:
1861   //   * Non MT discovery is enabled at the start of a full GC.
1862   //   * Processing and enqueueing during a full GC is non-MT.
1863   //   * During a full GC, references are processed after marking.
1864   //
1865   //   * Discovery (may or may not be MT) is enabled at the start
1866   //     of an incremental evacuation pause.
1867   //   * References are processed near the end of a STW evacuation pause.
1868   //   * For both types of GC:
1869   //     * Discovery is atomic - i.e. not concurrent.
1870   //     * Reference discovery will not need a barrier.
1871 
1872   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
1873 
1874   // Concurrent Mark ref processor
1875   _ref_processor_cm =
1876     new ReferenceProcessor(&_is_subject_to_discovery_cm,
1877                            mt_processing,                                  // mt processing
1878                            ParallelGCThreads,                              // degree of mt processing
1879                            (ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery
1880                            MAX2(ParallelGCThreads, ConcGCThreads),         // degree of mt discovery
1881                            false,                                          // Reference discovery is not atomic
1882                            &_is_alive_closure_cm,                          // is alive closure
1883                            true);                                          // allow changes to number of processing threads
1884 
1885   // STW ref processor
1886   _ref_processor_stw =
1887     new ReferenceProcessor(&_is_subject_to_discovery_stw,
1888                            mt_processing,                        // mt processing
1889                            ParallelGCThreads,                    // degree of mt processing
1890                            (ParallelGCThreads > 1),              // mt discovery
1891                            ParallelGCThreads,                    // degree of mt discovery
1892                            true,                                 // Reference discovery is atomic
1893                            &_is_alive_closure_stw,               // is alive closure
1894                            true);                                // allow changes to number of processing threads
1895 }
1896 
1897 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1898   return _collector_policy;
1899 }
1900 
1901 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1902   return &_soft_ref_policy;
1903 }
1904 
1905 size_t G1CollectedHeap::capacity() const {
1906   return _hrm.length() * HeapRegion::GrainBytes;
1907 }
1908 
1909 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1910   return _hrm.total_free_bytes();
1911 }
1912 
1913 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
1914   _hot_card_cache->drain(cl, worker_i);
1915 }
1916 
1917 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
1918   DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1919   size_t n_completed_buffers = 0;
1920   while (dcqs.apply_closure_during_gc(cl, worker_i)) {
1921     n_completed_buffers++;
1922   }
1923   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
1924   dcqs.clear_n_completed_buffers();
1925   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
1926 }
1927 
1928 // Computes the sum of the storage used by the various regions.
1929 size_t G1CollectedHeap::used() const {
1930   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1931   if (_archive_allocator != NULL) {
1932     result += _archive_allocator->used();
1933   }
1934   return result;
1935 }
1936 
1937 size_t G1CollectedHeap::used_unlocked() const {
1938   return _summary_bytes_used;
1939 }
1940 
1941 class SumUsedClosure: public HeapRegionClosure {
1942   size_t _used;
1943 public:
1944   SumUsedClosure() : _used(0) {}
1945   bool do_heap_region(HeapRegion* r) {
1946     _used += r->used();
1947     return false;
1948   }
1949   size_t result() { return _used; }
1950 };
1951 
1952 size_t G1CollectedHeap::recalculate_used() const {
1953   SumUsedClosure blk;
1954   heap_region_iterate(&blk);
1955   return blk.result();
1956 }
1957 
1958 bool  G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
1959   switch (cause) {
1960     case GCCause::_java_lang_system_gc:                 return ExplicitGCInvokesConcurrent;
1961     case GCCause::_dcmd_gc_run:                         return ExplicitGCInvokesConcurrent;
1962     case GCCause::_wb_conc_mark:                        return true;
1963     default :                                           return false;
1964   }
1965 }
1966 
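     // Causes for which this returns true are handled in collect() below with an
     // initial-mark evacuation pause that starts a concurrent cycle, rather than
     // with a Full GC.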
1967 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
1968   switch (cause) {
1969     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
1970     case GCCause::_g1_humongous_allocation: return true;
1971     default:                                return is_user_requested_concurrent_full_gc(cause);
1972   }
1973 }
1974 
1975 #ifndef PRODUCT
1976 void G1CollectedHeap::allocate_dummy_regions() {
1977   // Let's fill up most of the region
1978   size_t word_size = HeapRegion::GrainWords - 1024;
1979   // And as a result the region we'll allocate will be humongous.
1980   guarantee(is_humongous(word_size), "sanity");
1981 
1982   // _filler_array_max_size is set to humongous object threshold
1983   // but temporarily change it to use CollectedHeap::fill_with_object().
1984   SizeTFlagSetting fs(_filler_array_max_size, word_size);
1985 
1986   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
1987     // Let's use the existing mechanism for the allocation
1988     HeapWord* dummy_obj = humongous_obj_allocate(word_size);
1989     if (dummy_obj != NULL) {
1990       MemRegion mr(dummy_obj, word_size);
1991       CollectedHeap::fill_with_object(mr);
1992     } else {
1993       // If we can't allocate once, we probably cannot allocate
1994       // again. Let's get out of the loop.
1995       break;
1996     }
1997   }
1998 }
1999 #endif // !PRODUCT
2000 
2001 void G1CollectedHeap::increment_old_marking_cycles_started() {
2002   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2003          _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2004          "Wrong marking cycle count (started: %d, completed: %d)",
2005          _old_marking_cycles_started, _old_marking_cycles_completed);
2006 
2007   _old_marking_cycles_started++;
2008 }
2009 
2010 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2011   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2012 
2013   // We assume that if concurrent == true, then the caller is a
2014   // concurrent thread that has joined the Suspendible Thread
2015   // Set. If there's ever a cheap way to check this, we should add an
2016   // assert here.
2017 
2018   // Given that this method is called at the end of a Full GC or of a
2019   // concurrent cycle, and those can be nested (i.e., a Full GC can
2020   // interrupt a concurrent cycle), the number of full collections
2021   // completed should be either one (in the case where there was no
2022   // nesting) or two (when a Full GC interrupted a concurrent cycle)
2023   // behind the number of full collections started.
2024 
2025   // This is the case for the inner caller, i.e. a Full GC.
2026   assert(concurrent ||
2027          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2028          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2029          "for inner caller (Full GC): _old_marking_cycles_started = %u "
2030          "is inconsistent with _old_marking_cycles_completed = %u",
2031          _old_marking_cycles_started, _old_marking_cycles_completed);
2032 
2033   // This is the case for the outer caller, i.e. the concurrent cycle.
2034   assert(!concurrent ||
2035          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2036          "for outer caller (concurrent cycle): "
2037          "_old_marking_cycles_started = %u "
2038          "is inconsistent with _old_marking_cycles_completed = %u",
2039          _old_marking_cycles_started, _old_marking_cycles_completed);
2040 
2041   _old_marking_cycles_completed += 1;
2042 
2043   // We need to clear the "in_progress" flag in the CM thread before
2044   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2045   // is set) so that if a waiter requests another System.gc() it doesn't
2046   // incorrectly see that a marking cycle is still in progress.
2047   if (concurrent) {
2048     _cm_thread->set_idle();
2049   }
2050 
2051   // This notify_all() will ensure that a thread that called
2052   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2053   // and is waiting for a full GC to finish will be woken up. It is
2054   // waiting in VM_G1CollectForAllocation::doit_epilogue().
2055   FullGCCount_lock->notify_all();
2056 }
2057 
2058 void G1CollectedHeap::collect(GCCause::Cause cause) {
2059   assert_heap_not_locked();
2060 
2061   uint gc_count_before;
2062   uint old_marking_count_before;
2063   uint full_gc_count_before;
2064   bool retry_gc;
2065 
2066   do {
2067     retry_gc = false;
2068 
2069     {
2070       MutexLocker ml(Heap_lock);
2071 
2072       // Read the GC count while holding the Heap_lock
2073       gc_count_before = total_collections();
2074       full_gc_count_before = total_full_collections();
2075       old_marking_count_before = _old_marking_cycles_started;
2076     }
2077 
2078     if (should_do_concurrent_full_gc(cause)) {
2079       // Schedule an initial-mark evacuation pause that will start a
2080       // concurrent cycle. We're setting word_size to 0 which means that
2081       // we are not requesting a post-GC allocation.
2082       VM_G1CollectForAllocation op(0,     /* word_size */
2083                                    gc_count_before,
2084                                    cause,
2085                                    true,  /* should_initiate_conc_mark */
2086                                    g1_policy()->max_pause_time_ms());
2087       VMThread::execute(&op);
2088       if (!op.pause_succeeded()) {
2089         if (old_marking_count_before == _old_marking_cycles_started) {
2090           retry_gc = op.should_retry_gc();
2091         } else {
2092           // A Full GC happened while we were trying to schedule the
2093           // initial-mark GC. No point in starting a new cycle given
2094           // that the whole heap was collected anyway.
2095         }
2096 
2097         if (retry_gc) {
2098           if (GCLocker::is_active_and_needs_gc()) {
2099             GCLocker::stall_until_clear();
2100           }
2101         }
2102       }
2103     } else {
2104       if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2105           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2106 
2107         // Schedule a standard evacuation pause. We're setting word_size
2108         // to 0 which means that we are not requesting a post-GC allocation.
2109         VM_G1CollectForAllocation op(0,     /* word_size */
2110                                      gc_count_before,
2111                                      cause,
2112                                      false, /* should_initiate_conc_mark */
2113                                      g1_policy()->max_pause_time_ms());
2114         VMThread::execute(&op);
2115       } else {
2116         // Schedule a Full GC.
2117         VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2118         VMThread::execute(&op);
2119       }
2120     }
2121   } while (retry_gc);
2122 }
2123 
2124 bool G1CollectedHeap::is_in(const void* p) const {
2125   if (_hrm.reserved().contains(p)) {
2126     // Given that we know that p is in the reserved space,
2127     // heap_region_containing() should successfully
2128     // return the containing region.
2129     HeapRegion* hr = heap_region_containing(p);
2130     return hr->is_in(p);
2131   } else {
2132     return false;
2133   }
2134 }
2135 
2136 #ifdef ASSERT
2137 bool G1CollectedHeap::is_in_exact(const void* p) const {
2138   bool contains = reserved_region().contains(p);
2139   bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2140   if (contains && available) {
2141     return true;
2142   } else {
2143     return false;
2144   }
2145 }
2146 #endif
2147 
2148 // Iteration functions.
2149 
2150 // Iterates an ObjectClosure over all objects within a HeapRegion.
2151 
2152 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2153   ObjectClosure* _cl;
2154 public:
2155   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2156   bool do_heap_region(HeapRegion* r) {
2157     if (!r->is_continues_humongous()) {
2158       r->object_iterate(_cl);
2159     }
2160     return false;
2161   }
2162 };
2163 
2164 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2165   IterateObjectClosureRegionClosure blk(cl);
2166   heap_region_iterate(&blk);
2167 }
2168 
2169 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2170   _hrm.iterate(cl);
2171 }
2172 
2173 void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
2174                                                                  HeapRegionClaimer *hrclaimer,
2175                                                                  uint worker_id) const {
2176   _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
2177 }
2178 
2179 void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
2180                                                          HeapRegionClaimer *hrclaimer) const {
2181   _hrm.par_iterate(cl, hrclaimer, 0);
2182 }
2183 
2184 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2185   _collection_set.iterate(cl);
2186 }
2187 
2188 void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
2189   _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
2190 }
2191 
2192 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2193   HeapRegion* hr = heap_region_containing(addr);
2194   return hr->block_start(addr);
2195 }
2196 
2197 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2198   HeapRegion* hr = heap_region_containing(addr);
2199   return hr->block_size(addr);
2200 }
2201 
2202 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2203   HeapRegion* hr = heap_region_containing(addr);
2204   return hr->block_is_obj(addr);
2205 }
2206 
2207 bool G1CollectedHeap::supports_tlab_allocation() const {
2208   return true;
2209 }
2210 
2211 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2212   return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2213 }
2214 
2215 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2216   return _eden.length() * HeapRegion::GrainBytes;
2217 }
2218 
2219 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2220 // must be equal to the humongous object limit.
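     // For example, with 4 MB regions the humongous threshold is half a region,
     // so the TLAB size is capped at about 2 MB.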
2221 size_t G1CollectedHeap::max_tlab_size() const {
2222   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2223 }
2224 
2225 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2226   return _allocator->unsafe_max_tlab_alloc();
2227 }
2228 
2229 size_t G1CollectedHeap::max_capacity() const {
2230   return _hrm.reserved().byte_size();
2231 }
2232 
2233 jlong G1CollectedHeap::millis_since_last_gc() {
2234   // See the notes in GenCollectedHeap::millis_since_last_gc()
2235   // for more information about the implementation.
2236   jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2237     _g1_policy->collection_pause_end_millis();
2238   if (ret_val < 0) {
2239     log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2240       ". returning zero instead.", ret_val);
2241     return 0;
2242   }
2243   return ret_val;
2244 }
2245 
2246 void G1CollectedHeap::deduplicate_string(oop str) {
2247   assert(java_lang_String::is_instance(str), "invariant");
2248 
2249   if (G1StringDedup::is_enabled()) {
2250     G1StringDedup::deduplicate(str);
2251   }
2252 }
2253 
2254 void G1CollectedHeap::prepare_for_verify() {
2255   _verifier->prepare_for_verify();
2256 }
2257 
2258 void G1CollectedHeap::verify(VerifyOption vo) {
2259   _verifier->verify(vo);
2260 }
2261 
2262 bool G1CollectedHeap::supports_concurrent_phase_control() const {
2263   return true;
2264 }
2265 
2266 const char* const* G1CollectedHeap::concurrent_phases() const {
2267   return _cm_thread->concurrent_phases();
2268 }
2269 
2270 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
2271   return _cm_thread->request_concurrent_phase(phase);
2272 }
2273 
2274 class PrintRegionClosure: public HeapRegionClosure {
2275   outputStream* _st;
2276 public:
2277   PrintRegionClosure(outputStream* st) : _st(st) {}
2278   bool do_heap_region(HeapRegion* r) {
2279     r->print_on(_st);
2280     return false;
2281   }
2282 };
2283 
2284 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2285                                        const HeapRegion* hr,
2286                                        const VerifyOption vo) const {
2287   switch (vo) {
2288   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2289   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2290   case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
2291   default:                            ShouldNotReachHere();
2292   }
2293   return false; // keep some compilers happy
2294 }
2295 
2296 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2297                                        const VerifyOption vo) const {
2298   switch (vo) {
2299   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
2300   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
2301   case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj);
2302   default:                            ShouldNotReachHere();
2303   }
2304   return false; // keep some compilers happy
2305 }
2306 
2307 void G1CollectedHeap::print_heap_regions() const {
2308   LogTarget(Trace, gc, heap, region) lt;
2309   if (lt.is_enabled()) {
2310     LogStream ls(lt);
2311     print_regions_on(&ls);
2312   }
2313 }
2314 
2315 void G1CollectedHeap::print_on(outputStream* st) const {
2316   st->print(" %-20s", "garbage-first heap");
2317   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
2318             capacity()/K, used_unlocked()/K);
2319   st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
2320             p2i(_hrm.reserved().start()),
2321             p2i(_hrm.reserved().end()));
2322   st->cr();
2323   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
2324   uint young_regions = young_regions_count();
2325   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
2326             (size_t) young_regions * HeapRegion::GrainBytes / K);
2327   uint survivor_regions = survivor_regions_count();
2328   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
2329             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
2330   st->cr();
2331   MetaspaceUtils::print_on(st);
2332 }
2333 
2334 void G1CollectedHeap::print_regions_on(outputStream* st) const {
2335   st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
2336                "HS=humongous(starts), HC=humongous(continues), "
2337                "CS=collection set, F=free, A=archive, "
2338                "TAMS=top-at-mark-start (previous, next)");
2339   PrintRegionClosure blk(st);
2340   heap_region_iterate(&blk);
2341 }
2342 
2343 void G1CollectedHeap::print_extended_on(outputStream* st) const {
2344   print_on(st);
2345 
2346   // Print the per-region information.
2347   print_regions_on(st);
2348 }
2349 
2350 void G1CollectedHeap::print_on_error(outputStream* st) const {
2351   this->CollectedHeap::print_on_error(st);
2352 
2353   if (_cm != NULL) {
2354     st->cr();
2355     _cm->print_on_error(st);
2356   }
2357 }
2358 
2359 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2360   workers()->print_worker_threads_on(st);
2361   _cm_thread->print_on(st);
2362   st->cr();
2363   _cm->print_worker_threads_on(st);
2364   _cr->print_threads_on(st);
2365   _young_gen_sampling_thread->print_on(st);
2366   if (G1StringDedup::is_enabled()) {
2367     G1StringDedup::print_worker_threads_on(st);
2368   }
2369 }
2370 
2371 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2372   workers()->threads_do(tc);
2373   tc->do_thread(_cm_thread);
2374   _cm->threads_do(tc);
2375   _cr->threads_do(tc);
2376   tc->do_thread(_young_gen_sampling_thread);
2377   if (G1StringDedup::is_enabled()) {
2378     G1StringDedup::threads_do(tc);
2379   }
2380 }
2381 
2382 void G1CollectedHeap::print_tracing_info() const {
2383   g1_rem_set()->print_summary_info();
2384   concurrent_mark()->print_summary_info();
2385 }
2386 
2387 #ifndef PRODUCT
2388 // Helpful for debugging RSet issues.
2389 
2390 class PrintRSetsClosure : public HeapRegionClosure {
2391 private:
2392   const char* _msg;
2393   size_t _occupied_sum;
2394 
2395 public:
2396   bool do_heap_region(HeapRegion* r) {
2397     HeapRegionRemSet* hrrs = r->rem_set();
2398     size_t occupied = hrrs->occupied();
2399     _occupied_sum += occupied;
2400 
2401     tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
2402     if (occupied == 0) {
2403       tty->print_cr("  RSet is empty");
2404     } else {
2405       hrrs->print();
2406     }
2407     tty->print_cr("----------");
2408     return false;
2409   }
2410 
2411   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
2412     tty->cr();
2413     tty->print_cr("========================================");
2414     tty->print_cr("%s", msg);
2415     tty->cr();
2416   }
2417 
2418   ~PrintRSetsClosure() {
2419     tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
2420     tty->print_cr("========================================");
2421     tty->cr();
2422   }
2423 };
2424 
2425 void G1CollectedHeap::print_cset_rsets() {
2426   PrintRSetsClosure cl("Printing CSet RSets");
2427   collection_set_iterate(&cl);
2428 }
2429 
2430 void G1CollectedHeap::print_all_rsets() {
2431   PrintRSetsClosure cl("Printing All RSets");
2432   heap_region_iterate(&cl);
2433 }
2434 #endif // PRODUCT
2435 
2436 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2437 
2438   size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
2439   size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
2440   size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
2441 
2442   size_t eden_capacity_bytes =
2443     (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
2444 
2445   VirtualSpaceSummary heap_summary = create_heap_space_summary();
2446   return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
2447                        eden_capacity_bytes, survivor_used_bytes, num_regions());
2448 }
2449 
2450 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2451   return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2452                        stats->unused(), stats->used(), stats->region_end_waste(),
2453                        stats->regions_filled(), stats->direct_allocated(),
2454                        stats->failure_used(), stats->failure_waste());
2455 }
2456 
2457 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2458   const G1HeapSummary& heap_summary = create_g1_heap_summary();
2459   gc_tracer->report_gc_heap_summary(when, heap_summary);
2460 
2461   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2462   gc_tracer->report_metaspace_summary(when, metaspace_summary);
2463 }
2464 
2465 G1CollectedHeap* G1CollectedHeap::heap() {
2466   CollectedHeap* heap = Universe::heap();
2467   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2468   assert(heap->kind() == CollectedHeap::G1, "Invalid name");
2469   return (G1CollectedHeap*)heap;
2470 }
2471 
2472 void G1CollectedHeap::gc_prologue(bool full) {
2473   // always_do_update_barrier = false;
2474   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2475 
2476   // This summary needs to be printed before incrementing total collections.
2477   g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2478 
2479   // Update common counters.
2480   increment_total_collections(full /* full gc */);
2481   if (full) {
2482     increment_old_marking_cycles_started();
2483   }
2484 
2485   // Fill TLAB's and such
2486   double start = os::elapsedTime();
2487   accumulate_statistics_all_tlabs();
2488   ensure_parsability(true);
2489   g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2490 }
2491 
2492 void G1CollectedHeap::gc_epilogue(bool full) {
2493   // Update common counters.
2494   if (full) {
2495     // Update the number of full collections that have been completed.
2496     increment_old_marking_cycles_completed(false /* concurrent */);
2497   }
2498 
2499   // We are at the end of the GC. Total collections has already been increased.
2500   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2501 
2502   // FIXME: what is this about?
2503   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2504   // is set.
2505 #if COMPILER2_OR_JVMCI
2506   assert(DerivedPointerTable::is_empty(), "derived pointer present");
2507 #endif
2508   // always_do_update_barrier = true;
2509 
2510   double start = os::elapsedTime();
2511   resize_all_tlabs();
2512   g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2513 
2514   MemoryService::track_memory_usage();
2515   // We have just completed a GC. Update the soft reference
2516   // policy with the new heap occupancy
2517   Universe::update_heap_info_at_gc();
2518 }
2519 
2520 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2521                                                uint gc_count_before,
2522                                                bool* succeeded,
2523                                                GCCause::Cause gc_cause) {
2524   assert_heap_not_locked_and_not_at_safepoint();
2525   VM_G1CollectForAllocation op(word_size,
2526                                gc_count_before,
2527                                gc_cause,
2528                                false, /* should_initiate_conc_mark */
2529                                g1_policy()->max_pause_time_ms());
2530   VMThread::execute(&op);
2531 
2532   HeapWord* result = op.result();
2533   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2534   assert(result == NULL || ret_succeeded,
2535          "the result should be NULL if the VM did not succeed");
2536   *succeeded = ret_succeeded;
2537 
2538   assert_heap_not_locked();
2539   return result;
2540 }
2541 
2542 void G1CollectedHeap::do_concurrent_mark() {
2543   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2544   if (!_cm_thread->in_progress()) {
2545     _cm_thread->set_started();
2546     CGC_lock->notify();
2547   }
2548 }
2549 
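     // Estimate the number of pending cards: the cards in all completed buffers
     // of the global dirty card queue set plus the cards still sitting in each
     // Java thread's local dirty card queue.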
2550 size_t G1CollectedHeap::pending_card_num() {
2551   size_t extra_cards = 0;
2552   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
2553     DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(curr);
2554     extra_cards += dcq.size();
2555   }
2556   DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2557   size_t buffer_size = dcqs.buffer_size();
2558   size_t buffer_num = dcqs.completed_buffers_num();
2559 
2560   return buffer_size * buffer_num + extra_cards;
2561 }
2562 
2563 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2564   // We don't nominate objects with many remembered set entries, on
2565   // the assumption that such objects are likely still live.
2566   HeapRegionRemSet* rem_set = r->rem_set();
2567 
2568   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2569          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2570          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2571 }
2572 
2573 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2574  private:
2575   size_t _total_humongous;
2576   size_t _candidate_humongous;
2577 
2578   DirtyCardQueue _dcq;
2579 
2580   bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
2581     assert(region->is_starts_humongous(), "Must start a humongous object");
2582 
2583     oop obj = oop(region->bottom());
2584 
2585     // Dead objects cannot be eager reclaim candidates. Due to class
2586     // unloading it is unsafe to query their classes so we return early.
2587     if (g1h->is_obj_dead(obj, region)) {
2588       return false;
2589     }
2590 
2591     // If we do not have a complete remembered set for the region, then we cannot
2592     // be sure that we have all references to it.
2593     if (!region->rem_set()->is_complete()) {
2594       return false;
2595     }
2596     // Candidate selection must satisfy the following constraints
2597     // while concurrent marking is in progress:
2598     //
2599     // * In order to maintain SATB invariants, an object must not be
2600     // reclaimed if it was allocated before the start of marking and
2601     // has not had its references scanned.  Such an object must have
2602     // its references (including type metadata) scanned to ensure no
2603     // live objects are missed by the marking process.  Objects
2604     // allocated after the start of concurrent marking don't need to
2605     // be scanned.
2606     //
2607     // * An object must not be reclaimed if it is on the concurrent
2608     // mark stack.  Objects allocated after the start of concurrent
2609     // marking are never pushed on the mark stack.
2610     //
2611     // Nominating only objects allocated after the start of concurrent
2612     // marking is sufficient to meet both constraints.  This may miss
2613     // some objects that satisfy the constraints, but the marking data
2614     // structures don't support efficiently performing the needed
2615     // additional tests or scrubbing of the mark stack.
2616     //
2617     // However, we presently only nominate is_typeArray() objects.
2618     // A humongous object containing references induces remembered
2619     // set entries on other regions.  In order to reclaim such an
2620     // object, those remembered sets would need to be cleaned up.
2621     //
2622     // We also treat is_typeArray() objects specially, allowing them
2623     // to be reclaimed even if allocated before the start of
2624     // concurrent mark.  For this we rely on mark stack insertion to
2625     // exclude is_typeArray() objects, preventing reclaiming an object
2626     // that is in the mark stack.  We also rely on the metadata for
2627     // such objects to be built-in and so ensured to be kept live.
2628     // Frequent allocation and drop of large binary blobs is an
2629     // important use case for eager reclaim, and this special handling
2630     // may reduce needed headroom.
2631 
2632     return obj->is_typeArray() &&
2633            g1h->is_potential_eager_reclaim_candidate(region);
2634   }
2635 
2636  public:
2637   RegisterHumongousWithInCSetFastTestClosure()
2638   : _total_humongous(0),
2639     _candidate_humongous(0),
2640     _dcq(&G1BarrierSet::dirty_card_queue_set()) {
2641   }
2642 
2643   virtual bool do_heap_region(HeapRegion* r) {
2644     if (!r->is_starts_humongous()) {
2645       return false;
2646     }
2647     G1CollectedHeap* g1h = G1CollectedHeap::heap();
2648 
2649     bool is_candidate = humongous_region_is_candidate(g1h, r);
2650     uint rindex = r->hrm_index();
2651     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2652     if (is_candidate) {
2653       _candidate_humongous++;
2654       g1h->register_humongous_region_with_cset(rindex);
2655       // is_candidate already filters out humongous objects with large remembered sets.
2656       // If we have a humongous object with only a few remembered set entries, we simply
2657       // flush these entries into the DCQS. That will result in automatic
2658       // re-evaluation of their remembered set entries during the following evacuation
2659       // phase.
2660       if (!r->rem_set()->is_empty()) {
2661         guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
2662                   "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
2663         G1CardTable* ct = g1h->card_table();
2664         HeapRegionRemSetIterator hrrs(r->rem_set());
2665         size_t card_index;
2666         while (hrrs.has_next(card_index)) {
2667           jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);
2668           // The remembered set might contain references to already freed
2669           // regions. Filter out such entries to avoid failing card table
2670           // verification.
2671           if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {
2672             if (*card_ptr != G1CardTable::dirty_card_val()) {
2673               *card_ptr = G1CardTable::dirty_card_val();
2674               _dcq.enqueue(card_ptr);
2675             }
2676           }
2677         }
2678         assert(hrrs.n_yielded() == r->rem_set()->occupied(),
2679                "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
2680                hrrs.n_yielded(), r->rem_set()->occupied());
2681         // We should only clear the card based remembered set here as we will not
2682         // implicitly rebuild anything else during eager reclaim. Note that at the moment
2683         // (and probably never) we do not enter this path if there are other kinds of
2684         // remembered sets for this region.
2685         r->rem_set()->clear_locked(true /* only_cardset */);
2686         // clear_locked() above sets the state to Empty. However we want to continue
2687         // collecting remembered set entries for humongous regions that were not
2688         // reclaimed.
2689         r->rem_set()->set_state_complete();
2690       }
2691       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
2692     }
2693     _total_humongous++;
2694 
2695     return false;
2696   }
2697 
2698   size_t total_humongous() const { return _total_humongous; }
2699   size_t candidate_humongous() const { return _candidate_humongous; }
2700 
2701   void flush_rem_set_entries() { _dcq.flush(); }
2702 };
2703 
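     // Collect per-region eager-reclaim candidate information and register the
     // candidate humongous regions with the collection set, so that references
     // into them discovered during evacuation mark them as live.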
2704 void G1CollectedHeap::register_humongous_regions_with_cset() {
2705   if (!G1EagerReclaimHumongousObjects) {
2706     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
2707     return;
2708   }
2709   double time = os::elapsed_counter();
2710 
2711   // Collect reclaim candidate information and register candidates with cset.
2712   RegisterHumongousWithInCSetFastTestClosure cl;
2713   heap_region_iterate(&cl);
2714 
2715   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
2716   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
2717                                                                   cl.total_humongous(),
2718                                                                   cl.candidate_humongous());
2719   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
2720 
2721   // Finally flush all remembered set entries to re-check into the global DCQS.
2722   cl.flush_rem_set_entries();
2723 }
2724 
2725 class VerifyRegionRemSetClosure : public HeapRegionClosure {
2726   public:
2727     bool do_heap_region(HeapRegion* hr) {
2728       if (!hr->is_archive() && !hr->is_continues_humongous()) {
2729         hr->verify_rem_set();
2730       }
2731       return false;
2732     }
2733 };
2734 
2735 uint G1CollectedHeap::num_task_queues() const {
2736   return _task_queues->size();
2737 }
2738 
2739 #if TASKQUEUE_STATS
2740 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
2741   st->print_raw_cr("GC Task Stats");
2742   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
2743   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
2744 }
2745 
2746 void G1CollectedHeap::print_taskqueue_stats() const {
2747   if (!log_is_enabled(Trace, gc, task, stats)) {
2748     return;
2749   }
2750   Log(gc, task, stats) log;
2751   ResourceMark rm;
2752   LogStream ls(log.trace());
2753   outputStream* st = &ls;
2754 
2755   print_taskqueue_stats_hdr(st);
2756 
2757   TaskQueueStats totals;
2758   const uint n = num_task_queues();
2759   for (uint i = 0; i < n; ++i) {
2760     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
2761     totals += task_queue(i)->stats;
2762   }
2763   st->print_raw("tot "); totals.print(st); st->cr();
2764 
2765   DEBUG_ONLY(totals.verify());
2766 }
2767 
2768 void G1CollectedHeap::reset_taskqueue_stats() {
2769   const uint n = num_task_queues();
2770   for (uint i = 0; i < n; ++i) {
2771     task_queue(i)->stats.reset();
2772   }
2773 }
2774 #endif // TASKQUEUE_STATS
2775 
2776 void G1CollectedHeap::wait_for_root_region_scanning() {
2777   double scan_wait_start = os::elapsedTime();
2778   // We have to wait until the CM threads finish scanning the
2779   // root regions as it's the only way to ensure that all the
2780   // objects on them have been correctly scanned before we start
2781   // moving them during the GC.
2782   bool waited = _cm->root_regions()->wait_until_scan_finished();
2783   double wait_time_ms = 0.0;
2784   if (waited) {
2785     double scan_wait_end = os::elapsedTime();
2786     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2787   }
2788   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2789 }
2790 
2791 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2792 private:
2793   G1HRPrinter* _hr_printer;
2794 public:
2795   G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
2796 
2797   virtual bool do_heap_region(HeapRegion* r) {
2798     _hr_printer->cset(r);
2799     return false;
2800   }
2801 };
2802 
2803 void G1CollectedHeap::start_new_collection_set() {
2804   collection_set()->start_incremental_building();
2805 
2806   clear_cset_fast_test();
2807 
2808   guarantee(_eden.length() == 0, "eden should have been cleared");
2809   g1_policy()->transfer_survivors_to_cset(survivor());
2810 }
2811 
2812 bool
2813 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2814   assert_at_safepoint_on_vm_thread();
2815   guarantee(!is_gc_active(), "collection is not reentrant");
2816 
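       // Bail out if a thread is currently inside a JNI critical region; the GC
       // locker will initiate a new collection once all threads have left their
       // critical regions.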
2817   if (GCLocker::check_active_before_gc()) {
2818     return false;
2819   }
2820 
2821   _gc_timer_stw->register_gc_start();
2822 
2823   GCIdMark gc_id_mark;
2824   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
2825 
2826   SvcGCMarker sgcm(SvcGCMarker::MINOR);
2827   ResourceMark rm;
2828 
2829   g1_policy()->note_gc_start();
2830 
2831   wait_for_root_region_scanning();
2832 
2833   print_heap_before_gc();
2834   print_heap_regions();
2835   trace_heap_before_gc(_gc_tracer_stw);
2836 
2837   _verifier->verify_region_sets_optional();
2838   _verifier->verify_dirty_young_regions();
2839 
2840   // We should not be doing initial mark unless the conc mark thread is running
2841   if (!_cm_thread->should_terminate()) {
2842     // This call will decide whether this pause is an initial-mark
2843     // pause. If it is, in_initial_mark_gc() will return true
2844     // for the duration of this pause.
2845     g1_policy()->decide_on_conc_mark_initiation();
2846   }
2847 
2848   // We do not allow initial-mark to be piggy-backed on a mixed GC.
2849   assert(!collector_state()->in_initial_mark_gc() ||
2850           collector_state()->in_young_only_phase(), "sanity");
2851 
2852   // We also do not allow mixed GCs during marking.
2853   assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
2854 
2855   // Record whether this pause is an initial mark. When the current
2856   // thread has completed its logging output and it's safe to signal
2857   // the CM thread, the flag's value in the policy has been reset.
2858   bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
2859 
2860   // Inner scope for scope based logging, timers, and stats collection
2861   {
2862     EvacuationInfo evacuation_info;
2863 
2864     if (collector_state()->in_initial_mark_gc()) {
2865       // We are about to start a marking cycle, so we increment the
2866       // full collection counter.
2867       increment_old_marking_cycles_started();
2868       _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
2869     }
2870 
2871     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
2872 
2873     GCTraceCPUTime tcpu;
2874 
2875     G1HeapVerifier::G1VerifyType verify_type;
2876     FormatBuffer<> gc_string("Pause Young ");
2877     if (collector_state()->in_initial_mark_gc()) {
2878       gc_string.append("(Concurrent Start)");
2879       verify_type = G1HeapVerifier::G1VerifyConcurrentStart;
2880     } else if (collector_state()->in_young_only_phase()) {
2881       if (collector_state()->in_young_gc_before_mixed()) {
2882         gc_string.append("(Prepare Mixed)");
2883       } else {
2884         gc_string.append("(Normal)");
2885       }
2886       verify_type = G1HeapVerifier::G1VerifyYoungNormal;
2887     } else {
2888       gc_string.append("(Mixed)");
2889       verify_type = G1HeapVerifier::G1VerifyMixed;
2890     }
2891     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2892 
2893     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2894                                                                   workers()->active_workers(),
2895                                                                   Threads::number_of_non_daemon_threads());
2896     active_workers = workers()->update_active_workers(active_workers);
2897     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2898 
2899     G1MonitoringScope ms(g1mm(),
2900                          false /* full_gc */,
2901                          collector_state()->yc_type() == Mixed /* all_memory_pools_affected */);
2902 
2903     G1HeapTransition heap_transition(this);
2904     size_t heap_used_bytes_before_gc = used();
2905 
2906     // Don't dynamically change the number of GC threads this early.  A value of
2907     // 0 is used to indicate serial work.  When parallel work is done,
2908     // it will be set.
2909 
2910     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2911       IsGCActiveMark x;
2912 
2913       gc_prologue(false);
2914 
2915       if (VerifyRememberedSets) {
2916         log_info(gc, verify)("[Verifying RemSets before GC]");
2917         VerifyRegionRemSetClosure v_cl;
2918         heap_region_iterate(&v_cl);
2919       }
2920 
2921       _verifier->verify_before_gc(verify_type);
2922 
2923       _verifier->check_bitmaps("GC Start");
2924 
2925 #if COMPILER2_OR_JVMCI
2926       DerivedPointerTable::clear();
2927 #endif
2928 
2929       // Please see comment in g1CollectedHeap.hpp and
2930       // G1CollectedHeap::ref_processing_init() to see how
2931       // reference processing currently works in G1.
2932 
2933       // Enable discovery in the STW reference processor
2934       _ref_processor_stw->enable_discovery();
2935 
2936       {
2937         // We want to temporarily turn off discovery by the
2938         // CM ref processor, if necessary, and turn it back
2939         // on again later if we do. Using a scoped
2940         // NoRefDiscovery object will do this.
2941         NoRefDiscovery no_cm_discovery(_ref_processor_cm);
2942 
2943         // Forget the current alloc region (we might even choose it to be part
2944         // of the collection set!).
2945         _allocator->release_mutator_alloc_region();
2946 
2947         // This timing is only used by the ergonomics to handle our pause target.
2948         // It is unclear why this should not include the full pause. We will
2949         // investigate this in CR 7178365.
2950         //
2951         // Preserving the old comment here if that helps the investigation:
2952         //
2953         // The elapsed time induced by the start time below deliberately elides
2954         // the possible verification above.
2955         double sample_start_time_sec = os::elapsedTime();
2956 
2957         g1_policy()->record_collection_pause_start(sample_start_time_sec);
2958 
2959         if (collector_state()->in_initial_mark_gc()) {
2960           concurrent_mark()->pre_initial_mark();
2961         }
2962 
2963         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
2964 
2965         evacuation_info.set_collectionset_regions(collection_set()->region_length());
2966 
2967         // Make sure the remembered sets are up to date. This needs to be
2968         // done before register_humongous_regions_with_cset(), because the
2969         // remembered sets are used there to choose eager reclaim candidates.
2970         // If the remembered sets are not up to date we might miss some
2971         // entries that need to be handled.
2972         g1_rem_set()->cleanupHRRS();
2973 
2974         register_humongous_regions_with_cset();
2975 
2976         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
2977 
2978         // We call this after finalize_collection_set() to
2979         // ensure that the CSet has been finalized.
2980         _cm->verify_no_cset_oops();
2981 
2982         if (_hr_printer.is_active()) {
2983           G1PrintCollectionSetClosure cl(&_hr_printer);
2984           _collection_set.iterate(&cl);
2985         }
2986 
2987         // Initialize the GC alloc regions.
2988         _allocator->init_gc_alloc_regions(evacuation_info);
2989 
2990         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
2991         pre_evacuate_collection_set();
2992 
2993         // Actually do the work...
2994         evacuate_collection_set(&per_thread_states);
2995 
2996         post_evacuate_collection_set(evacuation_info, &per_thread_states);
2997 
2998         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
2999         free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3000 
3001         eagerly_reclaim_humongous_regions();
3002 
3003         record_obj_copy_mem_stats();
3004         _survivor_evac_stats.adjust_desired_plab_sz();
3005         _old_evac_stats.adjust_desired_plab_sz();
3006 
3007         double start = os::elapsedTime();
3008         start_new_collection_set();
3009         g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
3010 
3011         if (evacuation_failed()) {
3012           double recalculate_used_start = os::elapsedTime();
3013           set_used(recalculate_used());
3014           g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
3015 
3016           if (_archive_allocator != NULL) {
3017             _archive_allocator->clear_used();
3018           }
3019           for (uint i = 0; i < ParallelGCThreads; i++) {
3020             if (_evacuation_failed_info_array[i].has_failed()) {
3021               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3022             }
3023           }
3024         } else {
3025           // The "used" of the the collection set have already been subtracted
3026           // when they were freed.  Add in the bytes evacuated.
3027           increase_used(g1_policy()->bytes_copied_during_gc());
3028         }
3029 
3030         if (collector_state()->in_initial_mark_gc()) {
3031           // We have to do this before we notify the CM threads that
3032           // they can start working to make sure that all the
3033           // appropriate initialization is done on the CM object.
3034           concurrent_mark()->post_initial_mark();
3035           // Note that we don't actually trigger the CM thread at
3036           // this point. We do that later when we're sure that
3037           // the current thread has completed its logging output.
3038         }
3039 
3040         allocate_dummy_regions();
3041 
3042         _allocator->init_mutator_alloc_region();
3043 
3044         {
3045           size_t expand_bytes = _heap_sizing_policy->expansion_amount();
3046           if (expand_bytes > 0) {
3047             size_t bytes_before = capacity();
3048             // No need for ergo logging here,
3049             // expansion_amount() does this when it returns a value > 0.
3050             double expand_ms;
3051             if (!expand(expand_bytes, _workers, &expand_ms)) {
3052               // We failed to expand the heap. Cannot do anything about it.
3053             }
3054             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3055           }
3056         }
3057 
3058         // We redo the verification but now with respect to the new CSet which
3059         // has just got initialized after the previous CSet was freed.
3060         _cm->verify_no_cset_oops();
3061 
3062         // This timing is only used by the ergonomics to handle our pause target.
3063         // It is unclear why this should not include the full pause. We will
3064         // investigate this in CR 7178365.
3065         double sample_end_time_sec = os::elapsedTime();
3066         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3067         size_t total_cards_scanned = g1_policy()->phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
3068         g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3069 
3070         evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
3071         evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3072 
3073         if (VerifyRememberedSets) {
3074           log_info(gc, verify)("[Verifying RemSets after GC]");
3075           VerifyRegionRemSetClosure v_cl;
3076           heap_region_iterate(&v_cl);
3077         }
3078 
3079         _verifier->verify_after_gc(verify_type);
3080         _verifier->check_bitmaps("GC End");
3081 
3082         assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
3083         _ref_processor_stw->verify_no_references_recorded();
3084 
3085         // CM reference discovery will be re-enabled if necessary.
3086       }
3087 
3088 #ifdef TRACESPINNING
3089       ParallelTaskTerminator::print_termination_counts();
3090 #endif
3091 
3092       gc_epilogue(false);
3093     }
3094 
3095     // Print the remainder of the GC log output.
3096     if (evacuation_failed()) {
3097       log_info(gc)("To-space exhausted");
3098     }
3099 
3100     g1_policy()->print_phases();
3101     heap_transition.print();
3102 
3103     // It is not yet safe to tell the concurrent mark thread to
3104     // start as we have some optional output below. We don't want the
3105     // output from the concurrent mark thread interfering with this
3106     // logging output either.
3107 
3108     _hrm.verify_optional();
3109     _verifier->verify_region_sets_optional();
3110 
3111     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3112     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3113 
3114     print_heap_after_gc();
3115     print_heap_regions();
3116     trace_heap_after_gc(_gc_tracer_stw);
3117 
3118     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3119     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3120     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3121     // before any GC notifications are raised.
3122     g1mm()->update_sizes();
3123 
3124     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3125     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3126     _gc_timer_stw->register_gc_end();
3127     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3128   }
3129   // It should now be safe to tell the concurrent mark thread to start
3130   // without its logging output interfering with the logging output
3131   // that came from the pause.
3132 
3133   if (should_start_conc_mark) {
3134     // CAUTION: after the do_concurrent_mark() call below,
3135     // the concurrent marking thread(s) could be running
3136     // concurrently with us. Make sure that anything after
3137     // this point does not assume that we are the only GC thread
3138     // running. Note: of course, the actual marking work will
3139     // not start until the safepoint itself is released in
3140     // SuspendibleThreadSet::desynchronize().
3141     do_concurrent_mark();
3142   }
3143 
3144   return true;
3145 }
3146 
3147 void G1CollectedHeap::remove_self_forwarding_pointers() {
3148   G1ParRemoveSelfForwardPtrsTask rsfp_task;
3149   workers()->run_task(&rsfp_task);
3150 }
3151 
3152 void G1CollectedHeap::restore_after_evac_failure() {
3153   double remove_self_forwards_start = os::elapsedTime();
3154 
3155   remove_self_forwarding_pointers();
3156   SharedRestorePreservedMarksTaskExecutor task_executor(workers());
3157   _preserved_marks_set.restore(&task_executor);
3158 
3159   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
3160 }
3161 
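     // Called when an object could not be evacuated: record the failure for this
     // worker and preserve the object's original mark word so it can be restored
     // after self-forwarding pointers are removed.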
3162 void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
3163   if (!_evacuation_failed) {
3164     _evacuation_failed = true;
3165   }
3166 
3167   _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
3168   _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);
3169 }
3170 
3171 bool G1ParEvacuateFollowersClosure::offer_termination() {
3172   G1ParScanThreadState* const pss = par_scan_state();
3173   start_term_time();
3174   const bool res = terminator()->offer_termination();
3175   end_term_time();
3176   return res;
3177 }
3178 
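     // Drain this worker's own queue first, then repeatedly steal and trim work
     // from other queues until all workers agree to terminate.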
3179 void G1ParEvacuateFollowersClosure::do_void() {
3180   G1ParScanThreadState* const pss = par_scan_state();
3181   pss->trim_queue();
3182   do {
3183     pss->steal_and_trim_queue(queues());
3184   } while (!offer_termination());
3185 }
3186 
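     // The main evacuation task. Each worker evacuates the roots assigned to it,
     // scans the remembered sets into the collection set, and then copies live
     // objects, stealing work from other workers until termination.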
3187 class G1ParTask : public AbstractGangTask {
3188 protected:
3189   G1CollectedHeap*         _g1h;
3190   G1ParScanThreadStateSet* _pss;
3191   RefToScanQueueSet*       _queues;
3192   G1RootProcessor*         _root_processor;
3193   ParallelTaskTerminator   _terminator;
3194   uint                     _n_workers;
3195 
3196 public:
3197   G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
3198     : AbstractGangTask("G1 collection"),
3199       _g1h(g1h),
3200       _pss(per_thread_states),
3201       _queues(task_queues),
3202       _root_processor(root_processor),
3203       _terminator(n_workers, _queues),
3204       _n_workers(n_workers)
3205   {}
3206 
3207   void work(uint worker_id) {
3208     if (worker_id >= _n_workers) return;  // no work needed this round
3209 
3210     double start_sec = os::elapsedTime();
3211     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
3212 
3213     {
3214       ResourceMark rm;
3215       HandleMark   hm;
3216 
3217       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
3218 
3219       G1ParScanThreadState*           pss = _pss->state_for_worker(worker_id);
3220       pss->set_ref_discoverer(rp);
3221 
3222       double start_strong_roots_sec = os::elapsedTime();
3223 
3224       _root_processor->evacuate_roots(pss, worker_id);
3225 
3226       // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
3227       // having the visited nmethods act as roots for concurrent marking.
3228       // We only want to make sure that the oops in the nmethods are adjusted with regard to the
3229       // objects copied by the current evacuation.
3230       _g1h->g1_rem_set()->oops_into_collection_set_do(pss, worker_id);
3231 
3232       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
3233 
3234       double term_sec = 0.0;
3235       size_t evac_term_attempts = 0;
3236       {
3237         double start = os::elapsedTime();
3238         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
3239         evac.do_void();
3240 
3241         evac_term_attempts = evac.term_attempts();
3242         term_sec = evac.term_time();
3243         double elapsed_sec = os::elapsedTime() - start;
3244 
3245         G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
3246         p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
3247         p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
3248         p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
3249       }
3250 
3251       assert(pss->queue_is_empty(), "should be empty");
3252 
3253       if (log_is_enabled(Debug, gc, task, stats)) {
3254         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3255         size_t lab_waste;
3256         size_t lab_undo_waste;
3257         pss->waste(lab_waste, lab_undo_waste);
3258         _g1h->print_termination_stats(worker_id,
3259                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
3260                                       strong_roots_sec * 1000.0,                  /* strong roots time */
3261                                       term_sec * 1000.0,                          /* evac term time */
3262                                       evac_term_attempts,                         /* evac term attempts */
3263                                       lab_waste,                                  /* alloc buffer waste */
3264                                       lab_undo_waste                              /* undo waste */
3265                                       );
3266       }
3267 
3268       // Close the inner scope so that the ResourceMark and HandleMark
3269       // destructors are executed here and are included as part of the
3270       // "GC Worker Time".
3271     }
3272     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
3273   }
3274 };
3275 
3276 void G1CollectedHeap::print_termination_stats_hdr() {
3277   log_debug(gc, task, stats)("GC Termination Stats");
3278   log_debug(gc, task, stats)("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
3279   log_debug(gc, task, stats)("thr     ms        ms      %%        ms      %%    attempts  total   alloc    undo");
3280   log_debug(gc, task, stats)("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
3281 }
3282 
3283 void G1CollectedHeap::print_termination_stats(uint worker_id,
3284                                               double elapsed_ms,
3285                                               double strong_roots_ms,
3286                                               double term_ms,
3287                                               size_t term_attempts,
3288                                               size_t alloc_buffer_waste,
3289                                               size_t undo_waste) const {
3290   log_debug(gc, task, stats)
3291               ("%3d %9.2f %9.2f %6.2f "
3292                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
3293                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
3294                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
3295                term_ms, term_ms * 100 / elapsed_ms, term_attempts,
3296                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
3297                alloc_buffer_waste * HeapWordSize / K,
3298                undo_waste * HeapWordSize / K);
3299 }
3300 
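     // Parallel cleanup of weakly reachable data (string dedup entries, code
     // cache entries and class metadata) once marking has determined liveness.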
3301 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
3302                                         bool class_unloading_occurred) {
3303   uint n_workers = workers()->active_workers();
3304 
3305   G1StringDedupUnlinkOrOopsDoClosure dedup_closure(is_alive, NULL, false);
3306   ParallelCleaningTask g1_unlink_task(is_alive, &dedup_closure, n_workers, class_unloading_occurred);
3307   workers()->run_task(&g1_unlink_task);
3308 }
3309 
3310 void G1CollectedHeap::partial_cleaning(BoolObjectClosure* is_alive,
3311                                        bool process_strings,
3312                                        bool process_string_dedup) {
3313   if (!process_strings && !process_string_dedup) {
3314     // Nothing to clean.
3315     return;
3316   }
3317 
3318   G1StringDedupUnlinkOrOopsDoClosure dedup_closure(is_alive, NULL, false);
3319   StringCleaningTask g1_unlink_task(is_alive, process_string_dedup ? &dedup_closure : NULL, process_strings);
3320   workers()->run_task(&g1_unlink_task);
3321 }
3322 
3323 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
3324  private:
3325   DirtyCardQueueSet* _queue;
3326   G1CollectedHeap* _g1h;
3327  public:
3328   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
3329     _queue(queue), _g1h(g1h) { }
3330 
3331   virtual void work(uint worker_id) {
3332     G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3333     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3334 
3335     RedirtyLoggedCardTableEntryClosure cl(_g1h);
3336     _queue->par_apply_closure_to_all_completed_buffers(&cl);
3337 
3338     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3339   }
3340 };
3341 
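     // During the pause, cards that need to be rescanned were collected in the
     // heap's private dirty card queue set. Mark those cards dirty on the card
     // table again and merge the buffers back into the global queue set.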
3342 void G1CollectedHeap::redirty_logged_cards() {
3343   double redirty_logged_cards_start = os::elapsedTime();
3344 
3345   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3346   dirty_card_queue_set().reset_for_par_iteration();
3347   workers()->run_task(&redirty_task);
3348 
3349   DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
3350   dcq.merge_bufferlists(&dirty_card_queue_set());
3351   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3352 
3353   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3354 }
3355 
3356 // Weak Reference Processing support
3357 
3358 bool G1STWIsAliveClosure::do_object_b(oop p) {
3359   // An object is reachable if it is outside the collection set,
3360   // or is inside and copied.
3361   return !_g1h->is_in_cset(p) || p->is_forwarded();
3362 }
3363 
3364 bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
3365   assert(obj != NULL, "must not be NULL");
3366   assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
3367   // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below
3368   // may falsely indicate that this is not the case here: however the collection set only
3369   // contains old regions when concurrent mark is not running.
3370   return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor();
3371 }
3372 
3373 // Non Copying Keep Alive closure
3374 class G1KeepAliveClosure: public OopClosure {
3375   G1CollectedHeap* _g1h;
3376 public:
3377   G1KeepAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
3378   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3379   void do_oop(oop* p) {
3380     oop obj = *p;
3381     assert(obj != NULL, "the caller should have filtered out NULL values");
3382 
3383     const InCSetState cset_state = _g1h->in_cset_state(obj);
3384     if (!cset_state.is_in_cset_or_humongous()) {
3385       return;
3386     }
3387     if (cset_state.is_in_cset()) {
3388       assert(obj->is_forwarded(), "invariant");
3389       *p = obj->forwardee();
3390     } else {
3391       assert(!obj->is_forwarded(), "invariant");
3392       assert(cset_state.is_humongous(),
3393              "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
3394       _g1h->set_humongous_is_live(obj);
3395     }
3396   }
3397 };
3398 
3399 // Copying Keep Alive closure - can be called from both
3400 // serial and parallel code as long as different worker
3401 // threads utilize different G1ParScanThreadState instances
3402 // and different queues.
3403 
3404 class G1CopyingKeepAliveClosure: public OopClosure {
3405   G1CollectedHeap*         _g1h;
3406   G1ParScanThreadState*    _par_scan_state;
3407 
3408 public:
3409   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
3410                             G1ParScanThreadState* pss):
3411     _g1h(g1h),
3412     _par_scan_state(pss)
3413   {}
3414 
3415   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3416   virtual void do_oop(      oop* p) { do_oop_work(p); }
3417 
3418   template <class T> void do_oop_work(T* p) {
3419     oop obj = RawAccess<>::oop_load(p);
3420 
3421     if (_g1h->is_in_cset_or_humongous(obj)) {
3422       // If the referent object has been forwarded (either copied
3423       // to a new location or to itself in the event of an
3424       // evacuation failure) then we need to update the reference
3425       // field and, if both reference and referent are in the G1
3426       // heap, update the RSet for the referent.
3427       //
3428       // If the referent has not been forwarded then we have to keep
3429       // it alive by policy. Therefore we have to copy the referent.
3430       //
3431       // When the queue is drained (after each phase of reference processing)
3432       // the object and its followers will be copied, the reference field set
3433       // to point to the new location, and the RSet updated.
3434       _par_scan_state->push_on_queue(p);
3435     }
3436   }
3437 };
3438 
3439 // Serial drain queue closure. Called as the 'complete_gc'
3440 // closure for each discovered list in some of the
3441 // reference processing phases.
3442 
3443 class G1STWDrainQueueClosure: public VoidClosure {
3444 protected:
3445   G1CollectedHeap* _g1h;
3446   G1ParScanThreadState* _par_scan_state;
3447 
3448   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
3449 
3450 public:
3451   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
3452     _g1h(g1h),
3453     _par_scan_state(pss)
3454   { }
3455 
3456   void do_void() {
3457     G1ParScanThreadState* const pss = par_scan_state();
3458     pss->trim_queue();
3459   }
3460 };
3461 
3462 // Parallel Reference Processing closures
3463 
3464 // Implementation of AbstractRefProcTaskExecutor for parallel reference
3465 // processing during G1 evacuation pauses.
3466 
3467 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
3468 private:
3469   G1CollectedHeap*          _g1h;
3470   G1ParScanThreadStateSet*  _pss;
3471   RefToScanQueueSet*        _queues;
3472   WorkGang*                 _workers;
3473 
3474 public:
3475   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
3476                            G1ParScanThreadStateSet* per_thread_states,
3477                            WorkGang* workers,
3478                            RefToScanQueueSet *task_queues) :
3479     _g1h(g1h),
3480     _pss(per_thread_states),
3481     _queues(task_queues),
3482     _workers(workers)
3483   {
3484     g1h->ref_processor_stw()->set_active_mt_degree(workers->active_workers());
3485   }
3486 
3487   // Executes the given task using the parallel GC worker threads.
3488   virtual void execute(ProcessTask& task, uint ergo_workers);
3489 };
3490 
3491 // Gang task for possibly parallel reference processing
3492 
3493 class G1STWRefProcTaskProxy: public AbstractGangTask {
3494   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
3495   ProcessTask&     _proc_task;
3496   G1CollectedHeap* _g1h;
3497   G1ParScanThreadStateSet* _pss;
3498   RefToScanQueueSet* _task_queues;
3499   ParallelTaskTerminator* _terminator;
3500 
3501 public:
3502   G1STWRefProcTaskProxy(ProcessTask& proc_task,
3503                         G1CollectedHeap* g1h,
3504                         G1ParScanThreadStateSet* per_thread_states,
3505                         RefToScanQueueSet *task_queues,
3506                         ParallelTaskTerminator* terminator) :
3507     AbstractGangTask("Process reference objects in parallel"),
3508     _proc_task(proc_task),
3509     _g1h(g1h),
3510     _pss(per_thread_states),
3511     _task_queues(task_queues),
3512     _terminator(terminator)
3513   {}
3514 
3515   virtual void work(uint worker_id) {
3516     // The reference processing task executed by a single worker.
3517     ResourceMark rm;
3518     HandleMark   hm;
3519 
3520     G1STWIsAliveClosure is_alive(_g1h);
3521 
3522     G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
3523     pss->set_ref_discoverer(NULL);
3524 
3525     // Keep alive closure.
3526     G1CopyingKeepAliveClosure keep_alive(_g1h, pss);
3527 
3528     // Complete GC closure
3529     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
3530 
3531     // Call the reference processing task's work routine.
3532     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
3533 
3534     // Note we cannot assert that the refs array is empty here as not all
3535     // of the processing tasks (specifically phase2 - pp2_work) execute
3536     // the complete_gc closure (which ordinarily would drain the queue) so
3537     // the queue may not be empty.
3538   }
3539 };
3540 
3541 // Driver routine for parallel reference processing.
3542 // Creates an instance of the ref processing gang
3543 // task and has the worker threads execute it.
3544 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {
3545   assert(_workers != NULL, "Need parallel worker threads.");
3546 
3547   assert(_workers->active_workers() >= ergo_workers,
3548          "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)",
3549          ergo_workers, _workers->active_workers());
3550   ParallelTaskTerminator terminator(ergo_workers, _queues);
3551   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
3552 
3553   _workers->run_task(&proc_task_proxy, ergo_workers);
3554 }
3555 
3556 // End of weak reference support closures
3557 
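     // Process the references discovered by the STW reference processor during
     // this pause, copying or keeping alive referents as required, either
     // serially or using the parallel task executor above.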
3558 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
3559   double ref_proc_start = os::elapsedTime();
3560 
3561   ReferenceProcessor* rp = _ref_processor_stw;
3562   assert(rp->discovery_enabled(), "should have been enabled");
3563 
3564   // Closure to test whether a referent is alive.
3565   G1STWIsAliveClosure is_alive(this);
3566 
3567   // Even when parallel reference processing is enabled, the processing
3568   // of JNI refs is serial and performed serially by the current thread
3569   // rather than by a worker. The following PSS will be used for processing
3570   // JNI refs.
3571 
3572   // Use only a single queue for this PSS.
3573   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
3574   pss->set_ref_discoverer(NULL);
3575   assert(pss->queue_is_empty(), "pre-condition");
3576 
3577   // Keep alive closure.
3578   G1CopyingKeepAliveClosure keep_alive(this, pss);
3579 
3580   // Serial Complete GC closure
3581   G1STWDrainQueueClosure drain_queue(this, pss);
3582 
3583   // Setup the soft refs policy...
3584   rp->setup_policy(false);
3585 
3586   ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
3587 
3588   ReferenceProcessorStats stats;
3589   if (!rp->processing_is_mt()) {
3590     // Serial reference processing...
3591     stats = rp->process_discovered_references(&is_alive,
3592                                               &keep_alive,
3593                                               &drain_queue,
3594                                               NULL,
3595                                               pt);
3596   } else {
3597     uint no_of_gc_workers = workers()->active_workers();
3598 
3599     // Parallel reference processing
3600     assert(no_of_gc_workers <= rp->max_num_queues(),
3601            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
3602            no_of_gc_workers,  rp->max_num_queues());
3603 
3604     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues);
3605     stats = rp->process_discovered_references(&is_alive,
3606                                               &keep_alive,
3607                                               &drain_queue,
3608                                               &par_task_executor,
3609                                               pt);
3610   }
3611 
3612   _gc_tracer_stw->report_gc_reference_stats(stats);
3613 
3614   // We have completed copying any necessary live referent objects.
3615   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3616 
3617   make_pending_list_reachable();
3618 
3619   rp->verify_no_references_recorded();
3620 
3621   double ref_proc_time = os::elapsedTime() - ref_proc_start;
3622   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
3623 }
3624 
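     // During an initial-mark pause, explicitly mark the head of the reference
     // pending list so that concurrent marking treats the whole list as live.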
3625 void G1CollectedHeap::make_pending_list_reachable() {
3626   if (collector_state()->in_initial_mark_gc()) {
3627     oop pll_head = Universe::reference_pending_list();
3628     if (pll_head != NULL) {
3629       // Any valid worker id is fine here as we are in the VM thread and single-threaded.
3630       _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
3631     }
3632   }
3633 }
3634 
3635 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
3636   double merge_pss_time_start = os::elapsedTime();
3637   per_thread_states->flush();
3638   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
3639 }
3640 
3641 void G1CollectedHeap::pre_evacuate_collection_set() {
3642   _expand_heap_after_alloc_failure = true;
3643   _evacuation_failed = false;
3644 
3645   // Disable the hot card cache.
3646   _hot_card_cache->reset_hot_cache_claimed_index();
3647   _hot_card_cache->set_use_cache(false);
3648 
3649   g1_rem_set()->prepare_for_oops_into_collection_set_do();
3650   _preserved_marks_set.assert_empty();
3651 
3652   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
3653 
3654   // InitialMark needs claim bits to keep track of the marked-through CLDs.
3655   if (collector_state()->in_initial_mark_gc()) {
3656     double start_clear_claimed_marks = os::elapsedTime();
3657 
3658     ClassLoaderDataGraph::clear_claimed_marks();
3659 
3660     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
3661     phase_times->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
3662   }
3663 }
3664 
3665 void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3666   // Should G1EvacuationFailureALot be in effect for this GC?
3667   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
3668 
3669   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
3670 
3671   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
3672 
3673   double start_par_time_sec = os::elapsedTime();
3674   double end_par_time_sec;
3675 
3676   {
3677     const uint n_workers = workers()->active_workers();
3678     G1RootProcessor root_processor(this, n_workers);
3679     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
3680 
3681     print_termination_stats_hdr();
3682 
3683     workers()->run_task(&g1_par_task);
3684     end_par_time_sec = os::elapsedTime();
3685 
3686     // Closing the inner scope will execute the destructor
3687     // for the G1RootProcessor object. We record the current
3688     // elapsed time before closing the scope so that time
3689     // taken for the destructor is NOT included in the
3690     // reported parallel time.
3691   }
3692 
3693   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
3694   phase_times->record_par_time(par_time_ms);
3695 
3696   double code_root_fixup_time_ms =
3697         (os::elapsedTime() - end_par_time_sec) * 1000.0;
3698   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
3699 }
3700 
3701 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3702   // Also cleans the card table from temporary duplicate detection information used
3703   // during UpdateRS/ScanRS.
3704   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
3705 
3706   // Process any discovered reference objects - we have
3707   // to do this _before_ we retire the GC alloc regions
3708   // as we may have to copy some 'reachable' referent
3709   // objects (and their reachable sub-graphs) that were
3710   // not copied during the pause.
3711   process_discovered_references(per_thread_states);
3712 
3713   // FIXME
3714   // CM's reference processing also cleans up the string table.
3715   // Should we do that here also? We could, but it is a serial operation
3716   // and could significantly increase the pause time.
3717 
3718   G1STWIsAliveClosure is_alive(this);
3719   G1KeepAliveClosure keep_alive(this);
3720 
3721   WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive,
3722                               g1_policy()->phase_times()->weak_phase_times());
3723 
3724   if (G1StringDedup::is_enabled()) {
3725     double fixup_start = os::elapsedTime();
3726 
3727     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
3728 
3729     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
3730     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
3731   }
3732 
3733   if (evacuation_failed()) {
3734     restore_after_evac_failure();
3735 
3736     // Reset the G1EvacuationFailureALot counters and flags
3737     // Note: the values are reset only when an actual
3738     // evacuation failure occurs.
3739     NOT_PRODUCT(reset_evacuation_should_fail();)
3740   }
3741 
3742   _preserved_marks_set.assert_empty();
3743 
3744   _allocator->release_gc_alloc_regions(evacuation_info);
3745 
3746   merge_per_thread_state_info(per_thread_states);
3747 
3748   // Reset and re-enable the hot card cache.
3749   // Note the counts for the cards in the regions in the
3750   // collection set are reset when the collection set is freed.
3751   _hot_card_cache->reset_hot_cache();
3752   _hot_card_cache->set_use_cache(true);
3753 
3754   purge_code_root_memory();
3755 
3756   redirty_logged_cards();
3757 #if COMPILER2_OR_JVMCI
3758   double start = os::elapsedTime();
3759   DerivedPointerTable::update_pointers();
3760   g1_policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
3761 #endif
3762   g1_policy()->print_age_table();
3763 }
3764 
3765 void G1CollectedHeap::record_obj_copy_mem_stats() {
3766   g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
3767 
3768   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
3769                                                create_g1_evac_summary(&_old_evac_stats));
3770 }
3771 
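     // Clear a region's metadata and add it to the given free list. Callers may
     // skip clearing the remembered set and the hot card cache counts when those
     // are handled elsewhere.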
3772 void G1CollectedHeap::free_region(HeapRegion* hr,
3773                                   FreeRegionList* free_list,
3774                                   bool skip_remset,
3775                                   bool skip_hot_card_cache,
3776                                   bool locked) {
3777   assert(!hr->is_free(), "the region should not be free");
3778   assert(!hr->is_empty(), "the region should not be empty");
3779   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
3780   assert(free_list != NULL, "pre-condition");
3781 
3782   if (G1VerifyBitmaps) {
3783     MemRegion mr(hr->bottom(), hr->end());
3784     concurrent_mark()->clear_range_in_prev_bitmap(mr);
3785   }
3786 
3787   // Clear the card counts for this region.
3788   // Note: we only need to do this if the region is not young
3789   // (since we don't refine cards in young regions).
3790   if (!skip_hot_card_cache && !hr->is_young()) {
3791     _hot_card_cache->reset_card_counts(hr);
3792   }
3793   hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
3794   _g1_policy->remset_tracker()->update_at_free(hr);
3795   free_list->add_ordered(hr);
3796 }
3797 
3798 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
3799                                             FreeRegionList* free_list) {
3800   assert(hr->is_humongous(), "this is only for humongous regions");
3801   assert(free_list != NULL, "pre-condition");
3802   hr->clear_humongous();
3803   free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
3804 }
3805 
3806 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
3807                                            const uint humongous_regions_removed) {
3808   if (old_regions_removed > 0 || humongous_regions_removed > 0) {
3809     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
3810     _old_set.bulk_remove(old_regions_removed);
3811     _humongous_set.bulk_remove(humongous_regions_removed);
3812   }
3813 
3814 }
3815 
3816 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
3817   assert(list != NULL, "list can't be null");
3818   if (!list->is_empty()) {
3819     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
3820     _hrm.insert_list_into_free_list(list);
3821   }
3822 }
3823 
3824 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
3825   decrease_used(bytes);
3826 }
3827 
3828 class G1FreeCollectionSetTask : public AbstractGangTask {
3829 private:
3830 
3831   // Closure applied to all regions in the collection set to do work that needs to
3832   // be done serially in a single thread.
3833   class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
3834   private:
3835     EvacuationInfo* _evacuation_info;
3836     const size_t* _surviving_young_words;
3837 
3838     // Bytes used in successfully evacuated regions before the evacuation.
3839     size_t _before_used_bytes;
3840     // Bytes used in unsuccessfully evacuated regions before the evacuation
3841     size_t _after_used_bytes;
3842 
3843     size_t _bytes_allocated_in_old_since_last_gc;
3844 
3845     size_t _failure_used_words;
3846     size_t _failure_waste_words;
3847 
3848     FreeRegionList _local_free_list;
3849   public:
3850     G1SerialFreeCollectionSetClosure(EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
3851       HeapRegionClosure(),
3852       _evacuation_info(evacuation_info),
3853       _surviving_young_words(surviving_young_words),
3854       _before_used_bytes(0),
3855       _after_used_bytes(0),
3856       _bytes_allocated_in_old_since_last_gc(0),
3857       _failure_used_words(0),
3858       _failure_waste_words(0),
3859       _local_free_list("Local Region List for CSet Freeing") {
3860     }
3861 
3862     virtual bool do_heap_region(HeapRegion* r) {
3863       G1CollectedHeap* g1h = G1CollectedHeap::heap();
3864 
3865       assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
3866       g1h->clear_in_cset(r);
3867 
3868       if (r->is_young()) {
3869         assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
3870                "Young index %d is wrong for region %u of type %s with %u young regions",
3871                r->young_index_in_cset(),
3872                r->hrm_index(),
3873                r->get_type_str(),
3874                g1h->collection_set()->young_region_length());
3875         size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
3876         r->record_surv_words_in_group(words_survived);
3877       }
3878 
3879       if (!r->evacuation_failed()) {
3880         assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
3881         _before_used_bytes += r->used();
3882         g1h->free_region(r,
3883                          &_local_free_list,
3884                          true, /* skip_remset */
3885                          true, /* skip_hot_card_cache */
3886                          true  /* locked */);
3887       } else {
3888         r->uninstall_surv_rate_group();
3889         r->set_young_index_in_cset(-1);
3890         r->set_evacuation_failed(false);
3891         // When moving a young gen region to old gen, we "allocate" that whole region
3892         // there. This is in addition to any already evacuated objects. Notify the
3893         // policy about that.
3894         // Old gen regions do not cause an additional allocation: both the objects
3895         // still in the region and the ones already moved are accounted for elsewhere.
3896         if (r->is_young()) {
3897           _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
3898         }
3899         // The region is now considered to be old.
3900         r->set_old();
3901         // Do some allocation statistics accounting. Regions that failed evacuation
3902         // are always made old, so there is no need to update anything in the young
3903         // gen statistics, but we need to update old gen statistics.
3904         size_t used_words = r->marked_bytes() / HeapWordSize;
3905 
3906         _failure_used_words += used_words;
3907         _failure_waste_words += HeapRegion::GrainWords - used_words;
3908 
3909         g1h->old_set_add(r);
3910         _after_used_bytes += r->used();
3911       }
3912       return false;
3913     }
3914 
3915     void complete_work() {
3916       G1CollectedHeap* g1h = G1CollectedHeap::heap();
3917 
3918       _evacuation_info->set_regions_freed(_local_free_list.length());
3919       _evacuation_info->increment_collectionset_used_after(_after_used_bytes);
3920 
3921       g1h->prepend_to_freelist(&_local_free_list);
3922       g1h->decrement_summary_bytes(_before_used_bytes);
3923 
3924       G1Policy* policy = g1h->g1_policy();
3925       policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
3926 
3927       g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
3928     }
3929   };
3930 
3931   G1CollectionSet* _collection_set;
3932   G1SerialFreeCollectionSetClosure _cl;
3933   const size_t* _surviving_young_words;
3934 
3935   size_t _rs_lengths;
3936 
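       // Claim counter for the serial part of the work: the first worker to atomically
       // bump this from zero runs do_serial_work().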
3937   volatile jint _serial_work_claim;
3938 
3939   struct WorkItem {
3940     uint region_idx;
3941     bool is_young;
3942     bool evacuation_failed;
3943 
3944     WorkItem(HeapRegion* r) {
3945       region_idx = r->hrm_index();
3946       is_young = r->is_young();
3947       evacuation_failed = r->evacuation_failed();
3948     }
3949   };
3950 
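       // Work items are claimed in chunks of chunk_size() entries by atomically
       // advancing this counter; see work().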
3951   volatile size_t _parallel_work_claim;
3952   size_t _num_work_items;
3953   WorkItem* _work_items;
3954 
3955   void do_serial_work() {
3956     // Need to grab the lock to be allowed to modify the old region list.
3957     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
3958     _collection_set->iterate(&_cl);
3959   }
3960 
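       // Per-region work done in parallel: accumulate the remembered set length into
       // _rs_lengths, reset the hot card cache for non-young regions, and clear the
       // remembered sets of regions that were evacuated successfully.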
3961   void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {
3962     G1CollectedHeap* g1h = G1CollectedHeap::heap();
3963 
3964     HeapRegion* r = g1h->region_at(region_idx);
3965     assert(!g1h->is_on_master_free_list(r), "sanity");
3966 
3967     Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths);
3968 
3969     if (!is_young) {
3970       g1h->_hot_card_cache->reset_card_counts(r);
3971     }
3972 
3973     if (!evacuation_failed) {
3974       r->rem_set()->clear_locked();
3975     }
3976   }
3977 
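       // Closure that flattens the collection set into the work item array so that
       // workers can later claim regions in chunks without walking the region list.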
3978   class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
3979   private:
3980     size_t _cur_idx;
3981     WorkItem* _work_items;
3982   public:
3983     G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
3984 
3985     virtual bool do_heap_region(HeapRegion* r) {
3986       _work_items[_cur_idx++] = WorkItem(r);
3987       return false;
3988     }
3989   };
3990 
3991   void prepare_work() {
3992     G1PrepareFreeCollectionSetClosure cl(_work_items);
3993     _collection_set->iterate(&cl);
3994   }
3995 
3996   void complete_work() {
3997     _cl.complete_work();
3998 
3999     G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
4000     policy->record_max_rs_lengths(_rs_lengths);
4001     policy->cset_regions_freed();
4002   }
4003 public:
4004   G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4005     AbstractGangTask("G1 Free Collection Set"),
4006     _collection_set(collection_set),
4007     _cl(evacuation_info, surviving_young_words),
4008     _surviving_young_words(surviving_young_words),
4009     _rs_lengths(0),
4010     _serial_work_claim(0),
4011     _parallel_work_claim(0),
4012     _num_work_items(collection_set->region_length()),
4013     _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
4014     prepare_work();
4015   }
4016 
4017   ~G1FreeCollectionSetTask() {
4018     complete_work();
4019     FREE_C_HEAP_ARRAY(WorkItem, _work_items);
4020   }
4021 
4022   // Chunk size for work distribution. The chosen value has been determined experimentally
4023   // to be a good tradeoff between overhead and achievable parallelism.
4024   static uint chunk_size() { return 32; }
4025 
4026   virtual void work(uint worker_id) {
4027     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
4028 
4029     // Claim serial work.
4030     if (_serial_work_claim == 0) {
4031       jint value = Atomic::add(1, &_serial_work_claim) - 1;
4032       if (value == 0) {
4033         double serial_time = os::elapsedTime();
4034         do_serial_work();
4035         timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
4036       }
4037     }
4038 
4039     // Start parallel work.
4040     double young_time = 0.0;
4041     bool has_young_time = false;
4042     double non_young_time = 0.0;
4043     bool has_non_young_time = false;
4044 
4045     while (true) {
4046       size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
4047       size_t cur = end - chunk_size();
4048 
4049       if (cur >= _num_work_items) {
4050         break;
4051       }
4052 
4053       double start_time = os::elapsedTime();
4054 
4055       end = MIN2(end, _num_work_items);
4056 
4057       for (; cur < end; cur++) {
4058         bool is_young = _work_items[cur].is_young;
4059 
4060         do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);
4061 
4062         double end_time = os::elapsedTime();
4063         double time_taken = end_time - start_time;
4064         if (is_young) {
4065           young_time += time_taken;
4066           has_young_time = true;
4067         } else {
4068           non_young_time += time_taken;
4069           has_non_young_time = true;
4070         }
4071         start_time = end_time;
4072       }
4073     }
4074 
4075     if (has_young_time) {
4076       timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);
4077     }
4078     if (has_non_young_time) {
4079       timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time);
4080     }
4081   }
4082 };
4083 
4084 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4085   _eden.clear();
4086 
4087   double free_cset_start_time = os::elapsedTime();
4088 
4089   {
4090     uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
4091     uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
4092 
4093     G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
4094 
4095     log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
4096                         cl.name(),
4097                         num_workers,
4098                         _collection_set.region_length());
4099     workers()->run_task(&cl, num_workers);
4100   }
4101   g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
4102 
4103   collection_set->clear();
4104 }
4105 
4106 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4107  private:
4108   FreeRegionList* _free_region_list;
4109   HeapRegionSet* _proxy_set;
4110   uint _humongous_objects_reclaimed;
4111   uint _humongous_regions_reclaimed;
4112   size_t _freed_bytes;
4113  public:
4114 
4115   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4116     _free_region_list(free_region_list), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
4117   }
4118 
4119   virtual bool do_heap_region(HeapRegion* r) {
4120     if (!r->is_starts_humongous()) {
4121       return false;
4122     }
4123 
4124     G1CollectedHeap* g1h = G1CollectedHeap::heap();
4125 
4126     oop obj = (oop)r->bottom();
4127     G1CMBitMap* next_bitmap = g1h->concurrent_mark()->next_mark_bitmap();
4128 
4129     // The following checks of whether the humongous object is live are sufficient.
4130     // The main additional check (in addition to having a reference from the roots
4131     // or the young gen) is whether the humongous object has a remembered set entry.
4132     //
4133     // A humongous object cannot be live if there is no remembered set for it
4134     // because:
4135     // - there can be no references from within humongous starts regions referencing
4136     // the object because we never allocate other objects into them.
4137     // (I.e. there are no intra-region references that may be missed by the
4138     // remembered set)
4139     // - as soon as there is a remembered set entry to the humongous starts region
4140     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
4141     // until the end of a concurrent mark.
4142     //
4143     // It is not required to check whether the object has been found dead by marking
4144     // or not; in fact that would prevent reclamation within a concurrent cycle, as
4145     // all objects allocated during that time are considered live.
4146     // SATB marking is even more conservative than the remembered set.
4147     // So if at this point in the collection there is no remembered set entry,
4148     // nobody has a reference to it.
4149     // At the start of collection we flush all refinement logs, and remembered sets
4150     // are completely up-to-date with regard to references to the humongous object.
4151     //
4152     // Other implementation considerations:
4153     // - never consider object arrays at this time because they would pose
4154     // considerable effort for cleaning up the remembered sets. This is
4155     // required because stale remembered sets might reference locations that
4156     // are currently allocated into.
4157     uint region_idx = r->hrm_index();
4158     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
4159         !r->rem_set()->is_empty()) {
4160       log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT "  with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4161                                region_idx,
4162                                (size_t)obj->size() * HeapWordSize,
4163                                p2i(r->bottom()),
4164                                r->rem_set()->occupied(),
4165                                r->rem_set()->strong_code_roots_list_length(),
4166                                next_bitmap->is_marked(r->bottom()),
4167                                g1h->is_humongous_reclaim_candidate(region_idx),
4168                                obj->is_typeArray()
4169                               );
4170       return false;
4171     }
4172 
4173     guarantee(obj->is_typeArray(),
4174               "Only eagerly reclaiming type arrays is supported, but the object "
4175               PTR_FORMAT " is not.", p2i(r->bottom()));
4176 
4177     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4178                              region_idx,
4179                              (size_t)obj->size() * HeapWordSize,
4180                              p2i(r->bottom()),
4181                              r->rem_set()->occupied(),
4182                              r->rem_set()->strong_code_roots_list_length(),
4183                              next_bitmap->is_marked(r->bottom()),
4184                              g1h->is_humongous_reclaim_candidate(region_idx),
4185                              obj->is_typeArray()
4186                             );
4187 
4188     G1ConcurrentMark* const cm = g1h->concurrent_mark();
4189     cm->humongous_object_eagerly_reclaimed(r);
4190     assert(!cm->is_marked_in_prev_bitmap(obj) && !cm->is_marked_in_next_bitmap(obj),
4191            "Eagerly reclaimed humongous region %u should not be marked at all but is in prev %s next %s",
4192            region_idx,
4193            BOOL_TO_STR(cm->is_marked_in_prev_bitmap(obj)),
4194            BOOL_TO_STR(cm->is_marked_in_next_bitmap(obj)));
4195     _humongous_objects_reclaimed++;
4196     do {
4197       HeapRegion* next = g1h->next_region_in_humongous(r);
4198       _freed_bytes += r->used();
4199       r->set_containing_set(NULL);
4200       _humongous_regions_reclaimed++;
4201       g1h->free_humongous_region(r, _free_region_list);
4202       r = next;
4203     } while (r != NULL);
4204 
4205     return false;
4206   }
4207 
4208   uint humongous_objects_reclaimed() {
4209     return _humongous_objects_reclaimed;
4210   }
4211 
4212   uint humongous_regions_reclaimed() {
4213     return _humongous_regions_reclaimed;
4214   }
4215 
4216   size_t bytes_freed() const {
4217     return _freed_bytes;
4218   }
4219 };
4220 
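     // Walk the heap and eagerly reclaim humongous regions whose objects are provably
     // dead (see G1FreeHumongousRegionClosure above), returning them to the free list
     // and recording the time taken.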
4221 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
4222   assert_at_safepoint_on_vm_thread();
4223 
4224   if (!G1EagerReclaimHumongousObjects ||
4225       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
4226     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
4227     return;
4228   }
4229 
4230   double start_time = os::elapsedTime();
4231 
4232   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
4233 
4234   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
4235   heap_region_iterate(&cl);
4236 
4237   remove_from_old_sets(0, cl.humongous_regions_reclaimed());
4238 
4239   G1HRPrinter* hrp = hr_printer();
4240   if (hrp->is_active()) {
4241     FreeRegionListIterator iter(&local_cleanup_list);
4242     while (iter.more_available()) {
4243       HeapRegion* hr = iter.get_next();
4244       hrp->cleanup(hr);
4245     }
4246   }
4247 
4248   prepend_to_freelist(&local_cleanup_list);
4249   decrement_summary_bytes(cl.bytes_freed());
4250 
4251   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
4252                                                                     cl.humongous_objects_reclaimed());
4253 }
4254 
4255 class G1AbandonCollectionSetClosure : public HeapRegionClosure {
4256 public:
4257   virtual bool do_heap_region(HeapRegion* r) {
4258     assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
4259     G1CollectedHeap::heap()->clear_in_cset(r);
4260     r->set_young_index_in_cset(-1);
4261     return false;
4262   }
4263 };
4264 
4265 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
4266   G1AbandonCollectionSetClosure cl;
4267   collection_set->iterate(&cl);
4268 
4269   collection_set->clear();
4270   collection_set->stop_incremental_building();
4271 }
4272 
4273 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
4274   return _allocator->is_retained_old_region(hr);
4275 }
4276 
4277 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
4278   _eden.add(hr);
4279   _g1_policy->set_region_eden(hr);
4280 }
4281 
4282 #ifdef ASSERT
4283 
4284 class NoYoungRegionsClosure: public HeapRegionClosure {
4285 private:
4286   bool _success;
4287 public:
4288   NoYoungRegionsClosure() : _success(true) { }
4289   bool do_heap_region(HeapRegion* r) {
4290     if (r->is_young()) {
4291       log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
4292                             p2i(r->bottom()), p2i(r->end()));
4293       _success = false;
4294     }
4295     return false;
4296   }
4297   bool success() { return _success; }
4298 };
4299 
4300 bool G1CollectedHeap::check_young_list_empty() {
4301   bool ret = (young_regions_count() == 0);
4302 
4303   NoYoungRegionsClosure closure;
4304   heap_region_iterate(&closure);
4305   ret = ret && closure.success();
4306 
4307   return ret;
4308 }
4309 
4310 #endif // ASSERT
4311 
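     // Helper for tear_down_region_sets(): removes each old region from the old set and
     // detaches young regions from their surv rate group; free, humongous and archive
     // regions are left untouched.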
4312 class TearDownRegionSetsClosure : public HeapRegionClosure {
4313   HeapRegionSet *_old_set;
4314 
4315 public:
4316   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
4317 
4318   bool do_heap_region(HeapRegion* r) {
4319     if (r->is_old()) {
4320       _old_set->remove(r);
4321     } else if (r->is_young()) {
4322       r->uninstall_surv_rate_group();
4323     } else {
4324       // We ignore free regions; we'll empty the free list afterwards.
4325       // We ignore humongous and archive regions; we're not tearing down these
4326       // sets.
4327       assert(r->is_archive() || r->is_free() || r->is_humongous(),
4328              "it cannot be another type");
4329     }
4330     return false;
4331   }
4332 
4333   ~TearDownRegionSetsClosure() {
4334     assert(_old_set->is_empty(), "post-condition");
4335   }
4336 };
4337 
4338 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
4339   assert_at_safepoint_on_vm_thread();
4340 
4341   if (!free_list_only) {
4342     TearDownRegionSetsClosure cl(&_old_set);
4343     heap_region_iterate(&cl);
4344 
4345     // Note that emptying the _young_list is postponed and instead done as
4346     // the first step when rebuilding the region sets again. The reason for
4347     // this is that during a full GC string deduplication needs to know if
4348     // a collected region was young or old when the full GC was initiated.
4349   }
4350   _hrm.remove_all_free_regions();
4351 }
4352 
4353 void G1CollectedHeap::increase_used(size_t bytes) {
4354   _summary_bytes_used += bytes;
4355 }
4356 
4357 void G1CollectedHeap::decrease_used(size_t bytes) {
4358   assert(_summary_bytes_used >= bytes,
4359          "invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
4360          _summary_bytes_used, bytes);
4361   _summary_bytes_used -= bytes;
4362 }
4363 
4364 void G1CollectedHeap::set_used(size_t bytes) {
4365   _summary_bytes_used = bytes;
4366 }
4367 
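     // Helper for rebuild_region_sets(): clears every region's remembered set, returns
     // empty regions to the free list and, unless only the free list is being rebuilt,
     // moves the remaining non-humongous, non-archive regions to the old set,
     // accumulating the used bytes of all non-empty regions.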
4368 class RebuildRegionSetsClosure : public HeapRegionClosure {
4369 private:
4370   bool _free_list_only;
4371 
4372   HeapRegionSet* _old_set;
4373   HeapRegionManager* _hrm;
4374 
4375   size_t _total_used;
4376 
4377 public:
4378   RebuildRegionSetsClosure(bool free_list_only,
4379                            HeapRegionSet* old_set,
4380                            HeapRegionManager* hrm) :
4381     _free_list_only(free_list_only),
4382     _old_set(old_set), _hrm(hrm), _total_used(0) {
4383     assert(_hrm->num_free_regions() == 0, "pre-condition");
4384     if (!free_list_only) {
4385       assert(_old_set->is_empty(), "pre-condition");
4386     }
4387   }
4388 
4389   bool do_heap_region(HeapRegion* r) {
4390     // After full GC, no region should have a remembered set.
4391     r->rem_set()->clear(true);
4392     if (r->is_empty()) {
4393       // Add free regions to the free list
4394       r->set_free();
4395       _hrm->insert_into_free_list(r);
4396     } else if (!_free_list_only) {
4397 
4398       if (r->is_archive() || r->is_humongous()) {
4399         // We ignore archive and humongous regions; their sets were left unchanged.
4400       } else {
4401         assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
4402         // We now move all (non-humongous, non-old, non-archive) regions to old gen, and register them as such.
4403         r->move_to_old();
4404         _old_set->add(r);
4405       }
4406       _total_used += r->used();
4407     }
4408 
4409     return false;
4410   }
4411 
4412   size_t total_used() {
4413     return _total_used;
4414   }
4415 };
4416 
4417 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
4418   assert_at_safepoint_on_vm_thread();
4419 
4420   if (!free_list_only) {
4421     _eden.clear();
4422     _survivor.clear();
4423   }
4424 
4425   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
4426   heap_region_iterate(&cl);
4427 
4428   if (!free_list_only) {
4429     set_used(cl.total_used());
4430     if (_archive_allocator != NULL) {
4431       _archive_allocator->clear_used();
4432     }
4433   }
4434   assert(used_unlocked() == recalculate_used(),
4435          "inconsistent used_unlocked(), "
4436          "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
4437          used_unlocked(), recalculate_used());
4438 }
4439 
4440 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
4441   HeapRegion* hr = heap_region_containing(p);
4442   return hr->is_in(p);
4443 }
4444 
4445 // Methods for the mutator alloc region
4446 
4447 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
4448                                                       bool force) {
4449   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4450   bool should_allocate = g1_policy()->should_allocate_mutator_region();
4451   if (force || should_allocate) {
4452     HeapRegion* new_alloc_region = new_region(word_size,
4453                                               false /* is_old */,
4454                                               false /* do_expand */);
4455     if (new_alloc_region != NULL) {
4456       set_region_short_lived_locked(new_alloc_region);
4457       _hr_printer.alloc(new_alloc_region, !should_allocate);
4458       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
4459       _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
4460       return new_alloc_region;
4461     }
4462   }
4463   return NULL;
4464 }
4465 
4466 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
4467                                                   size_t allocated_bytes) {
4468   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4469   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
4470 
4471   collection_set()->add_eden_region(alloc_region);
4472   increase_used(allocated_bytes);
4473   _hr_printer.retire(alloc_region);
4474   // We update the eden sizes here, when the region is retired,
4475   // instead of when it's allocated, since this is the point at which its
4476   // used space has been recorded in _summary_bytes_used.
4477   g1mm()->update_eden_size();
4478 }
4479 
4480 // Methods for the GC alloc regions
4481 
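     // Returns whether another GC alloc region may be allocated for the given
     // destination: old regions are unlimited, survivor regions are capped by the
     // policy's maximum number of survivor regions.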
4482 bool G1CollectedHeap::has_more_regions(InCSetState dest) {
4483   if (dest.is_old()) {
4484     return true;
4485   } else {
4486     return survivor_regions_count() < g1_policy()->max_survivor_regions();
4487   }
4488 }
4489 
4490 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
4491   assert(FreeList_lock->owned_by_self(), "pre-condition");
4492 
4493   if (!has_more_regions(dest)) {
4494     return NULL;
4495   }
4496 
4497   const bool is_survivor = dest.is_young();
4498 
4499   HeapRegion* new_alloc_region = new_region(word_size,
4500                                             !is_survivor,
4501                                             true /* do_expand */);
4502   if (new_alloc_region != NULL) {
4503     if (is_survivor) {
4504       new_alloc_region->set_survivor();
4505       _survivor.add(new_alloc_region);
4506       _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
4507     } else {
4508       new_alloc_region->set_old();
4509       _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
4510     }
4511     _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
4512     _hr_printer.alloc(new_alloc_region);
4513     bool during_im = collector_state()->in_initial_mark_gc();
4514     new_alloc_region->note_start_of_copying(during_im);
4515     return new_alloc_region;
4516   }
4517   return NULL;
4518 }
4519 
4520 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
4521                                              size_t allocated_bytes,
4522                                              InCSetState dest) {
4523   bool during_im = collector_state()->in_initial_mark_gc();
4524   alloc_region->note_end_of_copying(during_im);
4525   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
4526   if (dest.is_old()) {
4527     old_set_add(alloc_region);
4528   }
4529   _hr_printer.retire(alloc_region);
4530 }
4531 
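     // Allocate the single free region with the highest heap index, committing it first
     // if necessary (which expands the heap).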
4532 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
4533   bool expanded = false;
4534   uint index = _hrm.find_highest_free(&expanded);
4535 
4536   if (index != G1_NO_HRM_INDEX) {
4537     if (expanded) {
4538       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
4539                                 HeapRegion::GrainWords * HeapWordSize);
4540     }
4541     _hrm.allocate_free_regions_starting_at(index, 1);
4542     return region_at(index);
4543   }
4544   return NULL;
4545 }
4546 
4547 // Optimized nmethod scanning
4548 
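     // For each oop embedded in an nmethod, record the nmethod as a strong code root of
     // the region containing the referenced object.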
4549 class RegisterNMethodOopClosure: public OopClosure {
4550   G1CollectedHeap* _g1h;
4551   nmethod* _nm;
4552 
4553   template <class T> void do_oop_work(T* p) {
4554     T heap_oop = RawAccess<>::oop_load(p);
4555     if (!CompressedOops::is_null(heap_oop)) {
4556       oop obj = CompressedOops::decode_not_null(heap_oop);
4557       HeapRegion* hr = _g1h->heap_region_containing(obj);
4558       assert(!hr->is_continues_humongous(),
4559              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
4560              " starting at " HR_FORMAT,
4561              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
4562 
4563       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
4564       hr->add_strong_code_root_locked(_nm);
4565     }
4566   }
4567 
4568 public:
4569   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
4570     _g1h(g1h), _nm(nm) {}
4571 
4572   void do_oop(oop* p)       { do_oop_work(p); }
4573   void do_oop(narrowOop* p) { do_oop_work(p); }
4574 };
4575 
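     // Counterpart to RegisterNMethodOopClosure: removes the nmethod from the strong
     // code root list of each region referenced by its embedded oops.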
4576 class UnregisterNMethodOopClosure: public OopClosure {
4577   G1CollectedHeap* _g1h;
4578   nmethod* _nm;
4579 
4580   template <class T> void do_oop_work(T* p) {
4581     T heap_oop = RawAccess<>::oop_load(p);
4582     if (!CompressedOops::is_null(heap_oop)) {
4583       oop obj = CompressedOops::decode_not_null(heap_oop);
4584       HeapRegion* hr = _g1h->heap_region_containing(obj);
4585       assert(!hr->is_continues_humongous(),
4586              "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
4587              " starting at " HR_FORMAT,
4588              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
4589 
4590       hr->remove_strong_code_root(_nm);
4591     }
4592   }
4593 
4594 public:
4595   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
4596     _g1h(g1h), _nm(nm) {}
4597 
4598   void do_oop(oop* p)       { do_oop_work(p); }
4599   void do_oop(narrowOop* p) { do_oop_work(p); }
4600 };
4601 
4602 // Returns true if the reference points to an object that
4603 // can move in an incremental collection.
4604 bool G1CollectedHeap::is_scavengable(oop obj) {
4605   HeapRegion* hr = heap_region_containing(obj);
4606   return !hr->is_pinned();
4607 }
4608 
4609 void G1CollectedHeap::register_nmethod(nmethod* nm) {
4610   guarantee(nm != NULL, "sanity");
4611   RegisterNMethodOopClosure reg_cl(this, nm);
4612   nm->oops_do(&reg_cl);
4613 }
4614 
4615 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
4616   guarantee(nm != NULL, "sanity");
4617   UnregisterNMethodOopClosure reg_cl(this, nm);
4618   nm->oops_do(&reg_cl, true);
4619 }
4620 
4621 void G1CollectedHeap::purge_code_root_memory() {
4622   double purge_start = os::elapsedTime();
4623   G1CodeRootSet::purge();
4624   double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
4625   g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
4626 }
4627 
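     // Re-registers every nmethod in the code cache with the regions it references
     // (when ScavengeRootsInCode is set); used by rebuild_strong_code_roots() below.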
4628 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
4629   G1CollectedHeap* _g1h;
4630 
4631 public:
4632   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
4633     _g1h(g1h) {}
4634 
4635   void do_code_blob(CodeBlob* cb) {
4636     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
4637     if (nm == NULL) {
4638       return;
4639     }
4640 
4641     if (ScavengeRootsInCode) {
4642       _g1h->register_nmethod(nm);
4643     }
4644   }
4645 };
4646 
4647 void G1CollectedHeap::rebuild_strong_code_roots() {
4648   RebuildStrongCodeRootClosure blob_cl(this);
4649   CodeCache::blobs_do(&blob_cl);
4650 }
4651 
4652 void G1CollectedHeap::initialize_serviceability() {
4653   _g1mm->initialize_serviceability();
4654 }
4655 
4656 MemoryUsage G1CollectedHeap::memory_usage() {
4657   return _g1mm->memory_usage();
4658 }
4659 
4660 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
4661   return _g1mm->memory_managers();
4662 }
4663 
4664 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
4665   return _g1mm->memory_pools();
4666 }