/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/g1/vm_operations_g1.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

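// Descriptive note (not in the original source): this threshold is set from
// the chosen region size during heap initialization; in G1 an object whose
// size is at least half a region is treated as humongous.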
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the Heap_lock.  This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM.  (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)

// Local to this file.

class RefineCardTableEntryClosure: public CardTableEntryClosure {
  bool _concurrent;
public:
  RefineCardTableEntryClosure() : _concurrent(true) { }

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, NULL);
    // This path is executed by the concurrent refine or mutator threads,
    // concurrently, and so we do not care if card_ptr contains references
    // that point into the collection set.
    assert(!oops_into_cset, "should be");

    if (_concurrent && SuspendibleThreadSet::should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }

  void set_concurrent(bool b) { _concurrent = b; }
};


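// Descriptive note (not in the original source): this closure re-dirties
// logged cards, skipping cards in regions that will be freed by
// free_collection_set() (see will_become_free() below); it is applied when
// logged cards need to be replayed onto the card table after evacuation.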
class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 private:
  size_t _num_dirtied;
  G1CollectedHeap* _g1h;
  G1SATBCardTableLoggingModRefBS* _g1_bs;

  HeapRegion* region_for_card(jbyte* card_ptr) const {
    return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
  }

  bool will_become_free(HeapRegion* hr) const {
    // A region will be freed by free_collection_set if the region is in the
    // collection set and has not had an evacuation failure.
    return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
  }

 public:
  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
    _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    HeapRegion* hr = region_for_card(card_ptr);

    // Should only dirty cards in regions that won't be freed.
    if (!will_become_free(hr)) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      _num_dirtied++;
    }

    return true;
  }

  size_t num_dirtied()   const { return _num_dirtied; }
};


void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}

void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}

// Returns true if the reference points to an object that
// can move in an incremental collection.
bool G1CollectedHeap::is_scavengable(const void* p) {
  HeapRegion* hr = heap_region_containing(p);
  return !hr->is_pinned();
}

// Private methods.

HeapRegion*
G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
    if (!_secondary_free_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
                                      "secondary_free_list has %u entries",
                                      _secondary_free_list.length());
      // It looks as if there are free regions available on the
      // secondary_free_list. Let's move them to the free_list and try
      // again to allocate from it.
      append_secondary_free_list();

      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
             "empty we should have moved at least one entry to the free_list");
      HeapRegion* res = _hrm.allocate_free_region(is_old);
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
                                      "allocated " HR_FORMAT " from secondary_free_list",
                                      HR_FORMAT_PARAMS(res));
      return res;
    }

    // Wait here until we get notified either when (a) there are no
    // more free regions coming or (b) some regions have been moved on
    // the secondary_free_list.
    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
                                  "could not allocate from secondary_free_list");
  return NULL;
}

HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
  assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
         "the only time we use this to allocate a humongous region is "
         "when we are allocating a single humongous region");

  HeapRegion* res;
  if (G1StressConcRegionFreeing) {
    if (!_secondary_free_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
                                      "forced to look at the secondary_free_list");
      res = new_region_try_secondary_free_list(is_old);
      if (res != NULL) {
        return res;
      }
    }
  }

  res = _hrm.allocate_free_region(is_old);

  if (res == NULL) {
    log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
                                    "res == NULL, trying the secondary_free_list");
    res = new_region_try_secondary_free_list(is_old);
  }
  if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
    // Currently, only attempts to allocate GC alloc regions set
    // do_expand to true. So, we should only reach here during a
    // safepoint. If this assumption changes we might have to
    // reconsider the use of _expand_heap_after_alloc_failure.
    assert(SafepointSynchronize::is_at_safepoint(), "invariant");

    log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
                              word_size * HeapWordSize);

    if (expand(word_size * HeapWordSize)) {
      // Given that expand() succeeded in expanding the heap, and we
      // always expand the heap by an amount aligned to the heap
      // region size, the free list should in theory not be empty.
      // In either case allocate_free_region() will check for NULL.
      res = _hrm.allocate_free_region(is_old);
    } else {
      _expand_heap_after_alloc_failure = false;
    }
  }
  return res;
}

HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                           uint num_regions,
                                                           size_t word_size,
                                                           AllocationContext_t context) {
  assert(first != G1_NO_HRM_INDEX, "pre-condition");
  assert(is_humongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series.
  uint last = first + num_regions - 1;

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // refinement threads refining cards on these regions and
  // potentially wanting to refine the BOT as they are scanning
  // those cards (this can happen shortly after a cleanup; see CR
  // 6991377). So we have to set up the region(s) carefully and in
  // a specific order.

  // The word size sum of all the regions we will allocate.
  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
  assert(word_size <= word_size_sum, "sanity");

  // This will be the "starts humongous" region.
  HeapRegion* first_hr = region_at(first);
  // The header of the new object will be placed at the bottom of
  // the first region.
  HeapWord* new_obj = first_hr->bottom();
  // This will be the new top of the new object.
  HeapWord* obj_top = new_obj + word_size;

  // First, we need to zero the header of the space that we will be
  // allocating. When we update top further down, some refinement
  // threads might try to scan the region. By zeroing the header we
  // ensure that any thread that will try to scan the region will
  // come across the zero klass word and bail out.
  //
  // NOTE: It would not have been correct to have used
  // CollectedHeap::fill_with_object() and make the space look like
  // an int array. The thread that is doing the allocation will
  // later update the object header to a potentially different array
  // type and, for a very short period of time, the klass and length
  // fields will be inconsistent. This could cause a refinement
  // thread to calculate the object size incorrectly.
  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

  // How many words we use for filler objects.
  size_t word_fill_size = word_size_sum - word_size;

  // How many words of memory we "waste" because they cannot hold a filler object.
  size_t words_not_fillable = 0;

  if (word_fill_size >= min_fill_size()) {
    fill_with_objects(obj_top, word_fill_size);
  } else if (word_fill_size > 0) {
    // We have space to fill, but we cannot fit an object there.
    words_not_fillable = word_fill_size;
    word_fill_size = 0;
  }

  // We will set up the first region as "starts humongous". This
  // will also update the BOT covering all the regions to reflect
  // that there is a single object that starts at the bottom of the
  // first region.
  first_hr->set_starts_humongous(obj_top, word_fill_size);
  first_hr->set_allocation_context(context);
  // Then, if there are any, we will set up the "continues
  // humongous" regions.
  HeapRegion* hr = NULL;
  for (uint i = first + 1; i <= last; ++i) {
    hr = region_at(i);
    hr->set_continues_humongous(first_hr);
    hr->set_allocation_context(context);
  }

  // Up to this point no concurrent thread would have been able to
  // do any scanning on any region in this series. All the top
  // fields still point to bottom, so the intersection between
  // [bottom,top] and [card_start,card_end] will be empty. Before we
  // update the top fields, we'll do a storestore to make sure that
  // no thread sees the update to top before the zeroing of the
  // object header and the BOT initialization.
  OrderAccess::storestore();

  // Now, we will update the top fields of all the regions in the
  // series except the last one.
  for (uint i = first; i < last; ++i) {
    hr = region_at(i);
    hr->set_top(hr->end());
  }

  hr = region_at(last);
  // If we cannot fit a filler object, we must set top to the end
  // of the humongous object, otherwise we cannot iterate the heap
  // and the BOT will not be complete.
  hr->set_top(hr->end() - words_not_fillable);

  assert(hr->bottom() < obj_top && obj_top <= hr->end(),
         "obj_top should be in last region");

  _verifier->check_bitmaps("Humongous Region Allocation", first_hr);

  assert(words_not_fillable == 0 ||
         first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
         "Miscalculation in humongous allocation");

  increase_used((word_size_sum - words_not_fillable) * HeapWordSize);

  for (uint i = first; i <= last; ++i) {
    hr = region_at(i);
    _humongous_set.add(hr);
    _hr_printer.alloc(hr);
  }

  return new_obj;
}

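// Worked example of the arithmetic below (not in the original source; the
// numbers are purely for illustration): with a 1 MB region, i.e.
// HeapRegion::GrainWords == 131072 words, a 200000-word object is aligned
// up to 262144 words and therefore occupies 262144 / 131072 == 2 regions.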
size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
  assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
  return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
}

// If the allocation could fit into the free regions without expansion, try that.
// Otherwise, if we can expand the heap, do so.
// Otherwise, if using expanded regions might help, try with those given back.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  _verifier->verify_region_sets_optional();

  uint first = G1_NO_HRM_INDEX;
  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);

  if (obj_regions == 1) {
    // Only one region to allocate, try to use a fast path by directly allocating
    // from the free lists. Do not try to expand here, we will potentially do that
    // later.
    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
    if (hr != NULL) {
      first = hr->hrm_index();
    }
  } else {
    // We can't allocate humongous regions spanning more than one region while
    // cleanupComplete() is running, since some of the regions we find to be
    // empty might not yet be added to the free list. It is not straightforward
    // to know which list they are on, so that we could remove them. We only
    // need to do this if we need to allocate more than one region to satisfy the
    // current humongous allocation request. If we are only allocating one region
    // we use the single-region allocation code (see above), which already
    // potentially waits for regions from the secondary free list.
    wait_while_free_regions_coming();
    append_secondary_free_list_if_not_empty_with_lock();

    // Policy: Try only empty regions (i.e. already committed) first. Maybe we
    // are lucky enough to find some.
    first = _hrm.find_contiguous_only_empty(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      _hrm.allocate_free_regions_starting_at(first, obj_regions);
    }
  }

  if (first == G1_NO_HRM_INDEX) {
    // Policy: We could not find enough regions for the humongous object in the
    // free list. Look through the heap to find a mix of free and uncommitted regions.
    // If we find such a mix, try expansion.
    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      // We found something. Make sure these regions are committed, i.e. expand
      // the heap. Alternatively we could do a defragmentation GC.
      log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                word_size * HeapWordSize);

      _hrm.expand_at(first, obj_regions);
      g1_policy()->record_new_heap_size(num_regions());

#ifdef ASSERT
      for (uint i = first; i < first + obj_regions; ++i) {
        HeapRegion* hr = region_at(i);
        assert(hr->is_free(), "sanity");
        assert(hr->is_empty(), "sanity");
        assert(is_on_master_free_list(hr), "sanity");
      }
#endif
      _hrm.allocate_free_regions_starting_at(first, obj_regions);
    } else {
      // Policy: Potentially trigger a defragmentation GC.
    }
  }

  HeapWord* result = NULL;
  if (first != G1_NO_HRM_INDEX) {
    result = humongous_obj_allocate_initialize_regions(first, obj_regions,
                                                       word_size, context);
    assert(result != NULL, "it should always return a valid result");

    // A successful humongous object allocation changes the used space
    // information of the old generation so we need to recalculate the
    // sizes and update the jstat counters here.
    g1mm()->update_sizes();
  }

  _verifier->verify_region_sets_optional();

  return result;
}

HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "we do not allow humongous TLABs");

  uint dummy_gc_count_before;
  uint dummy_gclocker_retry_count = 0;
  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
}

HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool*  gc_overhead_limit_was_exceeded) {
  assert_heap_not_locked_and_not_at_safepoint();

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    uint gc_count_before;

    HeapWord* result = NULL;
    if (!is_humongous(word_size)) {
      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
    } else {
      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
    }
    if (result != NULL) {
      return result;
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(gc_count_before, word_size);
    op.set_allocation_context(AllocationContext::current());

    // ...and get the VM thread to execute it.
    VMThread::execute(&op);

    if (op.prologue_succeeded() && op.pause_succeeded()) {
      // If the operation was successful we'll return the result even
      // if it is NULL. If the allocation attempt failed immediately
      // after a Full GC, it's unlikely we'll be able to allocate now.
      HeapWord* result = op.result();
      if (result != NULL && !is_humongous(word_size)) {
        // Allocations that take place on VM operations do not do any
        // card dirtying and we have to do it here. We only have to do
        // this for non-humongous allocations, though.
        dirty_young_block(result, word_size);
      }
      return result;
    } else {
      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
        return NULL;
      }
      assert(op.result() == NULL,
             "the result should be NULL if the VM op did not succeed");
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("G1CollectedHeap::mem_allocate retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                                   AllocationContext_t context,
                                                   uint* gc_count_before_ret,
                                                   uint* gclocker_retry_count_ret) {
  // Make sure you read the note in attempt_allocation_humongous().

  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");

  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);
      result = _allocator->attempt_allocation_locked(word_size, context);
      if (result != NULL) {
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (g1_policy()->can_expand_young_list()) {
          // No need for an ergo verbose message here,
          // can_expand_young_list() does this when it returns true.
          result = _allocator->attempt_allocation_force(word_size, context);
          if (result != NULL) {
            return result;
          }
        }
        should_try_gc = false;
      } else {
        // The GCLocker may not be active but the GCLocker initiated
        // GC may not yet have been performed (GCLocker::needs_gc()
        // returns true). In this case we do not try this GC and
        // wait until the GCLocker initiated GC is performed, and
        // then retry the allocation.
        if (GCLocker::needs_gc()) {
          should_try_gc = false;
        } else {
          // Read the GC count while still holding the Heap_lock.
          gc_count_before = total_collections();
          should_try_gc = true;
        }
      }
    }

    if (should_try_gc) {
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                   GCCause::_g1_inc_collection_pause);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }

      if (succeeded) {
        // If we get here we successfully scheduled a collection which
        // failed to allocate. No point in trying to allocate
        // further. We'll just return NULL.
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
    } else {
      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GCLocker::stall_until_clear();
      (*gclocker_retry_count_ret) += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. We do the
    // first attempt (without holding the Heap_lock) here and the
    // follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
    result = _allocator->attempt_allocation(word_size, context);
    if (result != NULL) {
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() "
                      "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

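// Descriptive note (not in the original source): archive regions are filled
// through the three-step protocol below, all at a safepoint in the VM thread:
// begin_archive_alloc_range() sets up the allocator, archive_mem_allocate()
// services individual allocations, and end_archive_alloc_range() records the
// resulting MemRegions and tears the allocator down.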
void G1CollectedHeap::begin_archive_alloc_range() {
  assert_at_safepoint(true /* should_be_vm_thread */);
  if (_archive_allocator == NULL) {
    _archive_allocator = G1ArchiveAllocator::create_allocator(this);
  }
}

bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
  // Allocations in archive regions cannot be of a size that would be considered
  // humongous even for a minimum-sized region, because G1 region sizes/boundaries
  // may be different at archive-restore time.
  return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
}

HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
  if (is_archive_alloc_too_large(word_size)) {
    return NULL;
  }
  return _archive_allocator->archive_mem_allocate(word_size);
}

void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
                                              size_t end_alignment_in_bytes) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  assert(_archive_allocator != NULL, "_archive_allocator not initialized");

  // Call complete_archive to do the real work, filling in the MemRegion
  // array with the archive regions.
  _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
  delete _archive_allocator;
  _archive_allocator = NULL;
}

bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
  for (size_t i = 0; i < count; i++) {
    if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
      return false;
    }
  }
  return true;
}

bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MutexLockerEx x(Heap_lock);

  MemRegion reserved = _hrm.reserved();
  HeapWord* prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;

  // Temporarily disable pretouching of heap pages. This interface is used
  // when mmap'ing archived heap data in, so pre-touching is wasted.
  FlagSetting fs(AlwaysPreTouch, false);

  // Enable archive object checking in G1MarkSweep. We have to let it know
  // about each archive range, so that objects in those ranges aren't marked.
  G1MarkSweep::enable_archive_object_check();

  // For each specified MemRegion range, allocate the corresponding G1
  // regions and mark them as archive regions. We expect the ranges in
  // ascending starting address order, without overlap.
  for (size_t i = 0; i < count; i++) {
    MemRegion curr_range = ranges[i];
    HeapWord* start_address = curr_range.start();
    size_t word_size = curr_range.word_size();
    HeapWord* last_address = curr_range.last();
    size_t commits = 0;

    guarantee(reserved.contains(start_address) && reserved.contains(last_address),
              "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
              p2i(start_address), p2i(last_address));
    guarantee(start_address > prev_last_addr,
              "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
              p2i(start_address), p2i(prev_last_addr));
    prev_last_addr = last_address;

    // Check for ranges that start in the same G1 region in which the previous
    // range ended, and adjust the start address so we don't try to allocate
    // the same region again. If the current range is entirely within that
    // region, skip it, just adjusting the recorded top.
    HeapRegion* start_region = _hrm.addr_to_region(start_address);
    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
      start_address = start_region->end();
      if (start_address > last_address) {
        increase_used(word_size * HeapWordSize);
        start_region->set_top(last_address + 1);
        continue;
      }
      start_region->set_top(start_address);
      curr_range = MemRegion(start_address, last_address + 1);
      start_region = _hrm.addr_to_region(start_address);
    }

    // Perform the actual region allocation, exiting if it fails.
    // Then note how much new space we have allocated.
    if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
      return false;
    }
    increase_used(word_size * HeapWordSize);
    if (commits != 0) {
      log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
                                HeapRegion::GrainWords * HeapWordSize * commits);
    }

    // Mark each G1 region touched by the range as archive, add it to the old set,
    // and set the allocation context and top.
    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
    HeapRegion* last_region = _hrm.addr_to_region(last_address);
    prev_last_region = last_region;

    while (curr_region != NULL) {
      assert(curr_region->is_empty() && !curr_region->is_pinned(),
             "Region already in use (index %u)", curr_region->hrm_index());
      curr_region->set_allocation_context(AllocationContext::system());
      curr_region->set_archive();
      _hr_printer.alloc(curr_region);
      _old_set.add(curr_region);
      if (curr_region != last_region) {
        curr_region->set_top(curr_region->end());
        curr_region = _hrm.next_region_in_heap(curr_region);
      } else {
        curr_region->set_top(last_address + 1);
        curr_region = NULL;
      }
    }

    // Notify mark-sweep of the archive range.
    G1MarkSweep::set_range_archive(curr_range, true);
  }
  return true;
}

void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
  HeapWord* prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;

  // For each MemRegion, create filler objects, if needed, in the G1 regions
  // that contain the address range. The address range actually within the
  // MemRegion will not be modified. That is assumed to have been initialized
  // elsewhere, probably via an mmap of archived heap data.
  MutexLockerEx x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
    HeapWord* start_address = ranges[i].start();
    HeapWord* last_address = ranges[i].last();

    assert(reserved.contains(start_address) && reserved.contains(last_address),
           "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
           p2i(start_address), p2i(last_address));
    assert(start_address > prev_last_addr,
           "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
           p2i(start_address), p2i(prev_last_addr));

    HeapRegion* start_region = _hrm.addr_to_region(start_address);
    HeapRegion* last_region = _hrm.addr_to_region(last_address);
    HeapWord* bottom_address = start_region->bottom();

    // Check for a range beginning in the same region in which the
    // previous one ended.
    if (start_region == prev_last_region) {
      bottom_address = prev_last_addr + 1;
    }

    // Verify that the regions were all marked as archive regions by
    // alloc_archive_regions.
    HeapRegion* curr_region = start_region;
    while (curr_region != NULL) {
      guarantee(curr_region->is_archive(),
                "Expected archive region at index %u", curr_region->hrm_index());
      if (curr_region != last_region) {
        curr_region = _hrm.next_region_in_heap(curr_region);
      } else {
        curr_region = NULL;
      }
    }

    prev_last_addr = last_address;
    prev_last_region = last_region;

    // Fill the memory below the allocated range with dummy object(s),
    // if the region bottom does not match the range start, or if the previous
    // range ended within the same G1 region, and there is a gap.
    if (start_address != bottom_address) {
      size_t fill_size = pointer_delta(start_address, bottom_address);
      G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
      increase_used(fill_size * HeapWordSize);
    }
  }
}

inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                     uint* gc_count_before_ret,
                                                     uint* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  AllocationContext_t context = AllocationContext::current();
  HeapWord* result = _allocator->attempt_allocation(word_size, context);

  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     context,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
  HeapWord* prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;
  size_t size_used = 0;
  size_t uncommitted_regions = 0;

  // For each MemRegion, free the G1 regions that constitute it, and
  // notify mark-sweep that the range is no longer to be considered 'archive.'
  MutexLockerEx x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
    HeapWord* start_address = ranges[i].start();
    HeapWord* last_address = ranges[i].last();

    assert(reserved.contains(start_address) && reserved.contains(last_address),
           "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
           p2i(start_address), p2i(last_address));
    assert(start_address > prev_last_addr,
           "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
           p2i(start_address), p2i(prev_last_addr));
    size_used += ranges[i].byte_size();
    prev_last_addr = last_address;

    HeapRegion* start_region = _hrm.addr_to_region(start_address);
    HeapRegion* last_region = _hrm.addr_to_region(last_address);

    // Check for ranges that start in the same G1 region in which the previous
    // range ended, and adjust the start address so we don't try to free
    // the same region again. If the current range is entirely within that
    // region, skip it.
    if (start_region == prev_last_region) {
      start_address = start_region->end();
      if (start_address > last_address) {
        continue;
      }
      start_region = _hrm.addr_to_region(start_address);
    }
    prev_last_region = last_region;

    // After verifying that each region was marked as an archive region by
    // alloc_archive_regions, set it free and empty and uncommit it.
    HeapRegion* curr_region = start_region;
    while (curr_region != NULL) {
      guarantee(curr_region->is_archive(),
                "Expected archive region at index %u", curr_region->hrm_index());
      uint curr_index = curr_region->hrm_index();
      _old_set.remove(curr_region);
      curr_region->set_free();
      curr_region->set_top(curr_region->bottom());
      if (curr_region != last_region) {
        curr_region = _hrm.next_region_in_heap(curr_region);
      } else {
        curr_region = NULL;
      }
      _hrm.shrink_at(curr_index, 1);
      uncommitted_regions++;
    }

    // Notify mark-sweep that this is no longer an archive range.
    G1MarkSweep::set_range_archive(ranges[i], false);
  }

  if (uncommitted_regions != 0) {
    log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
                              HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
  }
  decrease_used(size_used);
}

HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                        uint* gc_count_before_ret,
                                                        uint* gclocker_retry_count_ret) {
  // The structure of this method has a lot of similarities to
  // attempt_allocation_slow(). The reason these two were not merged
  // into a single one is that such a method would require several "if
  // allocation is not humongous do this, otherwise do that"
  // conditional paths which would obscure its flow. In fact, an early
  // version of this code did use a unified method which was harder to
  // follow and, as a result, it had subtle bugs that were hard to
  // track down. So keeping these two methods separate allows each to
  // be more readable. It will be good to keep these two in sync as
  // much as possible.

  assert_heap_not_locked_and_not_at_safepoint();
  assert(is_humongous(word_size), "attempt_allocation_humongous() "
         "should only be called for humongous allocations");

  // Humongous objects can exhaust the heap quickly, so we should check if we
  // need to start a marking cycle at each humongous object allocation. We do
  // the check before we do the actual allocation. The reason for doing it
  // before the allocation is that we avoid having to keep track of the newly
  // allocated memory while we do a GC.
  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
                                           word_size)) {
    collect(GCCause::_g1_humongous_allocation);
  }

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);

      // Given that humongous objects are not allocated in young
      // regions, we'll first try to do the allocation without doing a
      // collection hoping that there's enough space in the heap.
      result = humongous_obj_allocate(word_size, AllocationContext::current());
      if (result != NULL) {
        size_t size_in_regions = humongous_obj_size_in_regions(word_size);
        g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        should_try_gc = false;
      } else {
        // The GCLocker may not be active but the GCLocker initiated
        // GC may not yet have been performed (GCLocker::needs_gc()
        // returns true). In this case we do not try this GC and
        // wait until the GCLocker initiated GC is performed, and
        // then retry the allocation.
        if (GCLocker::needs_gc()) {
          should_try_gc = false;
        } else {
          // Read the GC count while still holding the Heap_lock.
          gc_count_before = total_collections();
          should_try_gc = true;
        }
      }
    }

    if (should_try_gc) {
      // If we failed to allocate the humongous object, we should try to
      // do a collection pause (if we're allowed) in case it reclaims
      // enough space for the allocation to succeed after the pause.

      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                   GCCause::_g1_humongous_allocation);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }

      if (succeeded) {
        // If we get here we successfully scheduled a collection which
        // failed to allocate. No point in trying to allocate
        // further. We'll just return NULL.
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
    } else {
      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GCLocker::stall_until_clear();
      (*gclocker_retry_count_ret) += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space.  Give a
    // warning if we seem to be looping forever.

    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("G1CollectedHeap::attempt_allocation_humongous() "
                      "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                                           AllocationContext_t context,
                                                           bool expect_null_mutator_alloc_region) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
         "the current alloc region was unexpectedly found to be non-NULL");

  if (!is_humongous(word_size)) {
    return _allocator->attempt_allocation_locked(word_size, context);
  } else {
    HeapWord* result = humongous_obj_allocate(word_size, context);
    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
      collector_state()->set_initiate_conc_mark_if_possible(true);
    }
    return result;
  }

  ShouldNotReachHere();
}

class PostMCRemSetClearClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
    _g1h(g1h), _mr_bs(mr_bs) {}

  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();

    _g1h->reset_gc_time_stamps(r);

    if (r->is_continues_humongous()) {
      // We'll assert that the strong code root list and RSet are empty.
      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
      assert(hrrs->occupied() == 0, "RSet should be empty");
    } else {
      hrrs->clear();
    }
    // You might think here that we could clear just the cards
    // corresponding to the used region.  But no: if we leave a dirty card
    // in a region we might allocate into, then it would prevent that card
    // from being enqueued, and cause it to be missed.
    // Re: the performance cost: we shouldn't be doing full GC anyway!
    _mr_bs->clear(MemRegion(r->bottom(), r->end()));

    return false;
  }
};

void G1CollectedHeap::clear_rsets_post_compaction() {
  PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
  heap_region_iterate(&rs_clear);
}

class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap*   _g1h;
  UpdateRSOopClosure _cl;
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
    _cl(g1->g1_rem_set(), worker_i),
    _g1h(g1)
  { }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false;
  }
};

class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
  HeapRegionClaimer _hrclaimer;

public:
  ParRebuildRSTask(G1CollectedHeap* g1) :
      AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}

  void work(uint worker_id) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
    _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
  }
};

class PostCompactionPrinterClosure: public HeapRegionClosure {
private:
  G1HRPrinter* _hr_printer;
public:
  bool doHeapRegion(HeapRegion* hr) {
    assert(!hr->is_young(), "not expecting to find young regions");
    _hr_printer->post_compaction(hr);
    return false;
  }

  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
    : _hr_printer(hr_printer) { }
};

void G1CollectedHeap::print_hrm_post_compaction() {
  if (_hr_printer.is_active()) {
    PostCompactionPrinterClosure cl(hr_printer());
    heap_region_iterate(&cl);
  }
}

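// Descriptive note (not in the original source): this performs a
// stop-the-world full compaction using the serial G1MarkSweep; it returns
// false without collecting if the GCLocker is active when the safepoint is
// reached.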
1165 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1166                                          bool clear_all_soft_refs) {
1167   assert_at_safepoint(true /* should_be_vm_thread */);
1168 
1169   if (GCLocker::check_active_before_gc()) {
1170     return false;
1171   }
1172 
1173   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1174   gc_timer->register_gc_start();
1175 
1176   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1177   GCIdMark gc_id_mark;
1178   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1179 
1180   SvcGCMarker sgcm(SvcGCMarker::FULL);
1181   ResourceMark rm;
1182 
1183   print_heap_before_gc();
1184   print_heap_regions();
1185   trace_heap_before_gc(gc_tracer);
1186 
1187   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1188 
1189   _verifier->verify_region_sets_optional();
1190 
1191   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1192                            collector_policy()->should_clear_all_soft_refs();
1193 
1194   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1195 
1196   {
1197     IsGCActiveMark x;
1198 
1199     // Timing
1200     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1201     GCTraceCPUTime tcpu;
1202 
1203     {
1204       GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1205       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1206       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1207 
1208       G1HeapTransition heap_transition(this);
1209       g1_policy()->record_full_collection_start();
1210 
1211       // Note: When we have a more flexible GC logging framework that
1212       // allows us to add optional attributes to a GC log record we
1213       // could consider timing and reporting how long we wait in the
1214       // following two methods.
1215       wait_while_free_regions_coming();
1216       // If we start the compaction before the CM threads finish
1217       // scanning the root regions we might trip them over as we'll
1218       // be moving objects / updating references. So let's wait until
1219       // they are done. By telling them to abort, they should complete
1220       // early.
1221       _cm->root_regions()->abort();
1222       _cm->root_regions()->wait_until_scan_finished();
1223       append_secondary_free_list_if_not_empty_with_lock();
1224 
1225       gc_prologue(true);
1226       increment_total_collections(true /* full gc */);
1227       increment_old_marking_cycles_started();
1228 
1229       assert(used() == recalculate_used(), "Should be equal");
1230 
1231       _verifier->verify_before_gc();
1232 
1233       _verifier->check_bitmaps("Full GC Start");
1234       pre_full_gc_dump(gc_timer);
1235 
1236 #if defined(COMPILER2) || INCLUDE_JVMCI
1237       DerivedPointerTable::clear();
1238 #endif
1239 
1240       // Disable discovery and empty the discovered lists
1241       // for the CM ref processor.
1242       ref_processor_cm()->disable_discovery();
1243       ref_processor_cm()->abandon_partial_discovery();
1244       ref_processor_cm()->verify_no_references_recorded();
1245 
1246       // Abandon current iterations of concurrent marking and concurrent
1247       // refinement, if any are in progress.
1248       concurrent_mark()->abort();
1249 
1250       // Make sure we'll choose a new allocation region afterwards.
1251       _allocator->release_mutator_alloc_region();
1252       _allocator->abandon_gc_alloc_regions();
1253       g1_rem_set()->cleanupHRRS();
1254 
1255       // We may have added regions to the current incremental collection
1256       // set between the last GC or pause and now. We need to clear the
1257       // incremental collection set and then start rebuilding it afresh
1258       // after this full GC.
1259       abandon_collection_set(collection_set());
1260 
1261       tear_down_region_sets(false /* free_list_only */);
1262       collector_state()->set_gcs_are_young(true);
1263 
1264       // See the comments in g1CollectedHeap.hpp and
1265       // G1CollectedHeap::ref_processing_init() about
1266       // how reference processing currently works in G1.
1267 
1268       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1269       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1270 
1271       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1272       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1273 
1274       ref_processor_stw()->enable_discovery();
1275       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1276 
1277       // Do collection work
1278       {
1279         HandleMark hm;  // Discard invalid handles created during gc
1280         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1281       }
1282 
1283       assert(num_free_regions() == 0, "we should not have added any free regions");
1284       rebuild_region_sets(false /* free_list_only */);
1285 
1286       // Enqueue any discovered reference objects that have
1287       // not been removed from the discovered lists.
1288       ref_processor_stw()->enqueue_discovered_references();
1289 
1290 #if defined(COMPILER2) || INCLUDE_JVMCI
1291       DerivedPointerTable::update_pointers();
1292 #endif
1293 
1294       MemoryService::track_memory_usage();
1295 
1296       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1297       ref_processor_stw()->verify_no_references_recorded();
1298 
1299       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1300       ClassLoaderDataGraph::purge();
1301       MetaspaceAux::verify_metrics();
1302 
1303       // Note: since we've just done a full GC, concurrent
1304       // marking is no longer active. Therefore we need not
1305       // re-enable reference discovery for the CM ref processor.
1306       // That will be done at the start of the next marking cycle.
1307       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1308       ref_processor_cm()->verify_no_references_recorded();
1309 
1310       reset_gc_time_stamp();
1311       // Since everything potentially moved, we will clear all remembered
1312       // sets, and clear all cards.  Later we will rebuild remembered
1313       // sets. We will also reset the GC time stamps of the regions.
1314       clear_rsets_post_compaction();
1315       check_gc_time_stamps();
1316 
1317       resize_if_necessary_after_full_collection();
1318 
1319       // We should do this after we potentially resize the heap so
1320       // that all the COMMIT / UNCOMMIT events are generated before
1321       // the compaction events.
1322       print_hrm_post_compaction();
1323 
1324       if (_hot_card_cache->use_cache()) {
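        // The card counts and the cached hot cards refer to pre-compaction
        // card locations and are stale once objects have moved.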
1325         _hot_card_cache->reset_card_counts();
1326         _hot_card_cache->reset_hot_cache();
1327       }
1328 
1329       // Rebuild remembered sets of all regions.
1330       uint n_workers =
1331         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1332                                                 workers()->active_workers(),
1333                                                 Threads::number_of_non_daemon_threads());
1334       workers()->update_active_workers(n_workers);
      log_info(gc, task)("Using %u workers of %u to rebuild remembered set", n_workers, workers()->total_workers());
1336 
1337       ParRebuildRSTask rebuild_rs_task(this);
1338       workers()->run_task(&rebuild_rs_task);
1339 
1340       // Rebuild the strong code root lists for each region
1341       rebuild_strong_code_roots();
1342 
      // FIXME: currently done unconditionally; make this conditional once
      // there is a policy for when to resize metaspace after a full GC.
      MetaspaceGC::compute_new_size();
1346 
1347 #ifdef TRACESPINNING
1348       ParallelTaskTerminator::print_termination_counts();
1349 #endif
1350 
      // Discard all pending remembered set updates: they refer to the
      // pre-compaction heap, and the remembered sets were rebuilt above.
1352       JavaThread::dirty_card_queue_set().abandon_logs();
1353       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1354 
1355       // At this point there should be no regions in the
1356       // entire heap tagged as young.
1357       assert(check_young_list_empty(), "young list should be empty at this point");
1358 
1359       // Update the number of full collections that have been completed.
1360       increment_old_marking_cycles_completed(false /* concurrent */);
1361 
1362       _hrm.verify_optional();
1363       _verifier->verify_region_sets_optional();
1364 
1365       _verifier->verify_after_gc();
1366 
1367       // Clear the previous marking bitmap, if needed for bitmap verification.
1368       // Note we cannot do this when we clear the next marking bitmap in
1369       // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1370       // objects marked during a full GC against the previous bitmap.
1371       // But we need to clear it before calling check_bitmaps below since
1372       // the full GC has compacted objects and updated TAMS but not updated
1373       // the prev bitmap.
1374       if (G1VerifyBitmaps) {
        GCTraceTime(Debug, gc) tm_clear("Clear Bitmap for Verification");
1376         _cm->clear_prev_bitmap(workers());
1377       }
1378       _verifier->check_bitmaps("Full GC End");
1379 
1380       // Start a new incremental collection set for the next pause
1381       collection_set()->start_incremental_building();
1382 
1383       clear_cset_fast_test();
1384 
1385       _allocator->init_mutator_alloc_region();
1386 
1387       g1_policy()->record_full_collection_end();
1388 
1389       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1390       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1391       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1392       // before any GC notifications are raised.
1393       g1mm()->update_sizes();
1394 
1395       gc_epilogue(true);
1396 
1397       heap_transition.print();
1398 
1399       print_heap_after_gc();
1400       print_heap_regions();
1401       trace_heap_after_gc(gc_tracer);
1402 
1403       post_full_gc_dump(gc_timer);
1404     }
1405 
1406     gc_timer->register_gc_end();
1407     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1408   }
1409 
1410   return true;
1411 }
1412 
1413 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1414   // Currently, there is no facility in the do_full_collection(bool) API to notify
1415   // the caller that the collection did not succeed (e.g., because it was locked
1416   // out by the GC locker). So, right now, we'll ignore the return value.
1417   bool dummy = do_full_collection(true,                /* explicit_gc */
1418                                   clear_all_soft_refs);
1419 }
1420 
1421 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1422   // Include bytes that will be pre-allocated to support collections, as "used".
1423   const size_t used_after_gc = used();
1424   const size_t capacity_after_gc = capacity();
1425   const size_t free_after_gc = capacity_after_gc - used_after_gc;
1426 
1427   // This is enforced in arguments.cpp.
1428   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1429          "otherwise the code below doesn't make sense");
1430 
1431   // We don't have floating point command-line arguments
1432   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1433   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1434   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1435   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
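
  // For example, with MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70, at
  // least 40% and at most 70% of the capacity should be free, i.e. at most
  // 60% and at least 30% used. With 600M used after the GC, the minimum
  // desired capacity is 600M / 0.60 = 1000M and the maximum desired
  // capacity is 600M / 0.30 = 2000M.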
1436 
1437   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1438   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1439 
1440   // We have to be careful here as these two calculations can overflow
1441   // 32-bit size_t's.
1442   double used_after_gc_d = (double) used_after_gc;
1443   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1444   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1445 
1446   // Let's make sure that they are both under the max heap size, which
1447   // by default will make them fit into a size_t.
1448   double desired_capacity_upper_bound = (double) max_heap_size;
1449   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1450                                     desired_capacity_upper_bound);
1451   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1452                                     desired_capacity_upper_bound);
1453 
1454   // We can now safely turn them into size_t's.
1455   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1456   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1457 
1458   // This assert only makes sense here, before we adjust them
1459   // with respect to the min and max heap size.
1460   assert(minimum_desired_capacity <= maximum_desired_capacity,
1461          "minimum_desired_capacity = " SIZE_FORMAT ", "
1462          "maximum_desired_capacity = " SIZE_FORMAT,
1463          minimum_desired_capacity, maximum_desired_capacity);
1464 
1465   // Should not be greater than the heap max size. No need to adjust
1466   // it with respect to the heap min size as it's a lower bound (i.e.,
1467   // we'll try to make the capacity larger than it, not smaller).
1468   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1469   // Should not be less than the heap min size. No need to adjust it
1470   // with respect to the heap max size as it's an upper bound (i.e.,
1471   // we'll try to make the capacity smaller than it, not greater).
  maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1473 
1474   if (capacity_after_gc < minimum_desired_capacity) {
1475     // Don't expand unless it's significant
1476     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1477 
1478     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1479                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1480                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1481 
1482     expand(expand_bytes);
1483 
1484     // No expansion, now see if we want to shrink
1485   } else if (capacity_after_gc > maximum_desired_capacity) {
1486     // Capacity too large, compute shrinking size
1487     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1488 
1489     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1490                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1491                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1492 
1493     shrink(shrink_bytes);
1494   }
1495 }
1496 
1497 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1498                                                             AllocationContext_t context,
1499                                                             bool do_gc,
1500                                                             bool clear_all_soft_refs,
1501                                                             bool expect_null_mutator_alloc_region,
1502                                                             bool* gc_succeeded) {
1503   *gc_succeeded = true;
1504   // Let's attempt the allocation first.
1505   HeapWord* result =
1506     attempt_allocation_at_safepoint(word_size,
1507                                     context,
1508                                     expect_null_mutator_alloc_region);
1509   if (result != NULL) {
1510     assert(*gc_succeeded, "sanity");
1511     return result;
1512   }
1513 
1514   // In a G1 heap, we're supposed to keep allocation from failing by
1515   // incremental pauses.  Therefore, at least for now, we'll favor
1516   // expansion over collection.  (This might change in the future if we can
1517   // do something smarter than full collection to satisfy a failed alloc.)
1518   result = expand_and_allocate(word_size, context);
1519   if (result != NULL) {
1520     assert(*gc_succeeded, "sanity");
1521     return result;
1522   }
1523 
1524   if (do_gc) {
1525     // Expansion didn't work, we'll try to do a Full GC.
1526     *gc_succeeded = do_full_collection(false, /* explicit_gc */
1527                                        clear_all_soft_refs);
1528   }
1529 
1530   return NULL;
1531 }
1532 
1533 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1534                                                      AllocationContext_t context,
1535                                                      bool* succeeded) {
1536   assert_at_safepoint(true /* should_be_vm_thread */);
1537 
1538   // Attempts to allocate followed by Full GC.
1539   HeapWord* result =
1540     satisfy_failed_allocation_helper(word_size,
1541                                      context,
1542                                      true,  /* do_gc */
1543                                      false, /* clear_all_soft_refs */
1544                                      false, /* expect_null_mutator_alloc_region */
1545                                      succeeded);
1546 
1547   if (result != NULL || !*succeeded) {
1548     return result;
1549   }
1550 
1551   // Attempts to allocate followed by Full GC that will collect all soft references.
1552   result = satisfy_failed_allocation_helper(word_size,
1553                                             context,
1554                                             true, /* do_gc */
1555                                             true, /* clear_all_soft_refs */
1556                                             true, /* expect_null_mutator_alloc_region */
1557                                             succeeded);
1558 
1559   if (result != NULL || !*succeeded) {
1560     return result;
1561   }
1562 
1563   // Attempts to allocate, no GC
1564   result = satisfy_failed_allocation_helper(word_size,
1565                                             context,
1566                                             false, /* do_gc */
1567                                             false, /* clear_all_soft_refs */
1568                                             true,  /* expect_null_mutator_alloc_region */
1569                                             succeeded);
1570 
1571   if (result != NULL) {
1572     assert(*succeeded, "sanity");
1573     return result;
1574   }
1575 
1576   assert(!collector_policy()->should_clear_all_soft_refs(),
1577          "Flag should have been handled and cleared prior to this point");
1578 
1579   // What else?  We might try synchronous finalization later.  If the total
1580   // space available is large enough for the allocation, then a more
1581   // complete compaction phase than we've tried so far might be
1582   // appropriate.
1583   assert(*succeeded, "sanity");
1584   return NULL;
1585 }
1586 
// Attempts to expand the heap sufficiently to support an allocation of the
// given "word_size". If successful, performs the allocation and returns the
// address of the allocated block; otherwise returns NULL.
1591 
1592 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1593   assert_at_safepoint(true /* should_be_vm_thread */);
1594 
1595   _verifier->verify_region_sets_optional();
1596 
1597   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1598   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
                            word_size * HeapWordSize);

1602   if (expand(expand_bytes)) {
1603     _hrm.verify_optional();
1604     _verifier->verify_region_sets_optional();
1605     return attempt_allocation_at_safepoint(word_size,
1606                                            context,
1607                                            false /* expect_null_mutator_alloc_region */);
1608   }
1609   return NULL;
1610 }
1611 
1612 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1613   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1614   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1615                                        HeapRegion::GrainBytes);
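  // For example, with 1M regions a 1500K request is first page-aligned and
  // then rounded up to a whole number of regions, i.e. 2M (two regions).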
1616 
1617   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount:" SIZE_FORMAT "B expansion amount:" SIZE_FORMAT "B",
1618                             expand_bytes, aligned_expand_bytes);
1619 
1620   if (is_maximal_no_gc()) {
1621     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1622     return false;
1623   }
1624 
1625   double expand_heap_start_time_sec = os::elapsedTime();
1626   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1627   assert(regions_to_expand > 0, "Must expand by at least one region");
1628 
1629   uint expanded_by = _hrm.expand_by(regions_to_expand);
1630   if (expand_time_ms != NULL) {
1631     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1632   }
1633 
1634   if (expanded_by > 0) {
1635     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1636     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1637     g1_policy()->record_new_heap_size(num_regions());
1638   } else {
1639     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1640 
1641     // The expansion of the virtual storage space was unsuccessful.
1642     // Let's see if it was because we ran out of swap.
1643     if (G1ExitOnExpansionFailure &&
1644         _hrm.available() >= regions_to_expand) {
1645       // We had head room...
1646       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1647     }
1648   }
1649   return regions_to_expand > 0;
1650 }
1651 
1652 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1653   size_t aligned_shrink_bytes =
1654     ReservedSpace::page_align_size_down(shrink_bytes);
1655   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1656                                          HeapRegion::GrainBytes);
1657   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1658 
1659   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
  size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;

1663   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1664                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1665   if (num_regions_removed > 0) {
1666     g1_policy()->record_new_heap_size(num_regions());
1667   } else {
1668     log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
1669   }
1670 }
1671 
1672 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1673   _verifier->verify_region_sets_optional();
1674 
  // We should only reach here at the end of a Full GC, which means we
  // should not be holding on to any GC alloc regions. The method
  // below will make sure of that and do any remaining clean up.
1678   _allocator->abandon_gc_alloc_regions();
1679 
1680   // Instead of tearing down / rebuilding the free lists here, we
1681   // could instead use the remove_all_pending() method on free_list to
1682   // remove only the ones that we need to remove.
1683   tear_down_region_sets(true /* free_list_only */);
1684   shrink_helper(shrink_bytes);
1685   rebuild_region_sets(true /* free_list_only */);
1686 
1687   _hrm.verify_optional();
1688   _verifier->verify_region_sets_optional();
1689 }
1690 
1691 // Public methods.
1692 
1693 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1694   CollectedHeap(),
1695   _collector_policy(collector_policy),
1696   _g1_policy(create_g1_policy()),
1697   _collection_set(this, _g1_policy),
1698   _dirty_card_queue_set(false),
1699   _is_alive_closure_cm(this),
1700   _is_alive_closure_stw(this),
1701   _ref_processor_cm(NULL),
1702   _ref_processor_stw(NULL),
1703   _bot(NULL),
1704   _hot_card_cache(NULL),
1705   _g1_rem_set(NULL),
1706   _cg1r(NULL),
1707   _g1mm(NULL),
1708   _refine_cte_cl(NULL),
1709   _preserved_marks_set(true /* in_c_heap */),
1710   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1711   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1712   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1713   _humongous_reclaim_candidates(),
1714   _has_humongous_reclaim_candidates(false),
1715   _archive_allocator(NULL),
1716   _free_regions_coming(false),
1717   _gc_time_stamp(0),
1718   _summary_bytes_used(0),
1719   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1720   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1721   _expand_heap_after_alloc_failure(true),
1722   _old_marking_cycles_started(0),
1723   _old_marking_cycles_completed(0),
1724   _in_cset_fast_test(),
1725   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1726   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1727 
1728   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1729                           /* are_GC_task_threads */true,
1730                           /* are_ConcurrentGC_threads */false);
1731   _workers->initialize_workers();
1732   _verifier = new G1HeapVerifier(this);
1733 
1734   _allocator = G1Allocator::create_allocator(this);
1735 
1736   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1737 
1738   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1739 
1740   // Override the default _filler_array_max_size so that no humongous filler
1741   // objects are created.
1742   _filler_array_max_size = _humongous_object_threshold_in_words;
1743 
1744   uint n_queues = ParallelGCThreads;
1745   _task_queues = new RefToScanQueueSet(n_queues);
1746 
1747   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1748 
1749   for (uint i = 0; i < n_queues; i++) {
1750     RefToScanQueue* q = new RefToScanQueue();
1751     q->initialize();
1752     _task_queues->register_queue(i, q);
1753     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1754   }
1755 
1756   // Initialize the G1EvacuationFailureALot counters and flags.
1757   NOT_PRODUCT(reset_evacuation_should_fail();)
1758 
1759   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1760 }
1761 
1762 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1763                                                                  size_t size,
1764                                                                  size_t translation_factor) {
1765   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1766   // Allocate a new reserved space, preferring to use large pages.
1767   ReservedSpace rs(size, preferred_page_size);
1768   G1RegionToSpaceMapper* result  =
1769     G1RegionToSpaceMapper::create_mapper(rs,
1770                                          size,
1771                                          rs.alignment(),
1772                                          HeapRegion::GrainBytes,
1773                                          translation_factor,
1774                                          mtGC);
1775 
1776   os::trace_page_sizes_for_requested_size(description,
1777                                           size,
1778                                           preferred_page_size,
1779                                           rs.alignment(),
1780                                           rs.base(),
1781                                           rs.size());
1782 
1783   return result;
1784 }
1785 
1786 jint G1CollectedHeap::initialize() {
1787   CollectedHeap::pre_initialize();
1788   os::enable_vtime();
1789 
  // Necessary to satisfy locking discipline assertions.
  MutexLocker x(Heap_lock);
1793 
1794   // While there are no constraints in the GC code that HeapWordSize
1795   // be any particular value, there are multiple other areas in the
1796   // system which believe this to be true (e.g. oop->object_size in some
1797   // cases incorrectly returns the size in wordSize units rather than
1798   // HeapWordSize).
1799   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1800 
1801   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1802   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1803   size_t heap_alignment = collector_policy()->heap_alignment();
1804 
1805   // Ensure that the sizes are properly aligned.
1806   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1807   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1808   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1809 
1810   _refine_cte_cl = new RefineCardTableEntryClosure();
1811 
1812   jint ecode = JNI_OK;
1813   _cg1r = ConcurrentG1Refine::create(_refine_cte_cl, &ecode);
1814   if (_cg1r == NULL) {
1815     return ecode;
1816   }
1817 
1818   // Reserve the maximum.
1819 
1820   // When compressed oops are enabled, the preferred heap base
1821   // is calculated by subtracting the requested size from the
1822   // 32Gb boundary and using the result as the base address for
1823   // heap reservation. If the requested size is not aligned to
1824   // HeapRegion::GrainBytes (i.e. the alignment that is passed
1825   // into the ReservedHeapSpace constructor) then the actual
1826   // base of the reserved heap may end up differing from the
1827   // address that was requested (i.e. the preferred heap base).
1828   // If this happens then we could end up using a non-optimal
1829   // compressed oops mode.
1830 
1831   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1832                                                  heap_alignment);
1833 
1834   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1835 
1836   // Create the barrier set for the entire reserved region.
1837   G1SATBCardTableLoggingModRefBS* bs
1838     = new G1SATBCardTableLoggingModRefBS(reserved_region());
1839   bs->initialize();
1840   assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
1841   set_barrier_set(bs);
1842 
1843   // Create the hot card cache.
1844   _hot_card_cache = new G1HotCardCache(this);
1845 
1846   // Also create a G1 rem set.
1847   _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
1848 
1849   // Carve out the G1 part of the heap.
1850   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1851   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1852   G1RegionToSpaceMapper* heap_storage =
1853     G1RegionToSpaceMapper::create_mapper(g1_rs,
1854                                          g1_rs.size(),
1855                                          page_size,
1856                                          HeapRegion::GrainBytes,
1857                                          1,
1858                                          mtJavaHeap);
1859   os::trace_page_sizes("Heap",
1860                        collector_policy()->min_heap_byte_size(),
1861                        max_byte_size,
1862                        page_size,
1863                        heap_rs.base(),
1864                        heap_rs.size());
1865   heap_storage->set_mapping_changed_listener(&_listener);
1866 
1867   // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1868   G1RegionToSpaceMapper* bot_storage =
1869     create_aux_memory_mapper("Block Offset Table",
1870                              G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1871                              G1BlockOffsetTable::heap_map_factor());
1872 
1874   G1RegionToSpaceMapper* cardtable_storage =
1875     create_aux_memory_mapper("Card Table",
1876                              G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
1877                              G1SATBCardTableLoggingModRefBS::heap_map_factor());
1878 
1879   G1RegionToSpaceMapper* card_counts_storage =
1880     create_aux_memory_mapper("Card Counts Table",
1881                              G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1882                              G1CardCounts::heap_map_factor());
1883 
1884   size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1885   G1RegionToSpaceMapper* prev_bitmap_storage =
1886     create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1887   G1RegionToSpaceMapper* next_bitmap_storage =
1888     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1889 
1890   _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1891   g1_barrier_set()->initialize(cardtable_storage);
1892   // Do later initialization work for concurrent refinement.
1893   _hot_card_cache->initialize(card_counts_storage);
1894 
1895   // 6843694 - ensure that the maximum region index can fit
1896   // in the remembered set structures.
1897   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
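  // This is the largest value representable as a non-negative RegionIdx_t,
  // i.e. 2^(bits - 1) - 1, keeping the top bit free since RegionIdx_t is a
  // signed type.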
1898   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1899 
1900   g1_rem_set()->initialize(max_capacity(), max_regions());
1901 
1902   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1903   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1904   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1905             "too many cards per region");
1906 
1907   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1908 
1909   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1910 
1911   {
1912     HeapWord* start = _hrm.reserved().start();
1913     HeapWord* end = _hrm.reserved().end();
1914     size_t granularity = HeapRegion::GrainBytes;
1915 
1916     _in_cset_fast_test.initialize(start, end, granularity);
1917     _humongous_reclaim_candidates.initialize(start, end, granularity);
1918   }
1919 
1920   // Create the G1ConcurrentMark data structure and thread.
1921   // (Must do this late, so that "max_regions" is defined.)
1922   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1923   if (_cm == NULL || !_cm->completed_initialization()) {
1924     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1925     return JNI_ENOMEM;
1926   }
1927   _cmThread = _cm->cmThread();
1928 
1929   // Now expand into the initial heap size.
1930   if (!expand(init_byte_size)) {
1931     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1932     return JNI_ENOMEM;
1933   }
1934 
1935   // Perform any initialization actions delegated to the policy.
1936   g1_policy()->init(this, &_collection_set);
1937 
1938   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1939                                                SATB_Q_FL_lock,
1940                                                G1SATBProcessCompletedThreshold,
1941                                                Shared_SATB_Q_lock);
1942 
1943   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
1944                                                 DirtyCardQ_CBL_mon,
1945                                                 DirtyCardQ_FL_lock,
1946                                                 (int)concurrent_g1_refine()->yellow_zone(),
1947                                                 (int)concurrent_g1_refine()->red_zone(),
1948                                                 Shared_DirtyCardQ_lock,
1949                                                 NULL,  // fl_owner
1950                                                 true); // init_free_ids
1951 
1952   dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
1953                                     DirtyCardQ_CBL_mon,
1954                                     DirtyCardQ_FL_lock,
1955                                     -1, // never trigger processing
1956                                     -1, // no limit on length
1957                                     Shared_DirtyCardQ_lock,
1958                                     &JavaThread::dirty_card_queue_set());
1959 
1960   // Here we allocate the dummy HeapRegion that is required by the
1961   // G1AllocRegion class.
1962   HeapRegion* dummy_region = _hrm.get_dummy_region();
1963 
1964   // We'll re-use the same region whether the alloc region will
1965   // require BOT updates or not and, if it doesn't, then a non-young
1966   // region will complain that it cannot support allocations without
1967   // BOT updates. So we'll tag the dummy region as eden to avoid that.
1968   dummy_region->set_eden();
1969   // Make sure it's full.
1970   dummy_region->set_top(dummy_region->end());
1971   G1AllocRegion::setup(this, dummy_region);
1972 
1973   _allocator->init_mutator_alloc_region();
1974 
  // Create the monitoring and management support now, so that the
  // values in the heap have been properly initialized.
1977   _g1mm = new G1MonitoringSupport(this);
1978 
1979   G1StringDedup::initialize();
1980 
1981   _preserved_marks_set.init(ParallelGCThreads);
1982 
1983   _collection_set.initialize(max_regions());
1984 
1985   return JNI_OK;
1986 }
1987 
1988 void G1CollectedHeap::stop() {
1989   // Stop all concurrent threads. We do this to make sure these threads
1990   // do not continue to execute and access resources (e.g. logging)
1991   // that are destroyed during shutdown.
1992   _cg1r->stop();
1993   _cmThread->stop();
1994   if (G1StringDedup::is_enabled()) {
1995     G1StringDedup::stop();
1996   }
1997 }
1998 
1999 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2000   return HeapRegion::max_region_size();
2001 }
2002 
2003 void G1CollectedHeap::post_initialize() {
2004   ref_processing_init();
2005 }
2006 
2007 void G1CollectedHeap::ref_processing_init() {
2008   // Reference processing in G1 currently works as follows:
2009   //
2010   // * There are two reference processor instances. One is
2011   //   used to record and process discovered references
2012   //   during concurrent marking; the other is used to
2013   //   record and process references during STW pauses
2014   //   (both full and incremental).
2015   // * Both ref processors need to 'span' the entire heap as
2016   //   the regions in the collection set may be dotted around.
2017   //
2018   // * For the concurrent marking ref processor:
2019   //   * Reference discovery is enabled at initial marking.
2020   //   * Reference discovery is disabled and the discovered
2021   //     references processed etc during remarking.
2022   //   * Reference discovery is MT (see below).
2023   //   * Reference discovery requires a barrier (see below).
2024   //   * Reference processing may or may not be MT
2025   //     (depending on the value of ParallelRefProcEnabled
2026   //     and ParallelGCThreads).
2027   //   * A full GC disables reference discovery by the CM
  //     ref processor and abandons any entries on its
2029   //     discovered lists.
2030   //
2031   // * For the STW processor:
2032   //   * Non MT discovery is enabled at the start of a full GC.
2033   //   * Processing and enqueueing during a full GC is non-MT.
2034   //   * During a full GC, references are processed after marking.
2035   //
2036   //   * Discovery (may or may not be MT) is enabled at the start
2037   //     of an incremental evacuation pause.
2038   //   * References are processed near the end of a STW evacuation pause.
2039   //   * For both types of GC:
2040   //     * Discovery is atomic - i.e. not concurrent.
2041   //     * Reference discovery will not need a barrier.
2042 
2043   MemRegion mr = reserved_region();
2044 
2045   // Concurrent Mark ref processor
2046   _ref_processor_cm =
2047     new ReferenceProcessor(mr,    // span
2048                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2049                                 // mt processing
2050                            ParallelGCThreads,
2051                                 // degree of mt processing
2052                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2053                                 // mt discovery
2054                            MAX2(ParallelGCThreads, ConcGCThreads),
2055                                 // degree of mt discovery
2056                            false,
2057                                 // Reference discovery is not atomic
2058                            &_is_alive_closure_cm);
2059                                 // is alive closure
2060                                 // (for efficiency/performance)
2061 
2062   // STW ref processor
2063   _ref_processor_stw =
2064     new ReferenceProcessor(mr,    // span
2065                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2066                                 // mt processing
2067                            ParallelGCThreads,
2068                                 // degree of mt processing
2069                            (ParallelGCThreads > 1),
2070                                 // mt discovery
2071                            ParallelGCThreads,
2072                                 // degree of mt discovery
2073                            true,
2074                                 // Reference discovery is atomic
2075                            &_is_alive_closure_stw);
2076                                 // is alive closure
2077                                 // (for efficiency/performance)
2078 }
2079 
2080 CollectorPolicy* G1CollectedHeap::collector_policy() const {
2081   return _collector_policy;
2082 }
2083 
2084 size_t G1CollectedHeap::capacity() const {
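  // Committed space only: the number of committed regions times the region
  // size. The reserved-but-uncommitted part is covered by max_capacity().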
2085   return _hrm.length() * HeapRegion::GrainBytes;
2086 }
2087 
2088 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2089   hr->reset_gc_time_stamp();
2090 }
2091 
2092 #ifndef PRODUCT
2093 
2094 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2095 private:
2096   unsigned _gc_time_stamp;
2097   bool _failures;
2098 
2099 public:
2100   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2101     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2102 
2103   virtual bool doHeapRegion(HeapRegion* hr) {
2104     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2105     if (_gc_time_stamp != region_gc_time_stamp) {
2106       log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
2107                             region_gc_time_stamp, _gc_time_stamp);
2108       _failures = true;
2109     }
2110     return false;
2111   }
2112 
2113   bool failures() { return _failures; }
2114 };
2115 
2116 void G1CollectedHeap::check_gc_time_stamps() {
2117   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2118   heap_region_iterate(&cl);
2119   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2120 }
2121 #endif // PRODUCT
2122 
2123 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2124   _hot_card_cache->drain(cl, worker_i);
2125 }
2126 
2127 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
2128   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2129   size_t n_completed_buffers = 0;
2130   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2131     n_completed_buffers++;
2132   }
2133   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2134   dcqs.clear_n_completed_buffers();
2135   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2136 }
2137 
2138 // Computes the sum of the storage used by the various regions.
2139 size_t G1CollectedHeap::used() const {
2140   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
2141   if (_archive_allocator != NULL) {
2142     result += _archive_allocator->used();
2143   }
2144   return result;
2145 }
2146 
2147 size_t G1CollectedHeap::used_unlocked() const {
2148   return _summary_bytes_used;
2149 }
2150 
2151 class SumUsedClosure: public HeapRegionClosure {
2152   size_t _used;
2153 public:
2154   SumUsedClosure() : _used(0) {}
2155   bool doHeapRegion(HeapRegion* r) {
2156     _used += r->used();
2157     return false;
2158   }
2159   size_t result() { return _used; }
2160 };
2161 
2162 size_t G1CollectedHeap::recalculate_used() const {
2163   double recalculate_used_start = os::elapsedTime();
2164 
2165   SumUsedClosure blk;
2166   heap_region_iterate(&blk);
2167 
2168   g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2169   return blk.result();
2170 }
2171 
bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_java_lang_system_gc:                 return ExplicitGCInvokesConcurrent;
    case GCCause::_dcmd_gc_run:                         return ExplicitGCInvokesConcurrent;
    case GCCause::_update_allocation_context_stats_inc: return true;
    case GCCause::_wb_conc_mark:                        return true;
    default:                                            return false;
2179   }
2180 }
2181 
2182 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2183   switch (cause) {
2184     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2185     case GCCause::_g1_humongous_allocation: return true;
2186     default:                                return is_user_requested_concurrent_full_gc(cause);
2187   }
2188 }
2189 
2190 #ifndef PRODUCT
2191 void G1CollectedHeap::allocate_dummy_regions() {
2192   // Let's fill up most of the region
2193   size_t word_size = HeapRegion::GrainWords - 1024;
2194   // And as a result the region we'll allocate will be humongous.
2195   guarantee(is_humongous(word_size), "sanity");
2196 
2197   // _filler_array_max_size is set to humongous object threshold
2198   // but temporarily change it to use CollectedHeap::fill_with_object().
2199   SizeTFlagSetting fs(_filler_array_max_size, word_size);
2200 
2201   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2202     // Let's use the existing mechanism for the allocation
2203     HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2204                                                  AllocationContext::system());
2205     if (dummy_obj != NULL) {
2206       MemRegion mr(dummy_obj, word_size);
2207       CollectedHeap::fill_with_object(mr);
2208     } else {
2209       // If we can't allocate once, we probably cannot allocate
2210       // again. Let's get out of the loop.
2211       break;
2212     }
2213   }
2214 }
2215 #endif // !PRODUCT
2216 
2217 void G1CollectedHeap::increment_old_marking_cycles_started() {
2218   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2219          _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2220          "Wrong marking cycle count (started: %d, completed: %d)",
2221          _old_marking_cycles_started, _old_marking_cycles_completed);
2222 
2223   _old_marking_cycles_started++;
2224 }
2225 
2226 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2227   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2228 
2229   // We assume that if concurrent == true, then the caller is a
  // concurrent thread that has joined the Suspendible Thread
2231   // Set. If there's ever a cheap way to check this, we should add an
2232   // assert here.
2233 
2234   // Given that this method is called at the end of a Full GC or of a
2235   // concurrent cycle, and those can be nested (i.e., a Full GC can
2236   // interrupt a concurrent cycle), the number of full collections
2237   // completed should be either one (in the case where there was no
2238   // nesting) or two (when a Full GC interrupted a concurrent cycle)
2239   // behind the number of full collections started.
2240 
2241   // This is the case for the inner caller, i.e. a Full GC.
2242   assert(concurrent ||
2243          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2244          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2245          "for inner caller (Full GC): _old_marking_cycles_started = %u "
2246          "is inconsistent with _old_marking_cycles_completed = %u",
2247          _old_marking_cycles_started, _old_marking_cycles_completed);
2248 
2249   // This is the case for the outer caller, i.e. the concurrent cycle.
2250   assert(!concurrent ||
2251          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2252          "for outer caller (concurrent cycle): "
2253          "_old_marking_cycles_started = %u "
2254          "is inconsistent with _old_marking_cycles_completed = %u",
2255          _old_marking_cycles_started, _old_marking_cycles_completed);
2256 
2257   _old_marking_cycles_completed += 1;
2258 
2259   // We need to clear the "in_progress" flag in the CM thread before
  // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2261   // is set) so that if a waiter requests another System.gc() it doesn't
2262   // incorrectly see that a marking cycle is still in progress.
2263   if (concurrent) {
2264     _cmThread->set_idle();
2265   }
2266 
  // This notify_all() will ensure that a thread that called
  // System.gc() (with ExplicitGCInvokesConcurrent set or not) and
  // is waiting for a full GC to finish will be woken up. It is
2270   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2271   FullGCCount_lock->notify_all();
2272 }
2273 
2274 void G1CollectedHeap::collect(GCCause::Cause cause) {
2275   assert_heap_not_locked();
2276 
2277   uint gc_count_before;
2278   uint old_marking_count_before;
2279   uint full_gc_count_before;
2280   bool retry_gc;
2281 
2282   do {
2283     retry_gc = false;
2284 
2285     {
2286       MutexLocker ml(Heap_lock);
2287 
2288       // Read the GC count while holding the Heap_lock
2289       gc_count_before = total_collections();
2290       full_gc_count_before = total_full_collections();
2291       old_marking_count_before = _old_marking_cycles_started;
2292     }
2293 
2294     if (should_do_concurrent_full_gc(cause)) {
2295       // Schedule an initial-mark evacuation pause that will start a
2296       // concurrent cycle. We're setting word_size to 0 which means that
2297       // we are not requesting a post-GC allocation.
2298       VM_G1IncCollectionPause op(gc_count_before,
2299                                  0,     /* word_size */
2300                                  true,  /* should_initiate_conc_mark */
2301                                  g1_policy()->max_pause_time_ms(),
2302                                  cause);
2303       op.set_allocation_context(AllocationContext::current());
2304 
2305       VMThread::execute(&op);
2306       if (!op.pause_succeeded()) {
2307         if (old_marking_count_before == _old_marking_cycles_started) {
2308           retry_gc = op.should_retry_gc();
2309         } else {
2310           // A Full GC happened while we were trying to schedule the
2311           // initial-mark GC. No point in starting a new cycle given
2312           // that the whole heap was collected anyway.
2313         }
2314 
2315         if (retry_gc) {
2316           if (GCLocker::is_active_and_needs_gc()) {
2317             GCLocker::stall_until_clear();
2318           }
2319         }
2320       }
2321     } else {
2322       if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2323           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2324 
2325         // Schedule a standard evacuation pause. We're setting word_size
2326         // to 0 which means that we are not requesting a post-GC allocation.
2327         VM_G1IncCollectionPause op(gc_count_before,
2328                                    0,     /* word_size */
2329                                    false, /* should_initiate_conc_mark */
2330                                    g1_policy()->max_pause_time_ms(),
2331                                    cause);
2332         VMThread::execute(&op);
2333       } else {
2334         // Schedule a Full GC.
2335         VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2336         VMThread::execute(&op);
2337       }
2338     }
2339   } while (retry_gc);
2340 }
2341 
2342 bool G1CollectedHeap::is_in(const void* p) const {
2343   if (_hrm.reserved().contains(p)) {
2344     // Given that we know that p is in the reserved space,
2345     // heap_region_containing() should successfully
2346     // return the containing region.
2347     HeapRegion* hr = heap_region_containing(p);
2348     return hr->is_in(p);
2349   } else {
2350     return false;
2351   }
2352 }
2353 
2354 #ifdef ASSERT
2355 bool G1CollectedHeap::is_in_exact(const void* p) const {
2356   bool contains = reserved_region().contains(p);
2357   bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2358   if (contains && available) {
2359     return true;
2360   } else {
2361     return false;
2362   }
2363 }
2364 #endif
2365 
2366 bool G1CollectedHeap::obj_in_cs(oop obj) {
2367   HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
2368   return r != NULL && r->in_collection_set();
2369 }
2370 
2371 // Iteration functions.
2372 
2373 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2374 
2375 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2376   ExtendedOopClosure* _cl;
2377 public:
2378   IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
2379   bool doHeapRegion(HeapRegion* r) {
2380     if (!r->is_continues_humongous()) {
2381       r->oop_iterate(_cl);
2382     }
2383     return false;
2384   }
2385 };
2386 
2387 // Iterates an ObjectClosure over all objects within a HeapRegion.
2388 
2389 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2390   ObjectClosure* _cl;
2391 public:
2392   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2393   bool doHeapRegion(HeapRegion* r) {
2394     if (!r->is_continues_humongous()) {
2395       r->object_iterate(_cl);
2396     }
2397     return false;
2398   }
2399 };
2400 
2401 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2402   IterateObjectClosureRegionClosure blk(cl);
2403   heap_region_iterate(&blk);
2404 }
2405 
2406 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2407   _hrm.iterate(cl);
2408 }
2409 
void G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
                                              uint worker_id,
                                              HeapRegionClaimer* hrclaimer,
                                              bool concurrent) const {
2415   _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
2416 }
2417 
2418 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2419   _collection_set.iterate(cl);
2420 }
2421 
2422 void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
2423   _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
2424 }
2425 
2426 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2427   HeapRegion* result = _hrm.next_region_in_heap(from);
2428   while (result != NULL && result->is_pinned()) {
2429     result = _hrm.next_region_in_heap(result);
2430   }
2431   return result;
2432 }
2433 
2434 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2435   HeapRegion* hr = heap_region_containing(addr);
2436   return hr->block_start(addr);
2437 }
2438 
2439 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2440   HeapRegion* hr = heap_region_containing(addr);
2441   return hr->block_size(addr);
2442 }
2443 
2444 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2445   HeapRegion* hr = heap_region_containing(addr);
2446   return hr->block_is_obj(addr);
2447 }
2448 
2449 bool G1CollectedHeap::supports_tlab_allocation() const {
2450   return true;
2451 }
2452 
2453 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
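  // TLABs are carved out of eden, so report the eden part of the young gen
  // target: the total young target length minus the survivor regions.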
2454   return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2455 }
2456 
2457 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2458   return _eden.length() * HeapRegion::GrainBytes;
2459 }
2460 
// For G1, TLABs should not contain humongous objects, so the maximum TLAB size
// must not exceed the humongous object limit.
2463 size_t G1CollectedHeap::max_tlab_size() const {
2464   return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
2465 }
2466 
2467 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2468   AllocationContext_t context = AllocationContext::current();
2469   return _allocator->unsafe_max_tlab_alloc(context);
2470 }
2471 
2472 size_t G1CollectedHeap::max_capacity() const {
2473   return _hrm.reserved().byte_size();
2474 }
2475 
2476 jlong G1CollectedHeap::millis_since_last_gc() {
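  // Not yet implemented for G1; see the disabled assert below.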
2477   // assert(false, "NYI");
2478   return 0;
2479 }
2480 
2481 void G1CollectedHeap::prepare_for_verify() {
2482   _verifier->prepare_for_verify();
2483 }
2484 
2485 void G1CollectedHeap::verify(VerifyOption vo) {
2486   _verifier->verify(vo);
2487 }
2488 
2489 class PrintRegionClosure: public HeapRegionClosure {
2490   outputStream* _st;
2491 public:
2492   PrintRegionClosure(outputStream* st) : _st(st) {}
2493   bool doHeapRegion(HeapRegion* r) {
2494     r->print_on(_st);
2495     return false;
2496   }
2497 };
2498 
2499 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2500                                        const HeapRegion* hr,
2501                                        const VerifyOption vo) const {
2502   switch (vo) {
2503   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2504   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2505   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
2506   default:                            ShouldNotReachHere();
2507   }
2508   return false; // keep some compilers happy
2509 }
2510 
2511 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2512                                        const VerifyOption vo) const {
2513   switch (vo) {
2514   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
2515   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
2516   case VerifyOption_G1UseMarkWord: {
2517     HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
2518     return !obj->is_gc_marked() && !hr->is_archive();
2519   }
2520   default:                            ShouldNotReachHere();
2521   }
2522   return false; // keep some compilers happy
2523 }
2524 
2525 void G1CollectedHeap::print_heap_regions() const {
2526   Log(gc, heap, region) log;
2527   if (log.is_trace()) {
2528     ResourceMark rm;
2529     print_regions_on(log.trace_stream());
2530   }
2531 }
2532 
2533 void G1CollectedHeap::print_on(outputStream* st) const {
2534   st->print(" %-20s", "garbage-first heap");
2535   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
2536             capacity()/K, used_unlocked()/K);
2537   st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
2538             p2i(_hrm.reserved().start()),
            p2i(_hrm.reserved().start() + _hrm.length() * HeapRegion::GrainWords),
2540             p2i(_hrm.reserved().end()));
2541   st->cr();
2542   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
2543   uint young_regions = young_regions_count();
2544   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
2545             (size_t) young_regions * HeapRegion::GrainBytes / K);
2546   uint survivor_regions = survivor_regions_count();
2547   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
2548             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
2549   st->cr();
2550   MetaspaceAux::print_on(st);
2551 }
2552 
2553 void G1CollectedHeap::print_regions_on(outputStream* st) const {
2554   st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
2555                "HS=humongous(starts), HC=humongous(continues), "
2556                "CS=collection set, F=free, A=archive, TS=gc time stamp, "
2557                "AC=allocation context, "
2558                "TAMS=top-at-mark-start (previous, next)");
2559   PrintRegionClosure blk(st);
2560   heap_region_iterate(&blk);
2561 }
2562 
2563 void G1CollectedHeap::print_extended_on(outputStream* st) const {
2564   print_on(st);
2565 
2566   // Print the per-region information.
2567   print_regions_on(st);
2568 }
2569 
2570 void G1CollectedHeap::print_on_error(outputStream* st) const {
2571   this->CollectedHeap::print_on_error(st);
2572 
2573   if (_cm != NULL) {
2574     st->cr();
2575     _cm->print_on_error(st);
2576   }
2577 }
2578 
2579 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2580   workers()->print_worker_threads_on(st);
2581   _cmThread->print_on(st);
2582   st->cr();
2583   _cm->print_worker_threads_on(st);
2584   _cg1r->print_worker_threads_on(st); // also prints the sample thread
2585   if (G1StringDedup::is_enabled()) {
2586     G1StringDedup::print_worker_threads_on(st);
2587   }
2588 }
2589 
2590 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2591   workers()->threads_do(tc);
2592   tc->do_thread(_cmThread);
2593   _cm->threads_do(tc);
2594   _cg1r->threads_do(tc); // also iterates over the sample thread
2595   if (G1StringDedup::is_enabled()) {
2596     G1StringDedup::threads_do(tc);
2597   }
2598 }
2599 
2600 void G1CollectedHeap::print_tracing_info() const {
2601   g1_rem_set()->print_summary_info();
2602   concurrent_mark()->print_summary_info();
2603 }
2604 
2605 #ifndef PRODUCT
2606 // Helpful for debugging RSet issues.
2607 
2608 class PrintRSetsClosure : public HeapRegionClosure {
2609 private:
2610   const char* _msg;
2611   size_t _occupied_sum;
2612 
2613 public:
2614   bool doHeapRegion(HeapRegion* r) {
2615     HeapRegionRemSet* hrrs = r->rem_set();
2616     size_t occupied = hrrs->occupied();
2617     _occupied_sum += occupied;
2618 
2619     tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
2620     if (occupied == 0) {
2621       tty->print_cr("  RSet is empty");
2622     } else {
2623       hrrs->print();
2624     }
2625     tty->print_cr("----------");
2626     return false;
2627   }
2628 
2629   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
2630     tty->cr();
2631     tty->print_cr("========================================");
2632     tty->print_cr("%s", msg);
2633     tty->cr();
2634   }
2635 
2636   ~PrintRSetsClosure() {
2637     tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
2638     tty->print_cr("========================================");
2639     tty->cr();
2640   }
2641 };
2642 
2643 void G1CollectedHeap::print_cset_rsets() {
2644   PrintRSetsClosure cl("Printing CSet RSets");
2645   collection_set_iterate(&cl);
2646 }
2647 
2648 void G1CollectedHeap::print_all_rsets() {
2649   PrintRSetsClosure cl("Printing All RSets");
2650   heap_region_iterate(&cl);
2651 }
2652 #endif // PRODUCT
2653 
2654 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2656   size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
2657   size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
2658   size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
2659 
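       // The young list target length counts both eden and survivor regions,
       // so subtract the survivor portion to arrive at the eden capacity.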
2660   size_t eden_capacity_bytes =
2661     (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
2662 
2663   VirtualSpaceSummary heap_summary = create_heap_space_summary();
2664   return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
2665                        eden_capacity_bytes, survivor_used_bytes, num_regions());
2666 }
2667 
2668 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2669   return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2670                        stats->unused(), stats->used(), stats->region_end_waste(),
2671                        stats->regions_filled(), stats->direct_allocated(),
2672                        stats->failure_used(), stats->failure_waste());
2673 }
2674 
2675 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2676   const G1HeapSummary& heap_summary = create_g1_heap_summary();
2677   gc_tracer->report_gc_heap_summary(when, heap_summary);
2678 
2679   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2680   gc_tracer->report_metaspace_summary(when, metaspace_summary);
2681 }
2682 
2683 G1CollectedHeap* G1CollectedHeap::heap() {
2684   CollectedHeap* heap = Universe::heap();
2685   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2686   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
2687   return (G1CollectedHeap*)heap;
2688 }
2689 
2690 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2691   // always_do_update_barrier = false;
2692   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2693   // Fill TLABs and such.
2694   accumulate_statistics_all_tlabs();
2695   ensure_parsability(true);
2696 
2697   g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2698 }
2699 
2700 void G1CollectedHeap::gc_epilogue(bool full) {
2701   // We are at the end of the GC. Total collections has already been increased.
2702   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2703 
2704   // FIXME: what is this about?
2705   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2706   // is set.
2707 #if defined(COMPILER2) || INCLUDE_JVMCI
2708   assert(DerivedPointerTable::is_empty(), "derived pointer present");
2709 #endif
2710   // always_do_update_barrier = true;
2711 
2712   resize_all_tlabs();
2713   allocation_context_stats().update(full);
2714 
2715   // We have just completed a GC. Update the soft reference
2716   // policy with the new heap occupancy
2717   Universe::update_heap_info_at_gc();
2718 }
2719 
2720 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2721                                                uint gc_count_before,
2722                                                bool* succeeded,
2723                                                GCCause::Cause gc_cause) {
2724   assert_heap_not_locked_and_not_at_safepoint();
2725   VM_G1IncCollectionPause op(gc_count_before,
2726                              word_size,
2727                              false, /* should_initiate_conc_mark */
2728                              g1_policy()->max_pause_time_ms(),
2729                              gc_cause);
2730 
2731   op.set_allocation_context(AllocationContext::current());
2732   VMThread::execute(&op);
2733 
2734   HeapWord* result = op.result();
2735   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2736   assert(result == NULL || ret_succeeded,
2737          "the result should be NULL if the VM did not succeed");
2738   *succeeded = ret_succeeded;
2739 
2740   assert_heap_not_locked();
2741   return result;
2742 }
2743 
2744 void G1CollectedHeap::doConcurrentMark() {
2746   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2747   if (!_cmThread->in_progress()) {
2748     _cmThread->set_started();
2749     CGC_lock->notify();
2750   }
2751 }
2752 
2753 size_t G1CollectedHeap::pending_card_num() {
2754   size_t extra_cards = 0;
2755   JavaThread *curr = Threads::first();
2756   while (curr != NULL) {
2757     DirtyCardQueue& dcq = curr->dirty_card_queue();
2758     extra_cards += dcq.size();
2759     curr = curr->next();
2760   }
2761   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2762   size_t buffer_size = dcqs.buffer_size();
2763   size_t buffer_num = dcqs.completed_buffers_num();
2764 
2765   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
2766   // in bytes - not the number of 'entries'. We need to convert
2767   // into a number of cards.
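       // For example, on a 64-bit VM (oopSize == 8) a completed 2048-byte
       // buffer accounts for 256 pending cards.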
2768   return (buffer_size * buffer_num + extra_cards) / oopSize;
2769 }
2770 
2771 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2772  private:
2773   size_t _total_humongous;
2774   size_t _candidate_humongous;
2775 
2776   DirtyCardQueue _dcq;
2777 
2778   // We don't nominate objects with many remembered set entries, on
2779   // the assumption that such objects are likely still live.
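       // With G1EagerReclaimHumongousObjectsWithStaleRefs enabled we tolerate up
       // to G1RSetSparseRegionEntries (possibly stale) entries; otherwise the
       // remembered set must be completely empty.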
2780   bool is_remset_small(HeapRegion* region) const {
2781     HeapRegionRemSet* const rset = region->rem_set();
2782     return G1EagerReclaimHumongousObjectsWithStaleRefs
2783       ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
2784       : rset->is_empty();
2785   }
2786 
2787   bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
2788     assert(region->is_starts_humongous(), "Must start a humongous object");
2789 
2790     oop obj = oop(region->bottom());
2791 
2792     // Dead objects cannot be eager reclaim candidates. Due to class
2793     // unloading it is unsafe to query their classes so we return early.
2794     if (heap->is_obj_dead(obj, region)) {
2795       return false;
2796     }
2797 
2798     // Candidate selection must satisfy the following constraints
2799     // while concurrent marking is in progress:
2800     //
2801     // * In order to maintain SATB invariants, an object must not be
2802     // reclaimed if it was allocated before the start of marking and
2803     // has not had its references scanned.  Such an object must have
2804     // its references (including type metadata) scanned to ensure no
2805     // live objects are missed by the marking process.  Objects
2806     // allocated after the start of concurrent marking don't need to
2807     // be scanned.
2808     //
2809     // * An object must not be reclaimed if it is on the concurrent
2810     // mark stack.  Objects allocated after the start of concurrent
2811     // marking are never pushed on the mark stack.
2812     //
2813     // Nominating only objects allocated after the start of concurrent
2814     // marking is sufficient to meet both constraints.  This may miss
2815     // some objects that satisfy the constraints, but the marking data
2816     // structures don't support efficiently performing the needed
2817     // additional tests or scrubbing of the mark stack.
2818     //
2819     // However, we presently only nominate is_typeArray() objects.
2820     // A humongous object containing references induces remembered
2821     // set entries on other regions.  In order to reclaim such an
2822     // object, those remembered sets would need to be cleaned up.
2823     //
2824     // We also treat is_typeArray() objects specially, allowing them
2825     // to be reclaimed even if allocated before the start of
2826     // concurrent mark.  For this we rely on mark stack insertion to
2827     // exclude is_typeArray() objects, preventing reclaiming an object
2828     // that is in the mark stack.  We also rely on the metadata for
2829     // such objects to be built-in and so ensured to be kept live.
2830     // Frequent allocation and drop of large binary blobs is an
2831     // important use case for eager reclaim, and this special handling
2832     // may reduce needed headroom.
2833 
2834     return obj->is_typeArray() && is_remset_small(region);
2835   }
2836 
2837  public:
2838   RegisterHumongousWithInCSetFastTestClosure()
2839   : _total_humongous(0),
2840     _candidate_humongous(0),
2841     _dcq(&JavaThread::dirty_card_queue_set()) {
2842   }
2843 
2844   virtual bool doHeapRegion(HeapRegion* r) {
2845     if (!r->is_starts_humongous()) {
2846       return false;
2847     }
2848     G1CollectedHeap* g1h = G1CollectedHeap::heap();
2849 
2850     bool is_candidate = humongous_region_is_candidate(g1h, r);
2851     uint rindex = r->hrm_index();
2852     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2853     if (is_candidate) {
2854       _candidate_humongous++;
2855       g1h->register_humongous_region_with_cset(rindex);
2856       // is_candidate already filters out humongous objects with large remembered sets.
2857       // If we have a humongous object with only a few remembered set entries, we
2858       // simply flush these entries into the DCQS. That will result in the automatic
2859       // re-evaluation of these remembered set entries during the following evacuation
2860       // phase.
2861       if (!r->rem_set()->is_empty()) {
2862         guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
2863                   "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
2864         G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
2865         HeapRegionRemSetIterator hrrs(r->rem_set());
2866         size_t card_index;
2867         while (hrrs.has_next(card_index)) {
2868           jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
2869           // The remembered set might contain references to already freed
2870           // regions. Filter out such entries to avoid failing card table
2871           // verification.
2872           if (g1h->is_in_closed_subset(bs->addr_for(card_ptr))) {
2873             if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
2874               *card_ptr = CardTableModRefBS::dirty_card_val();
2875               _dcq.enqueue(card_ptr);
2876             }
2877           }
2878         }
2879         assert(hrrs.n_yielded() == r->rem_set()->occupied(),
2880                "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
2881                hrrs.n_yielded(), r->rem_set()->occupied());
2882         r->rem_set()->clear_locked();
2883       }
2884       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
2885     }
2886     _total_humongous++;
2887 
2888     return false;
2889   }
2890 
2891   size_t total_humongous() const { return _total_humongous; }
2892   size_t candidate_humongous() const { return _candidate_humongous; }
2893 
2894   void flush_rem_set_entries() { _dcq.flush(); }
2895 };
2896 
2897 void G1CollectedHeap::register_humongous_regions_with_cset() {
2898   if (!G1EagerReclaimHumongousObjects) {
2899     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
2900     return;
2901   }
2902   double time = os::elapsed_counter();
2903 
2904   // Collect reclaim candidate information and register candidates with cset.
2905   RegisterHumongousWithInCSetFastTestClosure cl;
2906   heap_region_iterate(&cl);
2907 
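       // Convert the elapsed tick count into milliseconds.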
2908   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
2909   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
2910                                                                   cl.total_humongous(),
2911                                                                   cl.candidate_humongous());
2912   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
2913 
2914   // Finally flush all remembered set entries to re-check into the global DCQS.
2915   cl.flush_rem_set_entries();
2916 }
2917 
2918 class VerifyRegionRemSetClosure : public HeapRegionClosure {
2919   public:
2920     bool doHeapRegion(HeapRegion* hr) {
2921       if (!hr->is_archive() && !hr->is_continues_humongous()) {
2922         hr->verify_rem_set();
2923       }
2924       return false;
2925     }
2926 };
2927 
2928 uint G1CollectedHeap::num_task_queues() const {
2929   return _task_queues->size();
2930 }
2931 
2932 #if TASKQUEUE_STATS
2933 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
2934   st->print_raw_cr("GC Task Stats");
2935   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
2936   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
2937 }
2938 
2939 void G1CollectedHeap::print_taskqueue_stats() const {
2940   if (!log_is_enabled(Trace, gc, task, stats)) {
2941     return;
2942   }
2943   Log(gc, task, stats) log;
2944   ResourceMark rm;
2945   outputStream* st = log.trace_stream();
2946 
2947   print_taskqueue_stats_hdr(st);
2948 
2949   TaskQueueStats totals;
2950   const uint n = num_task_queues();
2951   for (uint i = 0; i < n; ++i) {
2952     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
2953     totals += task_queue(i)->stats;
2954   }
2955   st->print_raw("tot "); totals.print(st); st->cr();
2956 
2957   DEBUG_ONLY(totals.verify());
2958 }
2959 
2960 void G1CollectedHeap::reset_taskqueue_stats() {
2961   const uint n = num_task_queues();
2962   for (uint i = 0; i < n; ++i) {
2963     task_queue(i)->stats.reset();
2964   }
2965 }
2966 #endif // TASKQUEUE_STATS
2967 
2968 void G1CollectedHeap::wait_for_root_region_scanning() {
2969   double scan_wait_start = os::elapsedTime();
2970   // We have to wait until the CM threads finish scanning the
2971   // root regions as it's the only way to ensure that all the
2972   // objects on them have been correctly scanned before we start
2973   // moving them during the GC.
2974   bool waited = _cm->root_regions()->wait_until_scan_finished();
2975   double wait_time_ms = 0.0;
2976   if (waited) {
2977     double scan_wait_end = os::elapsedTime();
2978     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2979   }
2980   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2981 }
2982 
2983 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2984 private:
2985   G1HRPrinter* _hr_printer;
2986 public:
2987   G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
2988 
2989   virtual bool doHeapRegion(HeapRegion* r) {
2990     _hr_printer->cset(r);
2991     return false;
2992   }
2993 };
2994 
2995 bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2997   assert_at_safepoint(true /* should_be_vm_thread */);
2998   guarantee(!is_gc_active(), "collection is not reentrant");
2999 
3000   if (GCLocker::check_active_before_gc()) {
3001     return false;
3002   }
3003 
3004   _gc_timer_stw->register_gc_start();
3005 
3006   GCIdMark gc_id_mark;
3007   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3008 
3009   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3010   ResourceMark rm;
3011 
3012   g1_policy()->note_gc_start();
3013 
3014   wait_for_root_region_scanning();
3015 
3016   print_heap_before_gc();
3017   print_heap_regions();
3018   trace_heap_before_gc(_gc_tracer_stw);
3019 
3020   _verifier->verify_region_sets_optional();
3021   _verifier->verify_dirty_young_regions();
3022 
3023   // We should not be doing initial mark unless the conc mark thread is running
3024   if (!_cmThread->should_terminate()) {
3025     // This call will decide whether this pause is an initial-mark
3026     // pause. If it is, during_initial_mark_pause() will return true
3027     // for the duration of this pause.
3028     g1_policy()->decide_on_conc_mark_initiation();
3029   }
3030 
3031   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3032   assert(!collector_state()->during_initial_mark_pause() ||
3033           collector_state()->gcs_are_young(), "sanity");
3034 
3035   // We also do not allow mixed GCs during marking.
3036   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3037 
3038   // Record whether this pause is an initial mark. When the current
3039   // thread has completed its logging output and it's safe to signal
3040   // the CM thread, the flag's value in the policy has been reset.
3041   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3042 
3043   // Inner scope for scope based logging, timers, and stats collection
3044   {
3045     EvacuationInfo evacuation_info;
3046 
3047     if (collector_state()->during_initial_mark_pause()) {
3048       // We are about to start a marking cycle, so we increment the
3049       // full collection counter.
3050       increment_old_marking_cycles_started();
3051       _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
3052     }
3053 
3054     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3055 
3056     GCTraceCPUTime tcpu;
3057 
3058     FormatBuffer<> gc_string("Pause ");
3059     if (collector_state()->during_initial_mark_pause()) {
3060       gc_string.append("Initial Mark");
3061     } else if (collector_state()->gcs_are_young()) {
3062       gc_string.append("Young");
3063     } else {
3064       gc_string.append("Mixed");
3065     }
3066     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3067 
3068     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3069                                                                   workers()->active_workers(),
3070                                                                   Threads::number_of_non_daemon_threads());
3071     workers()->update_active_workers(active_workers);
3072     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
3073 
3074     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3075     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3076 
3077     // If the secondary_free_list is not empty, append it to the
3078     // free_list. No need to wait for the cleanup operation to finish;
3079     // the region allocation code will check the secondary_free_list
3080     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3081     // set, skip this step so that the region allocation code has to
3082     // get entries from the secondary_free_list.
3083     if (!G1StressConcRegionFreeing) {
3084       append_secondary_free_list_if_not_empty_with_lock();
3085     }
3086 
3087     G1HeapTransition heap_transition(this);
3088     size_t heap_used_bytes_before_gc = used();
3089 
3090     // Don't dynamically change the number of GC threads this early.  A value of
3091     // 0 is used to indicate serial work.  When parallel work is done,
3092     // it will be set.
3093 
3094     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3095       IsGCActiveMark x;
3096 
3097       gc_prologue(false);
3098       increment_total_collections(false /* full gc */);
3099       increment_gc_time_stamp();
3100 
3101       if (VerifyRememberedSets) {
3102         log_info(gc, verify)("[Verifying RemSets before GC]");
3103         VerifyRegionRemSetClosure v_cl;
3104         heap_region_iterate(&v_cl);
3105       }
3106 
3107       _verifier->verify_before_gc();
3108 
3109       _verifier->check_bitmaps("GC Start");
3110 
3111 #if defined(COMPILER2) || INCLUDE_JVMCI
3112       DerivedPointerTable::clear();
3113 #endif
3114 
3115       // Please see comment in g1CollectedHeap.hpp and
3116       // G1CollectedHeap::ref_processing_init() to see how
3117       // reference processing currently works in G1.
3118 
3119       // Enable discovery in the STW reference processor
3120       if (g1_policy()->should_process_references()) {
3121         ref_processor_stw()->enable_discovery();
3122       } else {
3123         ref_processor_stw()->disable_discovery();
3124       }
3125 
3126       {
3127         // We want to temporarily turn off discovery by the
3128         // CM ref processor, if necessary, and turn it back on
3129         // again later if we do. Using a scoped
3130         // NoRefDiscovery object will do this.
3131         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3132 
3133         // Forget the current alloc region (we might even choose it to be part
3134         // of the collection set!).
3135         _allocator->release_mutator_alloc_region();
3136 
3137         // This timing is only used by the ergonomics to handle our pause target.
3138         // It is unclear why this should not include the full pause. We will
3139         // investigate this in CR 7178365.
3140         //
3141         // Preserving the old comment here if that helps the investigation:
3142         //
3143         // The elapsed time induced by the start time below deliberately elides
3144         // the possible verification above.
3145         double sample_start_time_sec = os::elapsedTime();
3146 
3147         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3148 
3149         if (collector_state()->during_initial_mark_pause()) {
3150           concurrent_mark()->checkpointRootsInitialPre();
3151         }
3152 
3153         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
3154 
3155         evacuation_info.set_collectionset_regions(collection_set()->region_length());
3156 
3157         // Make sure the remembered sets are up to date. This needs to be
3158         // done before register_humongous_regions_with_cset(), because the
3159         // remembered sets are used there to choose eager reclaim candidates.
3160         // If the remembered sets are not up to date we might miss some
3161         // entries that need to be handled.
3162         g1_rem_set()->cleanupHRRS();
3163 
3164         register_humongous_regions_with_cset();
3165 
3166         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3167 
3168         _cm->note_start_of_gc();
3169         // We call this after finalize_collection_set() to
3170         // ensure that the CSet has been finalized.
3171         _cm->verify_no_cset_oops();
3172 
3173         if (_hr_printer.is_active()) {
3174           G1PrintCollectionSetClosure cl(&_hr_printer);
3175           _collection_set.iterate(&cl);
3176         }
3177 
3178         // Initialize the GC alloc regions.
3179         _allocator->init_gc_alloc_regions(evacuation_info);
3180 
3181         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3182         pre_evacuate_collection_set();
3183 
3184         // Actually do the work...
3185         evacuate_collection_set(evacuation_info, &per_thread_states);
3186 
3187         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3188 
3189         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3190         free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3191 
3192         eagerly_reclaim_humongous_regions();
3193 
3194         record_obj_copy_mem_stats();
3195         _survivor_evac_stats.adjust_desired_plab_sz();
3196         _old_evac_stats.adjust_desired_plab_sz();
3197 
3198         // Start a new incremental collection set for the next pause.
3199         collection_set()->start_incremental_building();
3200 
3201         clear_cset_fast_test();
3202 
3203         guarantee(_eden.length() == 0, "eden should have been cleared");
3204         g1_policy()->transfer_survivors_to_cset(survivor());
3205 
3206         if (evacuation_failed()) {
3207           set_used(recalculate_used());
3208           if (_archive_allocator != NULL) {
3209             _archive_allocator->clear_used();
3210           }
3211           for (uint i = 0; i < ParallelGCThreads; i++) {
3212             if (_evacuation_failed_info_array[i].has_failed()) {
3213               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3214             }
3215           }
3216         } else {
3217           // The "used" of the collection set regions has already been subtracted
3218           // when they were freed. Add in the bytes evacuated.
3219           increase_used(g1_policy()->bytes_copied_during_gc());
3220         }
3221 
3222         if (collector_state()->during_initial_mark_pause()) {
3223           // We have to do this before we notify the CM threads that
3224           // they can start working to make sure that all the
3225           // appropriate initialization is done on the CM object.
3226           concurrent_mark()->checkpointRootsInitialPost();
3227           collector_state()->set_mark_in_progress(true);
3228           // Note that we don't actually trigger the CM thread at
3229           // this point. We do that later when we're sure that
3230           // the current thread has completed its logging output.
3231         }
3232 
3233         allocate_dummy_regions();
3234 
3235         _allocator->init_mutator_alloc_region();
3236 
3237         {
3238           size_t expand_bytes = _heap_sizing_policy->expansion_amount();
3239           if (expand_bytes > 0) {
3240             size_t bytes_before = capacity();
3241             // No need for an ergo logging here,
3242             // expansion_amount() does this when it returns a value > 0.
3243             double expand_ms;
3244             if (!expand(expand_bytes, &expand_ms)) {
3245               // We failed to expand the heap. Cannot do anything about it.
3246             }
3247             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3248           }
3249         }
3250 
3251         // We redo the verification but now with respect to the new CSet, which
3252         // has just been initialized after the previous CSet was freed.
3253         _cm->verify_no_cset_oops();
3254         _cm->note_end_of_gc();
3255 
3256         // This timing is only used by the ergonomics to handle our pause target.
3257         // It is unclear why this should not include the full pause. We will
3258         // investigate this in CR 7178365.
3259         double sample_end_time_sec = os::elapsedTime();
3260         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3261         size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3262         g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3263 
3264         evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
3265         evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3266 
3267         MemoryService::track_memory_usage();
3268 
3269         // In prepare_for_verify() below we'll need to scan the deferred
3270         // update buffers to bring the RSets up-to-date if
3271         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3272         // the update buffers we'll probably need to scan cards on the
3273         // regions we just allocated to (i.e., the GC alloc
3274         // regions). However, during the last GC we called
3275         // set_saved_mark() on all the GC alloc regions, so card
3276         // scanning might skip the [saved_mark_word()...top()] area of
3277         // those regions (i.e., the area we allocated objects into
3278         // during the last GC). But it shouldn't. Given that
3279         // saved_mark_word() is conditional on whether the GC time stamp
3280         // on the region is current or not, by incrementing the GC time
3281         // stamp here we invalidate all the GC time stamps on all the
3282         // regions and saved_mark_word() will simply return top() for
3283         // all the regions. This is a nicer way of ensuring this rather
3284         // than iterating over the regions and fixing them. In fact, the
3285         // GC time stamp increment here also ensures that
3286         // saved_mark_word() will return top() between pauses, i.e.,
3287         // during concurrent refinement. So we don't need the
3288         // is_gc_active() check to decide which top to use when
3289         // scanning cards (see CR 7039627).
3290         increment_gc_time_stamp();
3291 
3292         if (VerifyRememberedSets) {
3293           log_info(gc, verify)("[Verifying RemSets after GC]");
3294           VerifyRegionRemSetClosure v_cl;
3295           heap_region_iterate(&v_cl);
3296         }
3297 
3298         _verifier->verify_after_gc();
3299         _verifier->check_bitmaps("GC End");
3300 
3301         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3302         ref_processor_stw()->verify_no_references_recorded();
3303 
3304         // CM reference discovery will be re-enabled if necessary.
3305       }
3306 
3307 #ifdef TRACESPINNING
3308       ParallelTaskTerminator::print_termination_counts();
3309 #endif
3310 
3311       gc_epilogue(false);
3312     }
3313 
3314     // Print the remainder of the GC log output.
3315     if (evacuation_failed()) {
3316       log_info(gc)("To-space exhausted");
3317     }
3318 
3319     g1_policy()->print_phases();
3320     heap_transition.print();
3321 
3322     // It is not yet safe to tell the concurrent mark thread to
3323     // start as we have some optional output below. We don't want the
3324     // output from the concurrent mark thread interfering with this
3325     // logging output either.
3326 
3327     _hrm.verify_optional();
3328     _verifier->verify_region_sets_optional();
3329 
3330     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3331     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3332 
3333     print_heap_after_gc();
3334     print_heap_regions();
3335     trace_heap_after_gc(_gc_tracer_stw);
3336 
3337     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3338     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3339     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3340     // before any GC notifications are raised.
3341     g1mm()->update_sizes();
3342 
3343     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3344     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3345     _gc_timer_stw->register_gc_end();
3346     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3347   }
3348   // It should now be safe to tell the concurrent mark thread to start
3349   // without its logging output interfering with the logging output
3350   // that came from the pause.
3351 
3352   if (should_start_conc_mark) {
3353     // CAUTION: after the doConcurrentMark() call below,
3354     // the concurrent marking thread(s) could be running
3355     // concurrently with us. Make sure that anything after
3356     // this point does not assume that we are the only GC thread
3357     // running. Note: of course, the actual marking work will
3358     // not start until the safepoint itself is released in
3359     // SuspendibleThreadSet::desynchronize().
3360     doConcurrentMark();
3361   }
3362 
3363   return true;
3364 }
3365 
3366 void G1CollectedHeap::remove_self_forwarding_pointers() {
3367   G1ParRemoveSelfForwardPtrsTask rsfp_task;
3368   workers()->run_task(&rsfp_task);
3369 }
3370 
3371 void G1CollectedHeap::restore_after_evac_failure() {
3372   double remove_self_forwards_start = os::elapsedTime();
3373 
3374   remove_self_forwarding_pointers();
3375   SharedRestorePreservedMarksTaskExecutor task_executor(workers());
3376   _preserved_marks_set.restore(&task_executor);
3377 
3378   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
3379 }
3380 
3381 void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
3382   if (!_evacuation_failed) {
3383     _evacuation_failed = true;
3384   }
3385 
3386   _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
3387   _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);
3388 }
3389 
3390 bool G1ParEvacuateFollowersClosure::offer_termination() {
3391   G1ParScanThreadState* const pss = par_scan_state();
3392   start_term_time();
3393   const bool res = terminator()->offer_termination();
3394   end_term_time();
3395   return res;
3396 }
3397 
3398 void G1ParEvacuateFollowersClosure::do_void() {
3399   G1ParScanThreadState* const pss = par_scan_state();
3400   pss->trim_queue();
3401   do {
3402     pss->steal_and_trim_queue(queues());
3403   } while (!offer_termination());
3404 }
3405 
3406 class G1ParTask : public AbstractGangTask {
3407 protected:
3408   G1CollectedHeap*         _g1h;
3409   G1ParScanThreadStateSet* _pss;
3410   RefToScanQueueSet*       _queues;
3411   G1RootProcessor*         _root_processor;
3412   ParallelTaskTerminator   _terminator;
3413   uint                     _n_workers;
3414 
3415 public:
3416   G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
3417     : AbstractGangTask("G1 collection"),
3418       _g1h(g1h),
3419       _pss(per_thread_states),
3420       _queues(task_queues),
3421       _root_processor(root_processor),
3422       _terminator(n_workers, _queues),
3423       _n_workers(n_workers)
3424   {}
3425 
3426   void work(uint worker_id) {
3427     if (worker_id >= _n_workers) return;  // no work needed this round
3428 
3429     double start_sec = os::elapsedTime();
3430     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
3431 
3432     {
3433       ResourceMark rm;
3434       HandleMark   hm;
3435 
3436       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
3437 
3438       G1ParScanThreadState*           pss = _pss->state_for_worker(worker_id);
3439       pss->set_ref_processor(rp);
3440 
3441       double start_strong_roots_sec = os::elapsedTime();
3442 
3443       _root_processor->evacuate_roots(pss->closures(), worker_id);
3444 
3445       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
3446 
3447       // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
3448       // treating the nmethods we visit as roots for concurrent marking.
3449       // We only want to make sure that the oops in the nmethods are adjusted with regard to the
3450       // objects copied by the current evacuation.
3451       size_t cards_scanned = _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
3452                                                                              pss->closures()->weak_codeblobs(),
3453                                                                              worker_id);
3454 
3455       _pss->add_cards_scanned(worker_id, cards_scanned);
3456 
3457       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
3458 
3459       double term_sec = 0.0;
3460       size_t evac_term_attempts = 0;
3461       {
3462         double start = os::elapsedTime();
3463         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
3464         evac.do_void();
3465 
3466         evac_term_attempts = evac.term_attempts();
3467         term_sec = evac.term_time();
3468         double elapsed_sec = os::elapsedTime() - start;
3469         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
3470         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
3471         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
3472       }
3473 
3474       assert(pss->queue_is_empty(), "should be empty");
3475 
3476       if (log_is_enabled(Debug, gc, task, stats)) {
3477         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3478         size_t lab_waste;
3479         size_t lab_undo_waste;
3480         pss->waste(lab_waste, lab_undo_waste);
3481         _g1h->print_termination_stats(worker_id,
3482                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
3483                                       strong_roots_sec * 1000.0,                  /* strong roots time */
3484                                       term_sec * 1000.0,                          /* evac term time */
3485                                       evac_term_attempts,                         /* evac term attempts */
3486                                       lab_waste,                                  /* alloc buffer waste */
3487                                       lab_undo_waste                              /* undo waste */
3488                                       );
3489       }
3490 
3491       // Close the inner scope so that the ResourceMark and HandleMark
3492       // destructors are executed here and are included as part of the
3493       // "GC Worker Time".
3494     }
3495     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
3496   }
3497 };
3498 
3499 void G1CollectedHeap::print_termination_stats_hdr() {
3500   log_debug(gc, task, stats)("GC Termination Stats");
3501   log_debug(gc, task, stats)("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
3502   log_debug(gc, task, stats)("thr     ms        ms      %%        ms      %%    attempts  total   alloc    undo");
3503   log_debug(gc, task, stats)("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
3504 }
3505 
3506 void G1CollectedHeap::print_termination_stats(uint worker_id,
3507                                               double elapsed_ms,
3508                                               double strong_roots_ms,
3509                                               double term_ms,
3510                                               size_t term_attempts,
3511                                               size_t alloc_buffer_waste,
3512                                               size_t undo_waste) const {
3513   log_debug(gc, task, stats)
3514               ("%3u %9.2f %9.2f %6.2f "
3515                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
3516                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
3517                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
3518                term_ms, term_ms * 100 / elapsed_ms, term_attempts,
3519                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
3520                alloc_buffer_waste * HeapWordSize / K,
3521                undo_waste * HeapWordSize / K);
3522 }
3523 
3524 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
3525 private:
3526   BoolObjectClosure* _is_alive;
3527   int _initial_string_table_size;
3528   int _initial_symbol_table_size;
3529 
3530   bool  _process_strings;
3531   int _strings_processed;
3532   int _strings_removed;
3533 
3534   bool  _process_symbols;
3535   int _symbols_processed;
3536   int _symbols_removed;
3537 
3538 public:
3539   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
3540     AbstractGangTask("String/Symbol Unlinking"),
3541     _is_alive(is_alive),
3542     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
3543     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
3544 
3545     _initial_string_table_size = StringTable::the_table()->table_size();
3546     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
3547     if (process_strings) {
3548       StringTable::clear_parallel_claimed_index();
3549     }
3550     if (process_symbols) {
3551       SymbolTable::clear_parallel_claimed_index();
3552     }
3553   }
3554 
3555   ~G1StringSymbolTableUnlinkTask() {
3556     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
3557               "claim value %d after unlink less than initial string table size %d",
3558               StringTable::parallel_claimed_index(), _initial_string_table_size);
3559     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
3560               "claim value %d after unlink less than initial symbol table size %d",
3561               SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
3562 
3563     log_info(gc, stringtable)(
3564         "Cleaned string and symbol table, "
3565         "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
3566         "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
3567         strings_processed(), strings_removed(),
3568         symbols_processed(), symbols_removed());
3569   }
3570 
3571   void work(uint worker_id) {
3572     int strings_processed = 0;
3573     int strings_removed = 0;
3574     int symbols_processed = 0;
3575     int symbols_removed = 0;
3576     if (_process_strings) {
3577       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
3578       Atomic::add(strings_processed, &_strings_processed);
3579       Atomic::add(strings_removed, &_strings_removed);
3580     }
3581     if (_process_symbols) {
3582       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
3583       Atomic::add(symbols_processed, &_symbols_processed);
3584       Atomic::add(symbols_removed, &_symbols_removed);
3585     }
3586   }
3587 
3588   size_t strings_processed() const { return (size_t)_strings_processed; }
3589   size_t strings_removed()   const { return (size_t)_strings_removed; }
3590 
3591   size_t symbols_processed() const { return (size_t)_symbols_processed; }
3592   size_t symbols_removed()   const { return (size_t)_symbols_removed; }
3593 };
3594 
3595 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
3596 private:
3597   static Monitor* _lock;
3598 
3599   BoolObjectClosure* const _is_alive;
3600   const bool               _unloading_occurred;
3601   const uint               _num_workers;
3602 
3603   // Variables used to claim nmethods.
3604   CompiledMethod* _first_nmethod;
3605   volatile CompiledMethod* _claimed_nmethod;
3606 
3607   // The list of nmethods that need to be processed by the second pass.
3608   volatile CompiledMethod* _postponed_list;
3609   volatile uint            _num_entered_barrier;
3610 
3611  public:
3612   G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
3613       _is_alive(is_alive),
3614       _unloading_occurred(unloading_occurred),
3615       _num_workers(num_workers),
3616       _first_nmethod(NULL),
3617       _claimed_nmethod(NULL),
3618       _postponed_list(NULL),
3619       _num_entered_barrier(0)
3620   {
3621     CompiledMethod::increase_unloading_clock();
3622     // Get first alive nmethod
3623     CompiledMethodIterator iter = CompiledMethodIterator();
3624     if (iter.next_alive()) {
3625       _first_nmethod = iter.method();
3626     }
3627     _claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
3628   }
3629 
3630   ~G1CodeCacheUnloadingTask() {
3631     CodeCache::verify_clean_inline_caches();
3632 
3633     CodeCache::set_needs_cache_clean(false);
3634     guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
3635 
3636     CodeCache::verify_icholder_relocations();
3637   }
3638 
3639  private:
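       // Lock-free push onto the postponed list: link the nmethod in front of
       // the current head, then publish it with a CAS on the head pointer.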
3640   void add_to_postponed_list(CompiledMethod* nm) {
3641     CompiledMethod* old;
3642     do {
3643       old = (CompiledMethod*)_postponed_list;
3644       nm->set_unloading_next(old);
3645     } while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
3646   }
3647 
3648   void clean_nmethod(CompiledMethod* nm) {
3649     bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
3650 
3651     if (postponed) {
3652       // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
3653       add_to_postponed_list(nm);
3654     }
3655 
3656     // Mark that this nmethod has been cleaned/unloaded.
3657     // After this call, it will be safe to ask if this nmethod was unloaded or not.
3658     nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
3659   }
3660 
3661   void clean_nmethod_postponed(CompiledMethod* nm) {
3662     nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
3663   }
3664 
3665   static const int MaxClaimNmethods = 16;
3666 
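       // Claim a batch of up to MaxClaimNmethods alive nmethods by advancing the
       // shared iterator position with a CAS; if the CAS fails another worker
       // raced us, so retry from the new position.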
3667   void claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
3668     CompiledMethod* first;
3669     CompiledMethodIterator last;
3670 
3671     do {
3672       *num_claimed_nmethods = 0;
3673 
3674       first = (CompiledMethod*)_claimed_nmethod;
3675       last = CompiledMethodIterator(first);
3676 
3677       if (first != NULL) {
3678 
3679         for (int i = 0; i < MaxClaimNmethods; i++) {
3680           if (!last.next_alive()) {
3681             break;
3682           }
3683           claimed_nmethods[i] = last.method();
3684           (*num_claimed_nmethods)++;
3685         }
3686       }
3687 
3688     } while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
3689   }
3690 
3691   CompiledMethod* claim_postponed_nmethod() {
3692     CompiledMethod* claim;
3693     CompiledMethod* next;
3694 
3695     do {
3696       claim = (CompiledMethod*)_postponed_list;
3697       if (claim == NULL) {
3698         return NULL;
3699       }
3700 
3701       next = claim->unloading_next();
3702 
3703     } while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
3704 
3705     return claim;
3706   }
3707 
3708  public:
3709   // Mark that we're done with the first pass of nmethod cleaning.
3710   void barrier_mark(uint worker_id) {
3711     MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
3712     _num_entered_barrier++;
3713     if (_num_entered_barrier == _num_workers) {
3714       ml.notify_all();
3715     }
3716   }
3717 
3718   // See if we have to wait for the other workers to
3719   // finish their first-pass nmethod cleaning work.
3720   void barrier_wait(uint worker_id) {
3721     if (_num_entered_barrier < _num_workers) {
3722       MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
3723       while (_num_entered_barrier < _num_workers) {
3724         ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
3725       }
3726     }
3727   }
3728 
3729   // Cleaning and unloading of nmethods. Some work has to be postponed
3730   // to the second pass, when we know which nmethods survive.
3731   void work_first_pass(uint worker_id) {
3732     // The first nmethod is claimed by the first worker.
3733     if (worker_id == 0 && _first_nmethod != NULL) {
3734       clean_nmethod(_first_nmethod);
3735       _first_nmethod = NULL;
3736     }
3737 
3738     int num_claimed_nmethods;
3739     CompiledMethod* claimed_nmethods[MaxClaimNmethods];
3740 
3741     while (true) {
3742       claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
3743 
3744       if (num_claimed_nmethods == 0) {
3745         break;
3746       }
3747 
3748       for (int i = 0; i < num_claimed_nmethods; i++) {
3749         clean_nmethod(claimed_nmethods[i]);
3750       }
3751     }
3752   }
3753 
3754   void work_second_pass(uint worker_id) {
3755     CompiledMethod* nm;
3756     // Take care of postponed nmethods.
3757     while ((nm = claim_postponed_nmethod()) != NULL) {
3758       clean_nmethod_postponed(nm);
3759     }
3760   }
3761 };
3762 
3763 Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);
3764 
3765 class G1KlassCleaningTask : public StackObj {
3766   BoolObjectClosure*                      _is_alive;
3767   volatile jint                           _clean_klass_tree_claimed;
3768   ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
3769 
3770  public:
3771   G1KlassCleaningTask(BoolObjectClosure* is_alive) :
3772       _is_alive(is_alive),
3773       _clean_klass_tree_claimed(0),
3774       _klass_iterator() {
3775   }
3776 
3777  private:
3778   bool claim_clean_klass_tree_task() {
3779     if (_clean_klass_tree_claimed) {
3780       return false;
3781     }
3782 
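         // Claim the task with a 0 -> 1 CAS; only the winning thread cleans
         // the subklass/sibling tree.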
3783     return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
3784   }
3785 
3786   InstanceKlass* claim_next_klass() {
3787     Klass* klass;
3788     do {
3789       klass =_klass_iterator.next_klass();
3790     } while (klass != NULL && !klass->is_instance_klass());
3791 
3792     // The result can be NULL, so don't call InstanceKlass::cast().
3793     return static_cast<InstanceKlass*>(klass);
3794   }
3795 
3796 public:
3797 
3798   void clean_klass(InstanceKlass* ik) {
3799     ik->clean_weak_instanceklass_links(_is_alive);
3800   }
3801 
3802   void work() {
3803     ResourceMark rm;
3804 
3805     // One worker will clean the subklass/sibling klass tree.
3806     if (claim_clean_klass_tree_task()) {
3807       Klass::clean_subklass_tree(_is_alive);
3808     }
3809 
3810     // All workers will help clean the classes.
3811     InstanceKlass* klass;
3812     while ((klass = claim_next_klass()) != NULL) {
3813       clean_klass(klass);
3814     }
3815   }
3816 };
3817 
3818 // To minimize the remark pause times, the tasks below are done in parallel.
3819 class G1ParallelCleaningTask : public AbstractGangTask {
3820 private:
3821   G1StringSymbolTableUnlinkTask _string_symbol_task;
3822   G1CodeCacheUnloadingTask      _code_cache_task;
3823   G1KlassCleaningTask           _klass_cleaning_task;
3824 
3825 public:
3826   // The constructor is run in the VMThread.
3827   G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
3828       AbstractGangTask("Parallel Cleaning"),
3829       _string_symbol_task(is_alive, process_strings, process_symbols),
3830       _code_cache_task(num_workers, is_alive, unloading_occurred),
3831       _klass_cleaning_task(is_alive) {
3832   }
3833 
3834   // The parallel work done by all worker threads.
3835   void work(uint worker_id) {
3836     // Do first pass of code cache cleaning.
3837     _code_cache_task.work_first_pass(worker_id);
3838 
3839     // Let the threads mark that the first pass is done.
3840     _code_cache_task.barrier_mark(worker_id);
3841 
3842     // Clean the Strings and Symbols.
3843     _string_symbol_task.work(worker_id);
3844 
3845     // Wait for all workers to finish the first code cache cleaning pass.
3846     _code_cache_task.barrier_wait(worker_id);
3847 
3848     // Do the second pass of code cache cleaning, which relies on
3849     // the liveness information gathered during the first pass.
3850     _code_cache_task.work_second_pass(worker_id);
3851 
3852     // Clean all klasses that were not unloaded.
3853     _klass_cleaning_task.work();
3854   }
3855 };
3856 
3857 
3858 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
3859                                         bool process_strings,
3860                                         bool process_symbols,
3861                                         bool class_unloading_occurred) {
3862   uint n_workers = workers()->active_workers();
3863 
3864   G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
3865                                         n_workers, class_unloading_occurred);
3866   workers()->run_task(&g1_unlink_task);
3867 }
3868 
3869 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
3870                                                      bool process_strings, bool process_symbols) {
3871   { // Timing scope
3872     G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
3873     workers()->run_task(&g1_unlink_task);
3874   }
3875 }
3876 
3877 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
3878  private:
3879   DirtyCardQueueSet* _queue;
3880   G1CollectedHeap* _g1h;
3881  public:
3882   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
3883     _queue(queue), _g1h(g1h) { }
3884 
3885   virtual void work(uint worker_id) {
3886     G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3887     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3888 
3889     RedirtyLoggedCardTableEntryClosure cl(_g1h);
3890     _queue->par_apply_closure_to_all_completed_buffers(&cl);
3891 
3892     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3893   }
3894 };
3895 
3896 void G1CollectedHeap::redirty_logged_cards() {
3897   double redirty_logged_cards_start = os::elapsedTime();
3898 
3899   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3900   dirty_card_queue_set().reset_for_par_iteration();
3901   workers()->run_task(&redirty_task);
3902 
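       // Hand the now-redirtied buffers back to the global dirty card queue set,
       // from where (concurrent) refinement will eventually reprocess them.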
3903   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
3904   dcq.merge_bufferlists(&dirty_card_queue_set());
3905   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3906 
3907   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3908 }
3909 
3910 // Weak Reference Processing support
3911 
3912 // An always "is_alive" closure that is used to preserve referents.
3913 // If the object is non-null then it's alive.  Used in the preservation
3914 // of referent objects that are pointed to by reference objects
3915 // discovered by the CM ref processor.
3916 class G1AlwaysAliveClosure: public BoolObjectClosure {
3917   G1CollectedHeap* _g1;
3918 public:
3919   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3920   bool do_object_b(oop p) {
3921     return p != NULL;
3922   }
3926 };
3927 
3928 bool G1STWIsAliveClosure::do_object_b(oop p) {
3929   // An object is reachable if it is outside the collection set,
3930   // or is inside and copied.
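       // E.g. a collection set object that was evacuated in this pause is
       // forwarded and hence alive; an unforwarded cset object did not survive.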
3931   return !_g1->is_in_cset(p) || p->is_forwarded();
3932 }
3933 
3934 // Non Copying Keep Alive closure
3935 class G1KeepAliveClosure: public OopClosure {
3936   G1CollectedHeap* _g1;
3937 public:
3938   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3939   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3940   void do_oop(oop* p) {
3941     oop obj = *p;
3942     assert(obj != NULL, "the caller should have filtered out NULL values");
3943 
3944     const InCSetState cset_state = _g1->in_cset_state(obj);
3945     if (!cset_state.is_in_cset_or_humongous()) {
3946       return;
3947     }
3948     if (cset_state.is_in_cset()) {
3949       assert( obj->is_forwarded(), "invariant" );
3950       *p = obj->forwardee();
3951     } else {
3952       assert(!obj->is_forwarded(), "invariant" );
3953       assert(cset_state.is_humongous(),
3954              "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
3955       _g1->set_humongous_is_live(obj);
3956     }
3957   }
3958 };
3959 
3960 // Copying Keep Alive closure - can be called from both
3961 // serial and parallel code as long as different worker
3962 // threads utilize different G1ParScanThreadState instances
3963 // and different queues.
3964 
3965 class G1CopyingKeepAliveClosure: public OopClosure {
3966   G1CollectedHeap*         _g1h;
3967   OopClosure*              _copy_non_heap_obj_cl;
3968   G1ParScanThreadState*    _par_scan_state;
3969 
3970 public:
3971   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
3972                             OopClosure* non_heap_obj_cl,
3973                             G1ParScanThreadState* pss):
3974     _g1h(g1h),
3975     _copy_non_heap_obj_cl(non_heap_obj_cl),
3976     _par_scan_state(pss)
3977   {}
3978 
3979   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3980   virtual void do_oop(      oop* p) { do_oop_work(p); }
3981 
3982   template <class T> void do_oop_work(T* p) {
3983     oop obj = oopDesc::load_decode_heap_oop(p);
3984 
3985     if (_g1h->is_in_cset_or_humongous(obj)) {
3986       // If the referent object has been forwarded (either copied
3987       // to a new location or to itself in the event of an
3988       // evacuation failure) then we need to update the reference
3989       // field and, if both reference and referent are in the G1
3990       // heap, update the RSet for the referent.
3991       //
3992       // If the referent has not been forwarded then we have to keep
3993       // it alive by policy. Therefore we have to copy the referent.
3994       //
3995       // If the reference field is in the G1 heap then we can push
3996       // on the PSS queue. When the queue is drained (after each
3997       // phase of reference processing) the object and its followers
3998       // will be copied, the reference field set to point to the
3999       // new location, and the RSet updated. Otherwise we need to
4000       // use the non-heap or metadata closures directly to copy
4001       // the referent object and update the pointer, while avoiding
4002       // updating the RSet.
4003 
4004       if (_g1h->is_in_g1_reserved(p)) {
4005         _par_scan_state->push_on_queue(p);
4006       } else {
4007         assert(!Metaspace::contains((const void*)p),
4008                "Unexpectedly found a pointer from metadata: " PTR_FORMAT, p2i(p));
4009         _copy_non_heap_obj_cl->do_oop(p);
4010       }
4011     }
4012   }
4013 };
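
     // In this file the non-heap copy closure passed in is
     // pss->closures()->raw_strong_oops(); see G1STWRefProcTaskProxy::work(),
     // G1ParPreserveCMReferentsTask::work() and process_discovered_references().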
4014 
4015 // Serial drain queue closure. Called as the 'complete_gc'
4016 // closure for each discovered list in some of the
4017 // reference processing phases.
4018 
4019 class G1STWDrainQueueClosure: public VoidClosure {
4020 protected:
4021   G1CollectedHeap* _g1h;
4022   G1ParScanThreadState* _par_scan_state;
4023 
4024   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
4025 
4026 public:
4027   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
4028     _g1h(g1h),
4029     _par_scan_state(pss)
4030   { }
4031 
4032   void do_void() {
4033     G1ParScanThreadState* const pss = par_scan_state();
4034     pss->trim_queue();
4035   }
4036 };
4037 
4038 // Parallel Reference Processing closures
4039 
4040 // Implementation of AbstractRefProcTaskExecutor for parallel reference
4041 // processing during G1 evacuation pauses.
4042 
4043 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
4044 private:
4045   G1CollectedHeap*          _g1h;
4046   G1ParScanThreadStateSet*  _pss;
4047   RefToScanQueueSet*        _queues;
4048   WorkGang*                 _workers;
4049   uint                      _active_workers;
4050 
4051 public:
4052   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
4053                            G1ParScanThreadStateSet* per_thread_states,
4054                            WorkGang* workers,
4055                            RefToScanQueueSet* task_queues,
4056                            uint n_workers) :
4057     _g1h(g1h),
4058     _pss(per_thread_states),
4059     _queues(task_queues),
4060     _workers(workers),
4061     _active_workers(n_workers)
4062   {
4063     g1h->ref_processor_stw()->set_active_mt_degree(n_workers);
4064   }
4065 
4066   // Executes the given task using this executor's GC worker threads.
4067   virtual void execute(ProcessTask& task);
4068   virtual void execute(EnqueueTask& task);
4069 };
4070 
4071 // Gang task for possibly parallel reference processing
4072 
4073 class G1STWRefProcTaskProxy: public AbstractGangTask {
4074   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
4075   ProcessTask&     _proc_task;
4076   G1CollectedHeap* _g1h;
4077   G1ParScanThreadStateSet* _pss;
4078   RefToScanQueueSet* _task_queues;
4079   ParallelTaskTerminator* _terminator;
4080 
4081 public:
4082   G1STWRefProcTaskProxy(ProcessTask& proc_task,
4083                         G1CollectedHeap* g1h,
4084                         G1ParScanThreadStateSet* per_thread_states,
4085                         RefToScanQueueSet* task_queues,
4086                         ParallelTaskTerminator* terminator) :
4087     AbstractGangTask("Process reference objects in parallel"),
4088     _proc_task(proc_task),
4089     _g1h(g1h),
4090     _pss(per_thread_states),
4091     _task_queues(task_queues),
4092     _terminator(terminator)
4093   {}
4094 
4095   virtual void work(uint worker_id) {
4096     // The reference processing task executed by a single worker.
4097     ResourceMark rm;
4098     HandleMark   hm;
4099 
4100     G1STWIsAliveClosure is_alive(_g1h);
4101 
4102     G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
4103     pss->set_ref_processor(NULL);
4104 
4105     // Keep alive closure.
4106     G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
4107 
4108     // Complete GC closure
4109     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
4110 
4111     // Call the reference processing task's work routine.
4112     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
4113 
4114     // Note we cannot assert that the refs array is empty here as not all
4115     // of the processing tasks (specifically phase2 - pp2_work) execute
4116     // the complete_gc closure (which ordinarily would drain the queue), so
4117     // the queue may not be empty.
4118   }
4119 };
4120 
4121 // Driver routine for parallel reference processing.
4122 // Creates an instance of the ref processing gang
4123 // task and has the worker threads execute it.
4124 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
4125   assert(_workers != NULL, "Need parallel worker threads.");
4126 
4127   ParallelTaskTerminator terminator(_active_workers, _queues);
4128   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
4129 
4130   _workers->run_task(&proc_task_proxy);
4131 }
4132 
4133 // Gang task for parallel reference enqueueing.
4134 
4135 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
4136   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
4137   EnqueueTask& _enq_task;
4138 
4139 public:
4140   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
4141     AbstractGangTask("Enqueue reference objects in parallel"),
4142     _enq_task(enq_task)
4143   { }
4144 
4145   virtual void work(uint worker_id) {
4146     _enq_task.work(worker_id);
4147   }
4148 };
4149 
4150 // Driver routine for parallel reference enqueueing.
4151 // Creates an instance of the ref enqueueing gang
4152 // task and has the worker threads execute it.
4153 
4154 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
4155   assert(_workers != NULL, "Need parallel worker threads.");
4156 
4157   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
4158 
4159   _workers->run_task(&enq_task_proxy);
4160 }
4161 
4162 // End of weak reference support closures
4163 
4164 // Gang task used to preserve (i.e. copy) any referent objects
4165 // that are in the collection set and are pointed to by reference
4166 // objects discovered by the CM ref processor.
4167 
4168 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
4169 protected:
4170   G1CollectedHeap*         _g1h;
4171   G1ParScanThreadStateSet* _pss;
4172   RefToScanQueueSet*       _queues;
4173   ParallelTaskTerminator   _terminator;
4174   uint                     _n_workers;
4175 
4176 public:
4177   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, uint workers, RefToScanQueueSet* task_queues) :
4178     AbstractGangTask("ParPreserveCMReferents"),
4179     _g1h(g1h),
4180     _pss(per_thread_states),
4181     _queues(task_queues),
4182     _terminator(workers, _queues),
4183     _n_workers(workers)
4184   {
4185     g1h->ref_processor_cm()->set_active_mt_degree(workers);
4186   }
4187 
4188   void work(uint worker_id) {
4189     G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
4190 
4191     ResourceMark rm;
4192     HandleMark   hm;
4193 
4194     G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
4195     pss->set_ref_processor(NULL);
4196     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
4197 
4198     // Is alive closure
4199     G1AlwaysAliveClosure always_alive(_g1h);
4200 
4201     // Copying keep alive closure. Applied to referent objects that need
4202     // to be copied.
4203     G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
4204 
4205     ReferenceProcessor* rp = _g1h->ref_processor_cm();
4206 
4207     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
4208     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
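         // Worked example (hypothetical numbers): with _n_workers == 4 and
         // limit == 12, stride == 4, so worker 1 visits lists 1, 5 and 9.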
4209 
4210     // limit is set using max_num_q() - which was set using ParallelGCThreads.
4211     // So this must be true - but assert just in case someone decides to
4212     // change the worker ids.
4213     assert(worker_id < limit, "sanity");
4214     assert(!rp->discovery_is_atomic(), "CM reference discovery is expected to be non-atomic");
4215 
4216     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
4217     for (uint idx = worker_id; idx < limit; idx += stride) {
4218       DiscoveredList& ref_list = rp->discovered_refs()[idx];
4219 
4220       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
4221       while (iter.has_next()) {
4222         // Since discovery is not atomic for the CM ref processor, we
4223         // can see some null referent objects.
4224         iter.load_ptrs(DEBUG_ONLY(true));
4225         oop ref = iter.obj();
4226 
4227         // This will filter nulls.
4228         if (iter.is_referent_alive()) {
4229           iter.make_referent_alive();
4230         }
4231         iter.move_to_next();
4232       }
4233     }
4234 
4235     // Drain the queue - which may cause stealing
4236     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
4237     drain_queue.do_void();
4238     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
4239     assert(pss->queue_is_empty(), "should be");
4240   }
4241 };
4242 
4243 void G1CollectedHeap::process_weak_jni_handles() {
4244   double ref_proc_start = os::elapsedTime();
4245 
4246   G1STWIsAliveClosure is_alive(this);
4247   G1KeepAliveClosure keep_alive(this);
4248   JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4249 
4250   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4251   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4252 }
4253 
4254 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
4255   // Any reference objects in the collection set that were 'discovered'
4256   // by the CM ref processor should have already been copied (either by
4257   // applying the external root copy closure to the discovered lists, or
4258   // by following an RSet entry).
4259   //
4260   // But some of the referents in the collection set that these
4261   // reference objects point to may not have been copied: the STW ref
4262   // processor would have seen that the reference object had already
4263   // been 'discovered' and would have skipped discovering the reference,
4264   // but would not have treated the reference object as a regular oop.
4265   // As a result the copy closure would not have been applied to the
4266   // referent object.
4267   //
4268   // We need to explicitly copy these referent objects - the references
4269   // will be processed at the end of remarking.
4270   //
4271   // We also need to do this copying before we process the reference
4272   // objects discovered by the STW ref processor in case one of these
4273   // referents points to another object which is also referenced by an
4274   // object discovered by the STW ref processor.
4275   double preserve_cm_referents_time = 0.0;
4276 
4277   // To avoid spawning a task when there is no work to do, check that
4278   // a concurrent cycle is active and that some references have been
4279   // discovered.
4280   if (concurrent_mark()->cmThread()->during_cycle() &&
4281       ref_processor_cm()->has_discovered_references()) {
4282     double preserve_cm_referents_start = os::elapsedTime();
4283     uint no_of_gc_workers = workers()->active_workers();
4284     G1ParPreserveCMReferentsTask keep_cm_referents(this,
4285                                                    per_thread_states,
4286                                                    no_of_gc_workers,
4287                                                    _task_queues);
4288     workers()->run_task(&keep_cm_referents);
4289     preserve_cm_referents_time = os::elapsedTime() - preserve_cm_referents_start;
4290   }
4291 
4292   g1_policy()->phase_times()->record_preserve_cm_referents_time_ms(preserve_cm_referents_time * 1000.0);
4293 }
4294 
4295 // Weak Reference processing during an evacuation pause (part 1).
4296 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4297   double ref_proc_start = os::elapsedTime();
4298 
4299   ReferenceProcessor* rp = _ref_processor_stw;
4300   assert(rp->discovery_enabled(), "should have been enabled");
4301 
4302   // Closure to test whether a referent is alive.
4303   G1STWIsAliveClosure is_alive(this);
4304 
4305   // Even when parallel reference processing is enabled, the processing
4306   // of JNI refs is serial and performed by the current thread
4307   // rather than by a worker. The following PSS will be used for processing
4308   // JNI refs.
4309 
4310   // Use only a single queue for this PSS.
4311   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
4312   pss->set_ref_processor(NULL);
4313   assert(pss->queue_is_empty(), "pre-condition");
4314 
4315   // Keep alive closure.
4316   G1CopyingKeepAliveClosure keep_alive(this, pss->closures()->raw_strong_oops(), pss);
4317 
4318   // Serial Complete GC closure
4319   G1STWDrainQueueClosure drain_queue(this, pss);
4320 
4321   // Set up the soft refs policy...
4322   rp->setup_policy(false);
4323 
4324   ReferenceProcessorStats stats;
4325   if (!rp->processing_is_mt()) {
4326     // Serial reference processing...
4327     stats = rp->process_discovered_references(&is_alive,
4328                                               &keep_alive,
4329                                               &drain_queue,
4330                                               NULL,
4331                                               _gc_timer_stw);
4332   } else {
4333     uint no_of_gc_workers = workers()->active_workers();
4334 
4335     // Parallel reference processing
4336     assert(no_of_gc_workers <= rp->max_num_q(),
4337            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
4338            no_of_gc_workers,  rp->max_num_q());
4339 
4340     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
4341     stats = rp->process_discovered_references(&is_alive,
4342                                               &keep_alive,
4343                                               &drain_queue,
4344                                               &par_task_executor,
4345                                               _gc_timer_stw);
4346   }
4347 
4348   _gc_tracer_stw->report_gc_reference_stats(stats);
4349 
4350   // We have completed copying any necessary live referent objects.
4351   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
4352 
4353   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4354   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4355 }
4356 
4357 // Weak Reference processing during an evacuation pause (part 2).
4358 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4359   double ref_enq_start = os::elapsedTime();
4360 
4361   ReferenceProcessor* rp = _ref_processor_stw;
4362   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
4363 
4364   // Now enqueue any references remaining on the discovered
4365   // lists onto the pending list.
4366   if (!rp->processing_is_mt()) {
4367     // Serial reference processing...
4368     rp->enqueue_discovered_references();
4369   } else {
4370     // Parallel reference enqueueing
4371 
4372     uint n_workers = workers()->active_workers();
4373 
4374     assert(n_workers <= rp->max_num_q(),
4375            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
4376            n_workers,  rp->max_num_q());
4377 
4378     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
4379     rp->enqueue_discovered_references(&par_task_executor);
4380   }
4381 
4382   rp->verify_no_references_recorded();
4383   assert(!rp->discovery_enabled(), "should have been disabled");
4384 
4385   // FIXME
4386   // CM's reference processing also cleans up the string and symbol tables.
4387   // Should we do that here also? We could, but it is a serial operation
4388   // and could significantly increase the pause time.
4389 
4390   double ref_enq_time = os::elapsedTime() - ref_enq_start;
4391   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
4392 }
4393 
4394 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
4395   double merge_pss_time_start = os::elapsedTime();
4396   per_thread_states->flush();
4397   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
4398 }
4399 
4400 void G1CollectedHeap::pre_evacuate_collection_set() {
4401   _expand_heap_after_alloc_failure = true;
4402   _evacuation_failed = false;
4403 
4404   // Disable the hot card cache.
4405   _hot_card_cache->reset_hot_cache_claimed_index();
4406   _hot_card_cache->set_use_cache(false);
4407 
4408   g1_rem_set()->prepare_for_oops_into_collection_set_do();
4409   _preserved_marks_set.assert_empty();
4410 }
4411 
4412 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4413   // Should G1EvacuationFailureALot be in effect for this GC?
4414   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
4415 
4416   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
4417   double start_par_time_sec = os::elapsedTime();
4418   double end_par_time_sec;
4419 
4420   {
4421     const uint n_workers = workers()->active_workers();
4422     G1RootProcessor root_processor(this, n_workers);
4423     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
4424     // InitialMark needs claim bits to keep track of the marked-through CLDs.
4425     if (collector_state()->during_initial_mark_pause()) {
4426       ClassLoaderDataGraph::clear_claimed_marks();
4427     }
4428 
4429     print_termination_stats_hdr();
4430 
4431     workers()->run_task(&g1_par_task);
4432     end_par_time_sec = os::elapsedTime();
4433 
4434     // Closing the inner scope will execute the destructor
4435     // for the G1RootProcessor object. We record the current
4436     // elapsed time before closing the scope so that time
4437     // taken for the destructor is NOT included in the
4438     // reported parallel time.
4439   }
4440 
4441   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
4442 
4443   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4444   phase_times->record_par_time(par_time_ms);
4445 
4446   double code_root_fixup_time_ms =
4447         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4448   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4449 }
4450 
4451 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4452   // Process any discovered reference objects - we have
4453   // to do this _before_ we retire the GC alloc regions
4454   // as we may have to copy some 'reachable' referent
4455   // objects (and their reachable sub-graphs) that were
4456   // not copied during the pause.
4457   if (g1_policy()->should_process_references()) {
4458     preserve_cm_referents(per_thread_states);
4459     process_discovered_references(per_thread_states);
4460   } else {
4461     ref_processor_stw()->verify_no_references_recorded();
4462     process_weak_jni_handles();
4463   }
4464 
4465   if (G1StringDedup::is_enabled()) {
4466     double fixup_start = os::elapsedTime();
4467 
4468     G1STWIsAliveClosure is_alive(this);
4469     G1KeepAliveClosure keep_alive(this);
4470     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4471 
4472     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4473     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
4474   }
4475 
4476   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4477 
4478   if (evacuation_failed()) {
4479     restore_after_evac_failure();
4480 
4481     // Reset the G1EvacuationFailureALot counters and flags
4482     // Note: the values are reset only when an actual
4483     // evacuation failure occurs.
4484     NOT_PRODUCT(reset_evacuation_should_fail();)
4485   }
4486 
4487   _preserved_marks_set.assert_empty();
4488 
4489   // Enqueue any references remaining on the STW
4490   // reference processor's discovered lists. We need to do
4491   // this after the card table is cleaned (and verified) as
4492   // the act of enqueueing entries on to the pending list
4493   // will log these updates (and dirty their associated
4494   // cards). We need these updates logged to update any
4495   // RSets.
4496   if (g1_policy()->should_process_references()) {
4497     enqueue_discovered_references(per_thread_states);
4498   } else {
4499     g1_policy()->phase_times()->record_ref_enq_time(0);
4500   }
4501 
4502   _allocator->release_gc_alloc_regions(evacuation_info);
4503 
4504   merge_per_thread_state_info(per_thread_states);
4505 
4506   // Reset and re-enable the hot card cache.
4507   // Note the counts for the cards in the regions in the
4508   // collection set are reset when the collection set is freed.
4509   _hot_card_cache->reset_hot_cache();
4510   _hot_card_cache->set_use_cache(true);
4511 
4512   purge_code_root_memory();
4513 
4514   redirty_logged_cards();
4515 #if defined(COMPILER2) || INCLUDE_JVMCI
4516   DerivedPointerTable::update_pointers();
4517 #endif
4518   g1_policy()->print_age_table();
4519 }
4520 
4521 void G1CollectedHeap::record_obj_copy_mem_stats() {
4522   g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4523 
4524   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4525                                                create_g1_evac_summary(&_old_evac_stats));
4526 }
4527 
4528 void G1CollectedHeap::free_region(HeapRegion* hr,
4529                                   FreeRegionList* free_list,
4530                                   bool skip_remset,
4531                                   bool skip_hot_card_cache,
4532                                   bool locked) {
4533   assert(!hr->is_free(), "the region should not be free");
4534   assert(!hr->is_empty(), "the region should not be empty");
4535   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
4536   assert(free_list != NULL, "pre-condition");
4537 
4538   if (G1VerifyBitmaps) {
4539     MemRegion mr(hr->bottom(), hr->end());
4540     concurrent_mark()->clearRangePrevBitmap(mr);
4541   }
4542 
4543   // Clear the card counts for this region.
4544   // Note: we only need to do this if the region is not young
4545   // (since we don't refine cards in young regions).
4546   if (!skip_hot_card_cache && !hr->is_young()) {
4547     _hot_card_cache->reset_card_counts(hr);
4548   }
4549   hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
4550   free_list->add_ordered(hr);
4551 }
4552 
4553 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
4554                                             FreeRegionList* free_list,
4555                                             bool skip_remset) {
4556   assert(hr->is_humongous(), "this is only for humongous regions");
4557   assert(free_list != NULL, "pre-condition");
4558   hr->clear_humongous();
4559   free_region(hr, free_list, skip_remset);
4560 }
4561 
4562 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
4563                                            const uint humongous_regions_removed) {
4564   if (old_regions_removed > 0 || humongous_regions_removed > 0) {
4565     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4566     _old_set.bulk_remove(old_regions_removed);
4567     _humongous_set.bulk_remove(humongous_regions_removed);
4568   }
4569 }
4571 
4572 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
4573   assert(list != NULL, "list can't be null");
4574   if (!list->is_empty()) {
4575     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4576     _hrm.insert_list_into_free_list(list);
4577   }
4578 }
4579 
4580 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
4581   decrease_used(bytes);
4582 }
4583 
4584 class G1ParScrubRemSetTask: public AbstractGangTask {
4585 protected:
4586   G1RemSet* _g1rs;
4587   HeapRegionClaimer _hrclaimer;
4588 
4589 public:
4590   G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
4591     AbstractGangTask("G1 ScrubRS"),
4592     _g1rs(g1_rs),
4593     _hrclaimer(num_workers) {
4594   }
4595 
4596   void work(uint worker_id) {
4597     _g1rs->scrub(worker_id, &_hrclaimer);
4598   }
4599 };
4600 
4601 void G1CollectedHeap::scrub_rem_set() {
4602   uint num_workers = workers()->active_workers();
4603   G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
4604   workers()->run_task(&g1_par_scrub_rs_task);
4605 }
4606 
4607 class G1FreeCollectionSetTask : public AbstractGangTask {
4608 private:
4609 
4610   // Closure applied to all regions in the collection set to do work that needs to
4611   // be done serially in a single thread.
4612   class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
4613   private:
4614     EvacuationInfo* _evacuation_info;
4615     const size_t* _surviving_young_words;
4616 
4617     // Bytes used in successfully evacuated regions before the evacuation.
4618     size_t _before_used_bytes;
4619     // Bytes used in unsuccessfully evacuated regions before the evacuation.
4620     size_t _after_used_bytes;
4621 
4622     size_t _bytes_allocated_in_old_since_last_gc;
4623 
4624     size_t _failure_used_words;
4625     size_t _failure_waste_words;
4626 
4627     FreeRegionList _local_free_list;
4628   public:
4629     G1SerialFreeCollectionSetClosure(EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4630       HeapRegionClosure(),
4631       _evacuation_info(evacuation_info),
4632       _surviving_young_words(surviving_young_words),
4633       _before_used_bytes(0),
4634       _after_used_bytes(0),
4635       _bytes_allocated_in_old_since_last_gc(0),
4636       _failure_used_words(0),
4637       _failure_waste_words(0),
4638       _local_free_list("Local Region List for CSet Freeing") {
4639     }
4640 
4641     virtual bool doHeapRegion(HeapRegion* r) {
4642       G1CollectedHeap* g1h = G1CollectedHeap::heap();
4643 
4644       assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
4645       g1h->clear_in_cset(r);
4646 
4647       if (r->is_young()) {
4648         assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
4649                "Young index %d is wrong for region %u of type %s with %u young regions",
4650                r->young_index_in_cset(),
4651                r->hrm_index(),
4652                r->get_type_str(),
4653                g1h->collection_set()->young_region_length());
4654         size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
4655         r->record_surv_words_in_group(words_survived);
4656       }
4657 
4658       if (!r->evacuation_failed()) {
4659         assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
4660         _before_used_bytes += r->used();
4661         g1h->free_region(r,
4662                          &_local_free_list,
4663                          true, /* skip_remset */
4664                          true, /* skip_hot_card_cache */
4665                          true  /* locked */);
4666       } else {
4667         r->uninstall_surv_rate_group();
4668         r->set_young_index_in_cset(-1);
4669         r->set_evacuation_failed(false);
4670         // When moving a young gen region to old gen, we "allocate" that whole region
4671         // there. This is in addition to any already evacuated objects. Notify the
4672         // policy about that.
4673         // Old gen regions do not cause an additional allocation: both the objects
4674         // still in the region and the ones already moved are accounted for elsewhere.
4675         if (r->is_young()) {
4676           _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
4677         }
4678         // The region is now considered to be old.
4679         r->set_old();
4680         // Do some allocation statistics accounting. Regions that failed evacuation
4681         // are always made old, so there is no need to update anything in the young
4682         // gen statistics, but we need to update old gen statistics.
4683         size_t used_words = r->marked_bytes() / HeapWordSize;
4684 
4685         _failure_used_words += used_words;
4686         _failure_waste_words += HeapRegion::GrainWords - used_words;
4687 
4688         g1h->old_set_add(r);
4689         _after_used_bytes += r->used();
4690       }
4691       return false;
4692     }
4693 
4694     void complete_work() {
4695       G1CollectedHeap* g1h = G1CollectedHeap::heap();
4696 
4697       _evacuation_info->set_regions_freed(_local_free_list.length());
4698       _evacuation_info->increment_collectionset_used_after(_after_used_bytes);
4699 
4700       g1h->prepend_to_freelist(&_local_free_list);
4701       g1h->decrement_summary_bytes(_before_used_bytes);
4702 
4703       G1Policy* policy = g1h->g1_policy();
4704       policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
4705 
4706       g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
4707     }
4708   };
4709 
4710   G1CollectionSet* _collection_set;
4711   G1SerialFreeCollectionSetClosure _cl;
4712   const size_t* _surviving_young_words;
4713 
4714   size_t _rs_lengths;
4715 
4716   volatile jint _serial_work_claim;
4717 
4718   struct WorkItem {
4719     uint region_idx;
4720     bool is_young;
4721     bool evacuation_failed;
4722 
4723     WorkItem(HeapRegion* r) {
4724       region_idx = r->hrm_index();
4725       is_young = r->is_young();
4726       evacuation_failed = r->evacuation_failed();
4727     }
4728   };
4729 
4730   volatile size_t _parallel_work_claim;
4731   size_t _num_work_items;
4732   WorkItem* _work_items;
4733 
4734   void do_serial_work() {
4735     // Need to grab the lock to be allowed to modify the old region list.
4736     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4737     _collection_set->iterate(&_cl);
4738   }
4739 
4740   void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {
4741     G1CollectedHeap* g1h = G1CollectedHeap::heap();
4742 
4743     HeapRegion* r = g1h->region_at(region_idx);
4744     assert(!g1h->is_on_master_free_list(r), "sanity");
4745 
4746     Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths);
4747 
4748     if (!is_young) {
4749       g1h->_hot_card_cache->reset_card_counts(r);
4750     }
4751 
4752     if (!evacuation_failed) {
4753       r->rem_set()->clear_locked();
4754     }
4755   }
4756 
4757   class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
4758   private:
4759     size_t _cur_idx;
4760     WorkItem* _work_items;
4761   public:
4762     G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
4763 
4764     virtual bool doHeapRegion(HeapRegion* r) {
4765       _work_items[_cur_idx++] = WorkItem(r);
4766       return false;
4767     }
4768   };
4769 
4770   void prepare_work() {
4771     G1PrepareFreeCollectionSetClosure cl(_work_items);
4772     _collection_set->iterate(&cl);
4773   }
4774 
4775   void complete_work() {
4776     _cl.complete_work();
4777 
4778     G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
4779     policy->record_max_rs_lengths(_rs_lengths);
4780     policy->cset_regions_freed();
4781   }
4782 public:
4783   G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4784     AbstractGangTask("G1 Free Collection Set"),
4785     _collection_set(collection_set),
4786     _cl(evacuation_info, surviving_young_words),
4787     _surviving_young_words(surviving_young_words),
4788     _serial_work_claim(0),
4789     _rs_lengths(0),
4790     _parallel_work_claim(0),
4791     _num_work_items(collection_set->region_length()),
4792     _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
4793     prepare_work();
4794   }
4795 
4796   ~G1FreeCollectionSetTask() {
4797     complete_work();
4798     FREE_C_HEAP_ARRAY(WorkItem, _work_items);
4799   }
4800 
4801   // Chunk size for work distribution. The chosen value has been determined experimentally
4802   // to be a good tradeoff between overhead and achievable parallelism.
4803   static uint chunk_size() { return 32; }
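       // Worked example (hypothetical numbers): a collection set of 100 regions
       // is claimed in chunks [0,32), [32,64), [64,96) and [96,100); the final
       // claimant clamps its end to _num_work_items.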
4804 
4805   virtual void work(uint worker_id) {
4806     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
4807 
4808     // Claim serial work.
4809     if (_serial_work_claim == 0) {
4810       jint value = Atomic::add(1, &_serial_work_claim) - 1;
4811       if (value == 0) {
4812         double serial_time = os::elapsedTime();
4813         do_serial_work();
4814         timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
4815       }
4816     }
4817 
4818     // Start parallel work.
4819     double young_time = 0.0;
4820     bool has_young_time = false;
4821     double non_young_time = 0.0;
4822     bool has_non_young_time = false;
4823 
4824     while (true) {
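           // Atomically claim the next chunk of work items: the fetch-and-add
           // returns the exclusive end of this worker's claim; its start is
           // recovered by subtracting the chunk size back off.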
4825       size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
4826       size_t cur = end - chunk_size();
4827 
4828       if (cur >= _num_work_items) {
4829         break;
4830       }
4831 
4832       double start_time = os::elapsedTime();
4833 
4834       end = MIN2(end, _num_work_items);
4835 
4836       for (; cur < end; cur++) {
4837         bool is_young = _work_items[cur].is_young;
4838 
4839         do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);
4840 
4841         double end_time = os::elapsedTime();
4842         double time_taken = end_time - start_time;
4843         if (is_young) {
4844           young_time += time_taken;
4845           has_young_time = true;
4846         } else {
4847           non_young_time += time_taken;
4848           has_non_young_time = true;
4849         }
4850         start_time = end_time;
4851       }
4852     }
4853 
4854     if (has_young_time) {
4855       timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);
4856     }
4857     if (has_non_young_time) {
4858       timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time);
4859     }
4860   }
4861 };
4862 
4863 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4864   _eden.clear();
4865 
4866   double free_cset_start_time = os::elapsedTime();
4867 
4868   {
4869     uint const num_chunks = MAX2(collection_set->region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
4870     uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
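         // Sizing note: num_chunks rounds down (e.g., hypothetically, 100 regions
         // with a chunk size of 32 give 3), so slightly fewer workers than claims
         // may be started; each worker keeps claiming chunks until all work items
         // are taken, and the MAX2 keeps one worker for very small collection sets.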
4871 
4872     G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
4873 
4874     log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
4875                         cl.name(),
4876                         num_workers,
4877                         collection_set->region_length());
4878     workers()->run_task(&cl, num_workers);
4879   }
4880   g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
4881 
4882   collection_set->clear();
4883 }
4884 
4885 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4886  private:
4887   FreeRegionList* _free_region_list;
4889   uint _humongous_regions_removed;
4890   size_t _freed_bytes;
4891  public:
4892 
4893   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4894     _free_region_list(free_region_list), _humongous_regions_removed(0), _freed_bytes(0) {
4895   }
4896 
4897   virtual bool doHeapRegion(HeapRegion* r) {
4898     if (!r->is_starts_humongous()) {
4899       return false;
4900     }
4901 
4902     G1CollectedHeap* g1h = G1CollectedHeap::heap();
4903 
4904     oop obj = (oop)r->bottom();
4905     G1CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
4906 
4907     // The following checks for whether the humongous object is live are sufficient.
4908     // The main additional check (in addition to having a reference from the roots
4909     // or the young gen) is whether the humongous object has a remembered set entry.
4910     //
4911     // A humongous object cannot be live if there is no remembered set for it
4912     // because:
4913     // - there can be no references from within humongous starts regions referencing
4914     // the object because we never allocate other objects into them.
4915     // (I.e. there are no intra-region references that may be missed by the
4916     // remembered set)
4917     // - as soon as there is a remembered set entry to the humongous starts region
4918     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
4919     // until the end of a concurrent mark.
4920     //
4921     // It is not required to check whether the object has been found dead by marking
4922     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
4923     // all objects allocated during that time are considered live.
4924     // SATB marking is even more conservative than the remembered set.
4925     // So if at this point in the collection there is no remembered set entry,
4926     // nobody has a reference to it.
4927     // At the start of collection we flush all refinement logs, and remembered sets
4928     // are completely up-to-date wrt references to the humongous object.
4929     //
4930     // Other implementation considerations:
4931     // - never consider object arrays at this time because cleaning up their
4932     // remembered sets would take considerable effort. This is
4933     // required because stale remembered sets might reference locations that
4934     // are currently allocated into.
4935     uint region_idx = r->hrm_index();
4936     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
4937         !r->rem_set()->is_empty()) {
4938       log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4939                                region_idx,
4940                                (size_t)obj->size() * HeapWordSize,
4941                                p2i(r->bottom()),
4942                                r->rem_set()->occupied(),
4943                                r->rem_set()->strong_code_roots_list_length(),
4944                                next_bitmap->isMarked(r->bottom()),
4945                                g1h->is_humongous_reclaim_candidate(region_idx),
4946                                obj->is_typeArray()
4947                               );
4948       return false;
4949     }
4950 
4951     guarantee(obj->is_typeArray(),
4952               "Only eagerly reclaiming type arrays is supported, but the object "
4953               PTR_FORMAT " is not.", p2i(r->bottom()));
4954 
4955     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4956                              region_idx,
4957                              (size_t)obj->size() * HeapWordSize,
4958                              p2i(r->bottom()),
4959                              r->rem_set()->occupied(),
4960                              r->rem_set()->strong_code_roots_list_length(),
4961                              next_bitmap->isMarked(r->bottom()),
4962                              g1h->is_humongous_reclaim_candidate(region_idx),
4963                              obj->is_typeArray()
4964                             );
4965 
4966     // Need to clear mark bit of the humongous object if already set.
4967     if (next_bitmap->isMarked(r->bottom())) {
4968       next_bitmap->clear(r->bottom());
4969     }
4970     do {
4971       HeapRegion* next = g1h->next_region_in_humongous(r);
4972       _freed_bytes += r->used();
4973       r->set_containing_set(NULL);
4974       _humongous_regions_removed++;
4975       g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
4976       r = next;
4977     } while (r != NULL);
4978 
4979     return false;
4980   }
4981 
4982   uint humongous_free_count() {
4983     return _humongous_regions_removed;
4984   }
4985 
4986   size_t bytes_freed() const {
4987     return _freed_bytes;
4988   }
4989 };
4990 
4991 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
4992   assert_at_safepoint(true /* should_be_vm_thread */);
4993 
4994   if (!G1EagerReclaimHumongousObjects ||
4995       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
4996     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
4997     return;
4998   }
4999 
5000   double start_time = os::elapsedTime();
5001 
5002   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5003 
5004   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5005   heap_region_iterate(&cl);
5006 
5007   remove_from_old_sets(0, cl.humongous_free_count());
5008 
5009   G1HRPrinter* hrp = hr_printer();
5010   if (hrp->is_active()) {
5011     FreeRegionListIterator iter(&local_cleanup_list);
5012     while (iter.more_available()) {
5013       HeapRegion* hr = iter.get_next();
5014       hrp->cleanup(hr);
5015     }
5016   }
5017 
5018   prepend_to_freelist(&local_cleanup_list);
5019   decrement_summary_bytes(cl.bytes_freed());
5020 
5021   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
5022                                                                     cl.humongous_free_count());
5023 }
5024 
5025 class G1AbandonCollectionSetClosure : public HeapRegionClosure {
5026 public:
5027   virtual bool doHeapRegion(HeapRegion* r) {
5028     assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
5029     G1CollectedHeap::heap()->clear_in_cset(r);
5030     r->set_young_index_in_cset(-1);
5031     return false;
5032   }
5033 };
5034 
5035 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
5036   G1AbandonCollectionSetClosure cl;
5037   collection_set->iterate(&cl);
5038 
5039   collection_set->clear();
5040   collection_set->stop_incremental_building();
5041 }
5042 
5043 void G1CollectedHeap::set_free_regions_coming() {
5044   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");
5045 
5046   assert(!free_regions_coming(), "pre-condition");
5047   _free_regions_coming = true;
5048 }
5049 
5050 void G1CollectedHeap::reset_free_regions_coming() {
5051   assert(free_regions_coming(), "pre-condition");
5052 
5053   {
5054     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5055     _free_regions_coming = false;
5056     SecondaryFreeList_lock->notify_all();
5057   }
5058 
5059   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
5060 }
5061 
5062 void G1CollectedHeap::wait_while_free_regions_coming() {
5063   // Most of the time we won't have to wait, so let's do a quick test
5064   // first before we take the lock.
5065   if (!free_regions_coming()) {
5066     return;
5067   }
5068 
5069   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");
5070 
5071   {
5072     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5073     while (free_regions_coming()) {
5074       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5075     }
5076   }
5077 
5078   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
5079 }
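
     // Protocol sketch (informal): the concurrent mark thread brackets its
     // region freeing with set_free_regions_coming() / reset_free_regions_coming()
     // above, while any thread that needs a stable free list first calls
     // wait_while_free_regions_coming().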
5080 
5081 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5082   return _allocator->is_retained_old_region(hr);
5083 }
5084 
5085 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5086   _eden.add(hr);
5087   _g1_policy->set_region_eden(hr);
5088 }
5089 
5090 #ifdef ASSERT
5091 
5092 class NoYoungRegionsClosure: public HeapRegionClosure {
5093 private:
5094   bool _success;
5095 public:
5096   NoYoungRegionsClosure() : _success(true) { }
5097   bool doHeapRegion(HeapRegion* r) {
5098     if (r->is_young()) {
5099       log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5100                             p2i(r->bottom()), p2i(r->end()));
5101       _success = false;
5102     }
5103     return false;
5104   }
5105   bool success() { return _success; }
5106 };
5107 
5108 bool G1CollectedHeap::check_young_list_empty() {
5109   bool ret = (young_regions_count() == 0);
5110 
5111   NoYoungRegionsClosure closure;
5112   heap_region_iterate(&closure);
5113   ret = ret && closure.success();
5114 
5115   return ret;
5116 }
5117 
5118 #endif // ASSERT
5119 
5120 class TearDownRegionSetsClosure : public HeapRegionClosure {
5121 private:
5122   HeapRegionSet *_old_set;
5123 
5124 public:
5125   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
5126 
5127   bool doHeapRegion(HeapRegion* r) {
5128     if (r->is_old()) {
5129       _old_set->remove(r);
5130     } else if (r->is_young()) {
5131       r->uninstall_surv_rate_group();
5132     } else {
5133       // We ignore free regions; we'll empty the free list afterwards.
5134       // We ignore humongous regions; we're not tearing down the
5135       // humongous regions set.
5136       assert(r->is_free() || r->is_humongous(),
5137              "it cannot be another type");
5138     }
5139     return false;
5140   }
5141 
5142   ~TearDownRegionSetsClosure() {
5143     assert(_old_set->is_empty(), "post-condition");
5144   }
5145 };
5146 
5147 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
5148   assert_at_safepoint(true /* should_be_vm_thread */);
5149 
5150   if (!free_list_only) {
5151     TearDownRegionSetsClosure cl(&_old_set);
5152     heap_region_iterate(&cl);
5153 
5154     // Note that emptying the _young_list is postponed and instead done as
5155     // the first step when rebuilding the regions sets again. The reason for
5156     // this is that during a full GC string deduplication needs to know if
5157     // a collected region was young or old when the full GC was initiated.
5158   }
5159   _hrm.remove_all_free_regions();
5160 }
5161 
5162 void G1CollectedHeap::increase_used(size_t bytes) {
5163   _summary_bytes_used += bytes;
5164 }
5165 
5166 void G1CollectedHeap::decrease_used(size_t bytes) {
5167   assert(_summary_bytes_used >= bytes,
5168          "invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
5169          _summary_bytes_used, bytes);
5170   _summary_bytes_used -= bytes;
5171 }
5172 
5173 void G1CollectedHeap::set_used(size_t bytes) {
5174   _summary_bytes_used = bytes;
5175 }
5176 
5177 class RebuildRegionSetsClosure : public HeapRegionClosure {
5178 private:
5179   bool               _free_list_only;
5180   HeapRegionSet*     _old_set;
5181   HeapRegionManager* _hrm;
5182   size_t             _total_used;
5183 
5184 public:
5185   RebuildRegionSetsClosure(bool free_list_only,
5186                            HeapRegionSet* old_set, HeapRegionManager* hrm) :
5187     _free_list_only(free_list_only),
5188     _old_set(old_set), _hrm(hrm), _total_used(0) {
5189     assert(_hrm->num_free_regions() == 0, "pre-condition");
5190     if (!free_list_only) {
5191       assert(_old_set->is_empty(), "pre-condition");
5192     }
5193   }
5194 
5195   bool doHeapRegion(HeapRegion* r) {
5196     if (r->is_empty()) {
5197       // Add free regions to the free list
5198       r->set_free();
5199       r->set_allocation_context(AllocationContext::system());
5200       _hrm->insert_into_free_list(r);
5201     } else if (!_free_list_only) {
5202       if (r->is_humongous()) {
5203         // We ignore humongous regions; the humongous set is left unchanged.
5205       } else {
5206         assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
5207         // We now consider all regions old, so register as such. Leave
5208         // archive regions set that way, however, while still adding
5209         // them to the old set.
5210         if (!r->is_archive()) {
5211           r->set_old();
5212         }
5213         _old_set->add(r);
5214       }
5215       _total_used += r->used();
5216     }
5217 
5218     return false;
5219   }
5220 
5221   size_t total_used() {
5222     return _total_used;
5223   }
5224 };
5225 
5226 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
5227   assert_at_safepoint(true /* should_be_vm_thread */);
5228 
5229   if (!free_list_only) {
5230     _eden.clear();
5231     _survivor.clear();
5232   }
5233 
5234   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
5235   heap_region_iterate(&cl);
5236 
5237   if (!free_list_only) {
5238     set_used(cl.total_used());
5239     if (_archive_allocator != NULL) {
5240       _archive_allocator->clear_used();
5241     }
5242   }
5243   assert(used_unlocked() == recalculate_used(),
5244          "inconsistent used_unlocked(), "
5245          "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
5246          used_unlocked(), recalculate_used());
5247 }
5248 
5249 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
5250   _refine_cte_cl->set_concurrent(concurrent);
5251 }
5252 
5253 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5254   HeapRegion* hr = heap_region_containing(p);
5255   return hr->is_in(p);
5256 }
5257 
5258 // Methods for the mutator alloc region
5259 
5260 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5261                                                       bool force) {
5262   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5263   assert(!force || g1_policy()->can_expand_young_list(),
5264          "if force is true we should be able to expand the young list");
5265   bool should_allocate = g1_policy()->should_allocate_mutator_region();
5266   if (force || should_allocate) {
5267     HeapRegion* new_alloc_region = new_region(word_size,
5268                                               false /* is_old */,
5269                                               false /* do_expand */);
5270     if (new_alloc_region != NULL) {
5271       set_region_short_lived_locked(new_alloc_region);
5272       _hr_printer.alloc(new_alloc_region, !should_allocate);
5273       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
5274       return new_alloc_region;
5275     }
5276   }
5277   return NULL;
5278 }
5279 
5280 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
5281                                                   size_t allocated_bytes) {
5282   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5283   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
5284 
5285   collection_set()->add_eden_region(alloc_region);
5286   increase_used(allocated_bytes);
5287   _hr_printer.retire(alloc_region);
5288   // We update the eden sizes here, when the region is retired,
5289   // instead of when it's allocated, since this is the point that its
5290   // used space has been recorded in _summary_bytes_used.
5291   g1mm()->update_eden_size();
5292 }
5293 
5294 // Methods for the GC alloc regions
5295 
5296 bool G1CollectedHeap::has_more_regions(InCSetState dest) {
5297   if (dest.is_old()) {
5298     return true;
5299   } else {
5300     return survivor_regions_count() < g1_policy()->max_survivor_regions();
5301   }
5302 }
5303 
5304 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
5305   assert(FreeList_lock->owned_by_self(), "pre-condition");
5306 
5307   if (!has_more_regions(dest)) {
5308     return NULL;
5309   }
5310 
5311   const bool is_survivor = dest.is_young();
5312 
5313   HeapRegion* new_alloc_region = new_region(word_size,
5314                                             !is_survivor /* is_old */,
5315                                             true /* do_expand */);
5316   if (new_alloc_region != NULL) {
5317     // We really only need to do this for old regions given that we
5318     // should never scan survivors. But it doesn't hurt to do it
5319     // for survivors too.
5320     new_alloc_region->record_timestamp();
5321     if (is_survivor) {
5322       new_alloc_region->set_survivor();
5323       _survivor.add(new_alloc_region);
5324       _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
5325     } else {
5326       new_alloc_region->set_old();
5327       _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
5328     }
5329     _hr_printer.alloc(new_alloc_region);
5330     bool during_im = collector_state()->during_initial_mark_pause();
5331     new_alloc_region->note_start_of_copying(during_im);
5332     return new_alloc_region;
5333   }
5334   return NULL;
5335 }
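
// Note: dest.is_young() is what classifies the new GC alloc region as a
// survivor; every other destination is treated as old. A hypothetical caller,
// honoring the FreeList_lock precondition asserted above:
//
//   MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
//   HeapRegion* r = new_gc_alloc_region(word_size, dest);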
5336 
5337 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
5338                                              size_t allocated_bytes,
5339                                              InCSetState dest) {
5340   bool during_im = collector_state()->during_initial_mark_pause();
5341   alloc_region->note_end_of_copying(during_im);
5342   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
5343   if (dest.is_old()) {
5344     _old_set.add(alloc_region);
5345   }
5346   _hr_printer.retire(alloc_region);
5347 }
5348 
5349 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
5350   bool expanded = false;
5351   uint index = _hrm.find_highest_free(&expanded);
5352 
5353   if (index != G1_NO_HRM_INDEX) {
5354     if (expanded) {
5355       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
5356                                 HeapRegion::GrainWords * HeapWordSize);
5357     }
5358     _hrm.allocate_free_regions_starting_at(index, 1);
5359     return region_at(index);
5360   }
5361   return NULL;
5362 }
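
// Note: this hands out the free region with the highest index, expanding the
// heap if find_highest_free() had to do so (hence the log message above). The
// typical consumer is an allocator that wants regions at the top of the heap,
// e.g. the archive allocator; that caller is an assumption, not named here.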
5363 
5364 // Optimized nmethod scanning
5365 
5366 class RegisterNMethodOopClosure: public OopClosure {
5367   G1CollectedHeap* _g1h;
5368   nmethod* _nm;
5369 
5370   template <class T> void do_oop_work(T* p) {
5371     T heap_oop = oopDesc::load_heap_oop(p);
5372     if (!oopDesc::is_null(heap_oop)) {
5373       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5374       HeapRegion* hr = _g1h->heap_region_containing(obj);
5375       assert(!hr->is_continues_humongous(),
5376              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5377              " starting at " HR_FORMAT,
5378              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5379 
5380       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
5381       hr->add_strong_code_root_locked(_nm);
5382     }
5383   }
5384 
5385 public:
5386   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5387     _g1h(g1h), _nm(nm) {}
5388 
5389   void do_oop(oop* p)       { do_oop_work(p); }
5390   void do_oop(narrowOop* p) { do_oop_work(p); }
5391 };
5392 
5393 class UnregisterNMethodOopClosure: public OopClosure {
5394   G1CollectedHeap* _g1h;
5395   nmethod* _nm;
5396 
5397   template <class T> void do_oop_work(T* p) {
5398     T heap_oop = oopDesc::load_heap_oop(p);
5399     if (!oopDesc::is_null(heap_oop)) {
5400       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5401       HeapRegion* hr = _g1h->heap_region_containing(obj);
5402       assert(!hr->is_continues_humongous(),
5403              "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5404              " starting at " HR_FORMAT,
5405              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5406 
5407       hr->remove_strong_code_root(_nm);
5408     }
5409   }
5410 
5411 public:
5412   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5413     _g1h(g1h), _nm(nm) {}
5414 
5415   void do_oop(oop* p)       { do_oop_work(p); }
5416   void do_oop(narrowOop* p) { do_oop_work(p); }
5417 };
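
// The two closures above are deliberately symmetric: RegisterNMethodOopClosure
// adds _nm to the strong code root set of every region its oops point into,
// and UnregisterNMethodOopClosure removes it again. Both asserts rely on the
// fact that an oop always refers to an object's start, so for a humongous
// object the containing region is the humongous start region, never one of
// its continuation regions.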
5418 
5419 void G1CollectedHeap::register_nmethod(nmethod* nm) {
5420   CollectedHeap::register_nmethod(nm);
5421 
5422   guarantee(nm != NULL, "sanity");
5423   RegisterNMethodOopClosure reg_cl(this, nm);
5424   nm->oops_do(&reg_cl);
5425 }
5426 
5427 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
5428   CollectedHeap::unregister_nmethod(nm);
5429 
5430   guarantee(nm != NULL, "sanity");
5431   UnregisterNMethodOopClosure reg_cl(this, nm);
5432   nm->oops_do(&reg_cl, true /* allow_zombie */);
5433 }
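
// A hedged usage sketch: these hooks are driven from outside this file when
// an nmethod is created or unloaded (the exact call sites are an assumption),
// keeping each region's strong code root set in sync with the code cache:
//
//   nmethod* nm = ...;                        // newly created compiled method
//   Universe::heap()->register_nmethod(nm);
//   // ... later, before the nmethod goes away ...
//   Universe::heap()->unregister_nmethod(nm);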
5434 
5435 void G1CollectedHeap::purge_code_root_memory() {
5436   double purge_start = os::elapsedTime();
5437   G1CodeRootSet::purge();
5438   double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
5439   g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
5440 }
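
// The timing idiom above converts os::elapsedTime()'s seconds into the
// milliseconds the phase-times bookkeeping expects; as a minimal sketch:
//
//   double start = os::elapsedTime();
//   // ... timed work ...
//   double elapsed_ms = (os::elapsedTime() - start) * 1000.0;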
5441 
5442 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
5443   G1CollectedHeap* _g1h;
5444 
5445 public:
5446   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5447     _g1h(g1h) {}
5448 
5449   void do_code_blob(CodeBlob* cb) {
5450     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
5451     if (nm == NULL) {
5452       return;
5453     }
5454 
5455     if (ScavengeRootsInCode) {
5456       _g1h->register_nmethod(nm);
5457     }
5458   }
5459 };
5460 
5461 void G1CollectedHeap::rebuild_strong_code_roots() {
5462   RebuildStrongCodeRootClosure blob_cl(this);
5463   CodeCache::blobs_do(&blob_cl);
5464 }
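
// Note (an inference, not stated in this section): rebuilding from scratch
// like this is the recovery path for when the per-region strong code root
// sets have been discarded, e.g. as part of a full collection; it simply
// walks every blob in the code cache and re-registers each live nmethod,
// subject to the ScavengeRootsInCode check in the closure above.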