1 /*
   2  * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/concurrentG1Refine.hpp"
  33 #include "gc/g1/concurrentG1RefineThread.hpp"
  34 #include "gc/g1/concurrentMarkThread.inline.hpp"
  35 #include "gc/g1/g1Allocator.inline.hpp"
  36 #include "gc/g1/g1CollectedHeap.inline.hpp"
  37 #include "gc/g1/g1CollectionSet.hpp"
  38 #include "gc/g1/g1CollectorPolicy.hpp"
  39 #include "gc/g1/g1CollectorState.hpp"
  40 #include "gc/g1/g1EvacStats.inline.hpp"
  41 #include "gc/g1/g1GCPhaseTimes.hpp"
  42 #include "gc/g1/g1HeapSizingPolicy.hpp"
  43 #include "gc/g1/g1HeapTransition.hpp"
  44 #include "gc/g1/g1HeapVerifier.hpp"
  45 #include "gc/g1/g1HotCardCache.hpp"
  46 #include "gc/g1/g1MarkSweep.hpp"
  47 #include "gc/g1/g1OopClosures.inline.hpp"
  48 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  49 #include "gc/g1/g1Policy.hpp"
  50 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  51 #include "gc/g1/g1RemSet.inline.hpp"
  52 #include "gc/g1/g1RootClosures.hpp"
  53 #include "gc/g1/g1RootProcessor.hpp"
  54 #include "gc/g1/g1StringDedup.hpp"
  55 #include "gc/g1/g1YCTypes.hpp"
  56 #include "gc/g1/heapRegion.inline.hpp"
  57 #include "gc/g1/heapRegionRemSet.hpp"
  58 #include "gc/g1/heapRegionSet.inline.hpp"
  59 #include "gc/g1/suspendibleThreadSet.hpp"
  60 #include "gc/g1/vm_operations_g1.hpp"
  61 #include "gc/shared/gcHeapSummary.hpp"
  62 #include "gc/shared/gcId.hpp"
  63 #include "gc/shared/gcLocker.inline.hpp"
  64 #include "gc/shared/gcTimer.hpp"
  65 #include "gc/shared/gcTrace.hpp"
  66 #include "gc/shared/gcTraceTime.inline.hpp"
  67 #include "gc/shared/generationSpec.hpp"
  68 #include "gc/shared/isGCActiveMark.hpp"
  69 #include "gc/shared/preservedMarks.inline.hpp"
  70 #include "gc/shared/referenceProcessor.inline.hpp"
  71 #include "gc/shared/taskqueue.inline.hpp"
  72 #include "logging/log.hpp"
  73 #include "memory/allocation.hpp"
  74 #include "memory/iterator.hpp"
  75 #include "memory/resourceArea.hpp"
  76 #include "oops/oop.inline.hpp"
  77 #include "prims/resolvedMethodTable.hpp"
  78 #include "runtime/atomic.hpp"
  79 #include "runtime/init.hpp"
  80 #include "runtime/orderAccess.inline.hpp"
  81 #include "runtime/vmThread.hpp"
  82 #include "utilities/globalDefinitions.hpp"
  83 #include "utilities/stack.inline.hpp"
  84 
  85 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  86 
  87 // INVARIANTS/NOTES
  88 //
  89 // All allocation activity covered by the G1CollectedHeap interface is
  90 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  91 // and allocate_new_tlab, which are the "entry" points to the
  92 // allocation code from the rest of the JVM.  (Note that this does not
  93 // apply to TLAB allocation, which is not part of this interface: it
  94 // is done by clients of this interface.)
  95 
  96 // Local to this file.
  97 
  98 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  99   bool _concurrent;
 100 public:
 101   RefineCardTableEntryClosure() : _concurrent(true) { }
 102 
 103   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 104     G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);
 105 
 106     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 107       // Caller will actually yield.
 108       return false;
 109     }
 110     // Otherwise, we finished successfully; return true.
 111     return true;
 112   }
 113 
 114   void set_concurrent(bool b) { _concurrent = b; }
 115 };
 116 
 117 
 118 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 119  private:
 120   size_t _num_dirtied;
 121   G1CollectedHeap* _g1h;
 122   G1SATBCardTableLoggingModRefBS* _g1_bs;
 123 
 124   HeapRegion* region_for_card(jbyte* card_ptr) const {
 125     return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
 126   }
 127 
 128   bool will_become_free(HeapRegion* hr) const {
 129     // A region will be freed by free_collection_set if the region is in the
 130     // collection set and has not had an evacuation failure.
 131     return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
 132   }
 133 
 134  public:
 135   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
 136     _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
 137 
 138   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 139     HeapRegion* hr = region_for_card(card_ptr);
 140 
 141     // Should only dirty cards in regions that won't be freed.
 142     if (!will_become_free(hr)) {
 143       *card_ptr = CardTableModRefBS::dirty_card_val();
 144       _num_dirtied++;
 145     }
 146 
 147     return true;
 148   }
 149 
 150   size_t num_dirtied()   const { return _num_dirtied; }
 151 };
 152 
 153 
 154 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
 155   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 156 }
 157 
 158 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 159   // The from card cache is not the memory that is actually committed. So we cannot
 160   // take advantage of the zero_filled parameter.
 161   reset_from_card_cache(start_idx, num_regions);
 162 }
 163 
 164 // Returns true if the reference points to an object that
 165 // can move in an incremental collection.
 166 bool G1CollectedHeap::is_scavengable(const void* p) {
 167   HeapRegion* hr = heap_region_containing(p);
 168   return !hr->is_pinned();
 169 }
 170 
 171 // Private methods.
 172 
 173 HeapRegion*
 174 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 175   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 176   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 177     if (!_secondary_free_list.is_empty()) {
 178       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
 179                                       "secondary_free_list has %u entries",
 180                                       _secondary_free_list.length());
 181       // It looks as if there are free regions available on the
 182       // secondary_free_list. Let's move them to the free_list and try
 183       // again to allocate from it.
 184       append_secondary_free_list();
 185 
 186       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 187              "empty we should have moved at least one entry to the free_list");
 188       HeapRegion* res = _hrm.allocate_free_region(is_old);
 189       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
 190                                       "allocated " HR_FORMAT " from secondary_free_list",
 191                                       HR_FORMAT_PARAMS(res));
 192       return res;
 193     }
 194 
 195     // Wait here until we get notified either when (a) there are no
 196     // more free regions coming or (b) some regions have been moved on
 197     // the secondary_free_list.
 198     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 199   }
 200 
 201   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
 202                                   "could not allocate from secondary_free_list");
 203   return NULL;
 204 }
 205 
 206 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 207   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 208          "the only time we use this to allocate a humongous region is "
 209          "when we are allocating a single humongous region");
 210 
 211   HeapRegion* res;
 212   if (G1StressConcRegionFreeing) {
 213     if (!_secondary_free_list.is_empty()) {
 214       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
 215                                       "forced to look at the secondary_free_list");
 216       res = new_region_try_secondary_free_list(is_old);
 217       if (res != NULL) {
 218         return res;
 219       }
 220     }
 221   }
 222 
 223   res = _hrm.allocate_free_region(is_old);
 224 
 225   if (res == NULL) {
 226     log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
 227                                     "res == NULL, trying the secondary_free_list");
 228     res = new_region_try_secondary_free_list(is_old);
 229   }
 230   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 231     // Currently, only attempts to allocate GC alloc regions set
 232     // do_expand to true. So, we should only reach here during a
 233     // safepoint. If this assumption changes we might have to
 234     // reconsider the use of _expand_heap_after_alloc_failure.
 235     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 236 
 237     log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
 238                               word_size * HeapWordSize);
 239 
 240     if (expand(word_size * HeapWordSize)) {
 241       // Given that expand() succeeded in expanding the heap, and we
 242       // always expand the heap by an amount aligned to the heap
 243       // region size, the free list should in theory not be empty.
 244       // In either case allocate_free_region() will check for NULL.
 245       res = _hrm.allocate_free_region(is_old);
 246     } else {
 247       _expand_heap_after_alloc_failure = false;
 248     }
 249   }
 250   return res;
 251 }
 252 
 253 HeapWord*
 254 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 255                                                            uint num_regions,
 256                                                            size_t word_size,
 257                                                            AllocationContext_t context) {
 258   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 259   assert(is_humongous(word_size), "word_size should be humongous");
 260   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 261 
 262   // Index of last region in the series.
 263   uint last = first + num_regions - 1;
 264 
 265   // We need to initialize the region(s) we just discovered. This is
 266   // a bit tricky given that it can happen concurrently with
 267   // refinement threads refining cards on these regions and
 268   // potentially wanting to refine the BOT as they are scanning
 269   // those cards (this can happen shortly after a cleanup; see CR
 270   // 6991377). So we have to set up the region(s) carefully and in
 271   // a specific order.
 272 
 273   // The word size sum of all the regions we will allocate.
 274   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 275   assert(word_size <= word_size_sum, "sanity");
 276 
 277   // This will be the "starts humongous" region.
 278   HeapRegion* first_hr = region_at(first);
 279   // The header of the new object will be placed at the bottom of
 280   // the first region.
 281   HeapWord* new_obj = first_hr->bottom();
 282   // This will be the new top of the new object.
 283   HeapWord* obj_top = new_obj + word_size;
 284 
 285   // First, we need to zero the header of the space that we will be
 286   // allocating. When we update top further down, some refinement
 287   // threads might try to scan the region. By zeroing the header we
 288   // ensure that any thread that will try to scan the region will
 289   // come across the zero klass word and bail out.
 290   //
 291   // NOTE: It would not have been correct to have used
 292   // CollectedHeap::fill_with_object() and make the space look like
 293   // an int array. The thread that is doing the allocation will
 294   // later update the object header to a potentially different array
 295   // type and, for a very short period of time, the klass and length
 296   // fields will be inconsistent. This could cause a refinement
 297   // thread to calculate the object size incorrectly.
 298   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 299 
 300   // Next, pad out the unused tail of the last region with filler
 301   // objects, for improved usage accounting.
 302   // How many words we use for filler objects.
 303   size_t word_fill_size = word_size_sum - word_size;
 304 
 305   // How many words memory we "waste" which cannot hold a filler object.
 306   size_t words_not_fillable = 0;
 307 
 308   if (word_fill_size >= min_fill_size()) {
 309     fill_with_objects(obj_top, word_fill_size);
 310   } else if (word_fill_size > 0) {
 311     // We have space to fill, but we cannot fit an object there.
 312     words_not_fillable = word_fill_size;
 313     word_fill_size = 0;
 314   }
 315 
 316   // We will set up the first region as "starts humongous". This
 317   // will also update the BOT covering all the regions to reflect
 318   // that there is a single object that starts at the bottom of the
 319   // first region.
 320   first_hr->set_starts_humongous(obj_top, word_fill_size);
 321   first_hr->set_allocation_context(context);
 322   // Then, if there are any, we will set up the "continues
 323   // humongous" regions.
 324   HeapRegion* hr = NULL;
 325   for (uint i = first + 1; i <= last; ++i) {
 326     hr = region_at(i);
 327     hr->set_continues_humongous(first_hr);
 328     hr->set_allocation_context(context);
 329   }
 330 
 331   // Up to this point no concurrent thread would have been able to
 332   // do any scanning on any region in this series. All the top
 333   // fields still point to bottom, so the intersection between
 334   // [bottom,top] and [card_start,card_end] will be empty. Before we
 335   // update the top fields, we'll do a storestore to make sure that
 336   // no thread sees the update to top before the zeroing of the
 337   // object header and the BOT initialization.
 338   OrderAccess::storestore();
 339 
 340   // Now, we will update the top fields of the "continues humongous"
 341   // regions except the last one.
 342   for (uint i = first; i < last; ++i) {
 343     hr = region_at(i);
 344     hr->set_top(hr->end());
 345   }
 346 
 347   hr = region_at(last);
 348   // If we cannot fit a filler object, we must set top to the end
 349   // of the humongous object, otherwise we cannot iterate the heap
 350   // and the BOT will not be complete.
 351   hr->set_top(hr->end() - words_not_fillable);
 352 
 353   assert(hr->bottom() < obj_top && obj_top <= hr->end(),
 354          "obj_top should be in last region");
 355 
 356   _verifier->check_bitmaps("Humongous Region Allocation", first_hr);
 357 
 358   assert(words_not_fillable == 0 ||
 359          first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
 360          "Miscalculation in humongous allocation");
 361 
 362   increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
 363 
 364   for (uint i = first; i <= last; ++i) {
 365     hr = region_at(i);
 366     _humongous_set.add(hr);
 367     _hr_printer.alloc(hr);
 368   }
 369 
 370   return new_obj;
 371 }
 372 
 373 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
 374   assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
 375   return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
 376 }
 377 
 378 // If could fit into free regions w/o expansion, try.
 379 // Otherwise, if can expand, do so.
 380 // Otherwise, if using ex regions might help, try with ex given back.
 381 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 382   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 383 
 384   _verifier->verify_region_sets_optional();
 385 
 386   uint first = G1_NO_HRM_INDEX;
 387   uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
 388 
 389   if (obj_regions == 1) {
 390     // Only one region to allocate, try to use a fast path by directly allocating
 391     // from the free lists. Do not try to expand here, we will potentially do that
 392     // later.
 393     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 394     if (hr != NULL) {
 395       first = hr->hrm_index();
 396     }
 397   } else {
 398     // We can't allocate humongous regions spanning more than one region while
 399     // cleanupComplete() is running, since some of the regions we find to be
 400     // empty might not yet be added to the free list. It is not straightforward
 401     // to know in which list they are on so that we can remove them. We only
 402     // need to do this if we need to allocate more than one region to satisfy the
 403     // current humongous allocation request. If we are only allocating one region
 404     // we use the one-region region allocation code (see above), that already
 405     // potentially waits for regions from the secondary free list.
 406     wait_while_free_regions_coming();
 407     append_secondary_free_list_if_not_empty_with_lock();
 408 
 409     // Policy: Try only empty regions (i.e. already committed first). Maybe we
 410     // are lucky enough to find some.
 411     first = _hrm.find_contiguous_only_empty(obj_regions);
 412     if (first != G1_NO_HRM_INDEX) {
 413       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 414     }
 415   }
 416 
 417   if (first == G1_NO_HRM_INDEX) {
 418     // Policy: We could not find enough regions for the humongous object in the
 419     // free list. Look through the heap to find a mix of free and uncommitted regions.
 420     // If so, try expansion.
 421     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 422     if (first != G1_NO_HRM_INDEX) {
 423       // We found something. Make sure these regions are committed, i.e. expand
 424       // the heap. Alternatively we could do a defragmentation GC.
 425       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
 426                                     word_size * HeapWordSize);
 427 
 428       _hrm.expand_at(first, obj_regions, workers());
 429       g1_policy()->record_new_heap_size(num_regions());
 430 
 431 #ifdef ASSERT
 432       for (uint i = first; i < first + obj_regions; ++i) {
 433         HeapRegion* hr = region_at(i);
 434         assert(hr->is_free(), "sanity");
 435         assert(hr->is_empty(), "sanity");
 436         assert(is_on_master_free_list(hr), "sanity");
 437       }
 438 #endif
 439       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 440     } else {
 441       // Policy: Potentially trigger a defragmentation GC.
 442     }
 443   }
 444 
 445   HeapWord* result = NULL;
 446   if (first != G1_NO_HRM_INDEX) {
 447     result = humongous_obj_allocate_initialize_regions(first, obj_regions,
 448                                                        word_size, context);
 449     assert(result != NULL, "it should always return a valid result");
 450 
 451     // A successful humongous object allocation changes the used space
 452     // information of the old generation so we need to recalculate the
 453     // sizes and update the jstat counters here.
 454     g1mm()->update_sizes();
 455   }
 456 
 457   _verifier->verify_region_sets_optional();
 458 
 459   return result;
 460 }
 461 
 462 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 463   assert_heap_not_locked_and_not_at_safepoint();
 464   assert(!is_humongous(word_size), "we do not allow humongous TLABs");
 465 
 466   uint dummy_gc_count_before;
 467   uint dummy_gclocker_retry_count = 0;
 468   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 469 }
 470 
 471 HeapWord*
 472 G1CollectedHeap::mem_allocate(size_t word_size,
 473                               bool*  gc_overhead_limit_was_exceeded) {
 474   assert_heap_not_locked_and_not_at_safepoint();
 475 
 476   // Loop until the allocation is satisfied, or unsatisfied after GC.
 477   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
 478     uint gc_count_before;
 479 
 480     HeapWord* result = NULL;
 481     if (!is_humongous(word_size)) {
 482       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
 483     } else {
 484       result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
 485     }
 486     if (result != NULL) {
 487       return result;
 488     }
 489 
 490     // Create the garbage collection operation...
 491     VM_G1CollectForAllocation op(gc_count_before, word_size);
 492     op.set_allocation_context(AllocationContext::current());
 493 
 494     // ...and get the VM thread to execute it.
 495     VMThread::execute(&op);
 496 
 497     if (op.prologue_succeeded() && op.pause_succeeded()) {
 498       // If the operation was successful we'll return the result even
 499       // if it is NULL. If the allocation attempt failed immediately
 500       // after a Full GC, it's unlikely we'll be able to allocate now.
 501       HeapWord* result = op.result();
 502       if (result != NULL && !is_humongous(word_size)) {
 503         // Allocations that take place on VM operations do not do any
 504         // card dirtying and we have to do it here. We only have to do
 505         // this for non-humongous allocations, though.
 506         dirty_young_block(result, word_size);
 507       }
 508       return result;
 509     } else {
 510       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
 511         return NULL;
 512       }
 513       assert(op.result() == NULL,
 514              "the result should be NULL if the VM op did not succeed");
 515     }
 516 
 517     // Give a warning if we seem to be looping forever.
 518     if ((QueuedAllocationWarningCount > 0) &&
 519         (try_count % QueuedAllocationWarningCount == 0)) {
 520       log_warning(gc)("G1CollectedHeap::mem_allocate retries %d times", try_count);
 521     }
 522   }
 523 
 524   ShouldNotReachHere();
 525   return NULL;
 526 }
 527 
 528 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 529                                                    AllocationContext_t context,
 530                                                    uint* gc_count_before_ret,
 531                                                    uint* gclocker_retry_count_ret) {
 532   // Make sure you read the note in attempt_allocation_humongous().
 533 
 534   assert_heap_not_locked_and_not_at_safepoint();
 535   assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
 536          "be called for humongous allocation requests");
 537 
 538   // We should only get here after the first-level allocation attempt
 539   // (attempt_allocation()) failed to allocate.
 540 
 541   // We will loop until a) we manage to successfully perform the
 542   // allocation or b) we successfully schedule a collection which
 543   // fails to perform the allocation. b) is the only case when we'll
 544   // return NULL.
 545   HeapWord* result = NULL;
 546   for (int try_count = 1; /* we'll return */; try_count += 1) {
 547     bool should_try_gc;
 548     uint gc_count_before;
 549 
 550     {
 551       MutexLockerEx x(Heap_lock);
 552       result = _allocator->attempt_allocation_locked(word_size, context);
 553       if (result != NULL) {
 554         return result;
 555       }
 556 
 557       if (GCLocker::is_active_and_needs_gc()) {
 558         if (g1_policy()->can_expand_young_list()) {
 559           // No need for an ergo verbose message here,
 560           // can_expand_young_list() does this when it returns true.
 561           result = _allocator->attempt_allocation_force(word_size, context);
 562           if (result != NULL) {
 563             return result;
 564           }
 565         }
 566         should_try_gc = false;
 567       } else {
 568         // The GCLocker may not be active but the GCLocker initiated
 569         // GC may not yet have been performed (GCLocker::needs_gc()
 570         // returns true). In this case we do not try this GC and
 571         // wait until the GCLocker initiated GC is performed, and
 572         // then retry the allocation.
 573         if (GCLocker::needs_gc()) {
 574           should_try_gc = false;
 575         } else {
 576           // Read the GC count while still holding the Heap_lock.
 577           gc_count_before = total_collections();
 578           should_try_gc = true;
 579         }
 580       }
 581     }
 582 
 583     if (should_try_gc) {
 584       bool succeeded;
 585       result = do_collection_pause(word_size, gc_count_before, &succeeded,
 586                                    GCCause::_g1_inc_collection_pause);
 587       if (result != NULL) {
 588         assert(succeeded, "only way to get back a non-NULL result");
 589         return result;
 590       }
 591 
 592       if (succeeded) {
 593         // If we get here we successfully scheduled a collection which
 594         // failed to allocate. No point in trying to allocate
 595         // further. We'll just return NULL.
 596         MutexLockerEx x(Heap_lock);
 597         *gc_count_before_ret = total_collections();
 598         return NULL;
 599       }
 600     } else {
 601       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
 602         MutexLockerEx x(Heap_lock);
 603         *gc_count_before_ret = total_collections();
 604         return NULL;
 605       }
 606       // The GCLocker is either active or the GCLocker initiated
 607       // GC has not yet been performed. Stall until it is and
 608       // then retry the allocation.
 609       GCLocker::stall_until_clear();
 610       (*gclocker_retry_count_ret) += 1;
 611     }
 612 
 613     // We can reach here if we were unsuccessful in scheduling a
 614     // collection (because another thread beat us to it) or if we were
 615     // stalled due to the GC locker. In either can we should retry the
 616     // allocation attempt in case another thread successfully
 617     // performed a collection and reclaimed enough space. We do the
 618     // first attempt (without holding the Heap_lock) here and the
 619     // follow-on attempt will be at the start of the next loop
 620     // iteration (after taking the Heap_lock).
 621     result = _allocator->attempt_allocation(word_size, context);
 622     if (result != NULL) {
 623       return result;
 624     }
 625 
 626     // Give a warning if we seem to be looping forever.
 627     if ((QueuedAllocationWarningCount > 0) &&
 628         (try_count % QueuedAllocationWarningCount == 0)) {
 629       log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() "
 630                       "retries %d times", try_count);
 631     }
 632   }
 633 
 634   ShouldNotReachHere();
 635   return NULL;
 636 }
 637 
 638 void G1CollectedHeap::begin_archive_alloc_range() {
 639   assert_at_safepoint(true /* should_be_vm_thread */);
 640   if (_archive_allocator == NULL) {
 641     _archive_allocator = G1ArchiveAllocator::create_allocator(this);
 642   }
 643 }
 644 
 645 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
 646   // Allocations in archive regions cannot be of a size that would be considered
 647   // humongous even for a minimum-sized region, because G1 region sizes/boundaries
 648   // may be different at archive-restore time.
 649   return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
 650 }
 651 
 652 HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
 653   assert_at_safepoint(true /* should_be_vm_thread */);
 654   assert(_archive_allocator != NULL, "_archive_allocator not initialized");
 655   if (is_archive_alloc_too_large(word_size)) {
 656     return NULL;
 657   }
 658   return _archive_allocator->archive_mem_allocate(word_size);
 659 }
 660 
 661 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 662                                               size_t end_alignment_in_bytes) {
 663   assert_at_safepoint(true /* should_be_vm_thread */);
 664   assert(_archive_allocator != NULL, "_archive_allocator not initialized");
 665 
 666   // Call complete_archive to do the real work, filling in the MemRegion
 667   // array with the archive regions.
 668   _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
 669   delete _archive_allocator;
 670   _archive_allocator = NULL;
 671 }
 672 
 673 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
 674   assert(ranges != NULL, "MemRegion array NULL");
 675   assert(count != 0, "No MemRegions provided");
 676   MemRegion reserved = _hrm.reserved();
 677   for (size_t i = 0; i < count; i++) {
 678     if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
 679       return false;
 680     }
 681   }
 682   return true;
 683 }
 684 
 685 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
 686   assert(!is_init_completed(), "Expect to be called at JVM init time");
 687   assert(ranges != NULL, "MemRegion array NULL");
 688   assert(count != 0, "No MemRegions provided");
 689   MutexLockerEx x(Heap_lock);
 690 
 691   MemRegion reserved = _hrm.reserved();
 692   HeapWord* prev_last_addr = NULL;
 693   HeapRegion* prev_last_region = NULL;
 694 
 695   // Temporarily disable pretouching of heap pages. This interface is used
 696   // when mmap'ing archived heap data in, so pre-touching is wasted.
 697   FlagSetting fs(AlwaysPreTouch, false);
 698 
 699   // Enable archive object checking used by G1MarkSweep. We have to let it know
 700   // about each archive range, so that objects in those ranges aren't marked.
 701   G1ArchiveAllocator::enable_archive_object_check();
 702 
 703   // For each specified MemRegion range, allocate the corresponding G1
 704   // regions and mark them as archive regions. We expect the ranges in
 705   // ascending starting address order, without overlap.
 706   for (size_t i = 0; i < count; i++) {
 707     MemRegion curr_range = ranges[i];
 708     HeapWord* start_address = curr_range.start();
 709     size_t word_size = curr_range.word_size();
 710     HeapWord* last_address = curr_range.last();
 711     size_t commits = 0;
 712 
 713     guarantee(reserved.contains(start_address) && reserved.contains(last_address),
 714               "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
 715               p2i(start_address), p2i(last_address));
 716     guarantee(start_address > prev_last_addr,
 717               "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
 718               p2i(start_address), p2i(prev_last_addr));
 719     prev_last_addr = last_address;
 720 
 721     // Check for ranges that start in the same G1 region in which the previous
 722     // range ended, and adjust the start address so we don't try to allocate
 723     // the same region again. If the current range is entirely within that
 724     // region, skip it, just adjusting the recorded top.
 725     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 726     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
 727       start_address = start_region->end();
 728       if (start_address > last_address) {
 729         increase_used(word_size * HeapWordSize);
 730         start_region->set_top(last_address + 1);
 731         continue;
 732       }
 733       start_region->set_top(start_address);
 734       curr_range = MemRegion(start_address, last_address + 1);
 735       start_region = _hrm.addr_to_region(start_address);
 736     }
 737 
 738     // Perform the actual region allocation, exiting if it fails.
 739     // Then note how much new space we have allocated.
 740     if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
 741       return false;
 742     }
 743     increase_used(word_size * HeapWordSize);
 744     if (commits != 0) {
 745       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
 746                                 HeapRegion::GrainWords * HeapWordSize * commits);
 747 
 748     }
 749 
 750     // Mark each G1 region touched by the range as archive, add it to the old set,
 751     // and set the allocation context and top.
 752     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 753     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 754     prev_last_region = last_region;
 755 
 756     while (curr_region != NULL) {
 757       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 758              "Region already in use (index %u)", curr_region->hrm_index());
 759       curr_region->set_allocation_context(AllocationContext::system());
 760       curr_region->set_archive();
 761       _hr_printer.alloc(curr_region);
 762       _old_set.add(curr_region);
 763       if (curr_region != last_region) {
 764         curr_region->set_top(curr_region->end());
 765         curr_region = _hrm.next_region_in_heap(curr_region);
 766       } else {
 767         curr_region->set_top(last_address + 1);
 768         curr_region = NULL;
 769       }
 770     }
 771 
 772     // Notify mark-sweep of the archive range.
 773     G1ArchiveAllocator::set_range_archive(curr_range, true);
 774   }
 775   return true;
 776 }
 777 
 778 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
 779   assert(!is_init_completed(), "Expect to be called at JVM init time");
 780   assert(ranges != NULL, "MemRegion array NULL");
 781   assert(count != 0, "No MemRegions provided");
 782   MemRegion reserved = _hrm.reserved();
 783   HeapWord *prev_last_addr = NULL;
 784   HeapRegion* prev_last_region = NULL;
 785 
 786   // For each MemRegion, create filler objects, if needed, in the G1 regions
 787   // that contain the address range. The address range actually within the
 788   // MemRegion will not be modified. That is assumed to have been initialized
 789   // elsewhere, probably via an mmap of archived heap data.
 790   MutexLockerEx x(Heap_lock);
 791   for (size_t i = 0; i < count; i++) {
 792     HeapWord* start_address = ranges[i].start();
 793     HeapWord* last_address = ranges[i].last();
 794 
 795     assert(reserved.contains(start_address) && reserved.contains(last_address),
 796            "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
 797            p2i(start_address), p2i(last_address));
 798     assert(start_address > prev_last_addr,
 799            "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
 800            p2i(start_address), p2i(prev_last_addr));
 801 
 802     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 803     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 804     HeapWord* bottom_address = start_region->bottom();
 805 
 806     // Check for a range beginning in the same region in which the
 807     // previous one ended.
 808     if (start_region == prev_last_region) {
 809       bottom_address = prev_last_addr + 1;
 810     }
 811 
 812     // Verify that the regions were all marked as archive regions by
 813     // alloc_archive_regions.
 814     HeapRegion* curr_region = start_region;
 815     while (curr_region != NULL) {
 816       guarantee(curr_region->is_archive(),
 817                 "Expected archive region at index %u", curr_region->hrm_index());
 818       if (curr_region != last_region) {
 819         curr_region = _hrm.next_region_in_heap(curr_region);
 820       } else {
 821         curr_region = NULL;
 822       }
 823     }
 824 
 825     prev_last_addr = last_address;
 826     prev_last_region = last_region;
 827 
 828     // Fill the memory below the allocated range with dummy object(s),
 829     // if the region bottom does not match the range start, or if the previous
 830     // range ended within the same G1 region, and there is a gap.
 831     if (start_address != bottom_address) {
 832       size_t fill_size = pointer_delta(start_address, bottom_address);
 833       G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
 834       increase_used(fill_size * HeapWordSize);
 835     }
 836   }
 837 }
 838 
 839 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
 840                                                      uint* gc_count_before_ret,
 841                                                      uint* gclocker_retry_count_ret) {
 842   assert_heap_not_locked_and_not_at_safepoint();
 843   assert(!is_humongous(word_size), "attempt_allocation() should not "
 844          "be called for humongous allocation requests");
 845 
 846   AllocationContext_t context = AllocationContext::current();
 847   HeapWord* result = _allocator->attempt_allocation(word_size, context);
 848 
 849   if (result == NULL) {
 850     result = attempt_allocation_slow(word_size,
 851                                      context,
 852                                      gc_count_before_ret,
 853                                      gclocker_retry_count_ret);
 854   }
 855   assert_heap_not_locked();
 856   if (result != NULL) {
 857     dirty_young_block(result, word_size);
 858   }
 859   return result;
 860 }
 861 
 862 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
 863   assert(!is_init_completed(), "Expect to be called at JVM init time");
 864   assert(ranges != NULL, "MemRegion array NULL");
 865   assert(count != 0, "No MemRegions provided");
 866   MemRegion reserved = _hrm.reserved();
 867   HeapWord* prev_last_addr = NULL;
 868   HeapRegion* prev_last_region = NULL;
 869   size_t size_used = 0;
 870   size_t uncommitted_regions = 0;
 871 
 872   // For each Memregion, free the G1 regions that constitute it, and
 873   // notify mark-sweep that the range is no longer to be considered 'archive.'
 874   MutexLockerEx x(Heap_lock);
 875   for (size_t i = 0; i < count; i++) {
 876     HeapWord* start_address = ranges[i].start();
 877     HeapWord* last_address = ranges[i].last();
 878 
 879     assert(reserved.contains(start_address) && reserved.contains(last_address),
 880            "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
 881            p2i(start_address), p2i(last_address));
 882     assert(start_address > prev_last_addr,
 883            "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
 884            p2i(start_address), p2i(prev_last_addr));
 885     size_used += ranges[i].byte_size();
 886     prev_last_addr = last_address;
 887 
 888     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 889     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 890 
 891     // Check for ranges that start in the same G1 region in which the previous
 892     // range ended, and adjust the start address so we don't try to free
 893     // the same region again. If the current range is entirely within that
 894     // region, skip it.
 895     if (start_region == prev_last_region) {
 896       start_address = start_region->end();
 897       if (start_address > last_address) {
 898         continue;
 899       }
 900       start_region = _hrm.addr_to_region(start_address);
 901     }
 902     prev_last_region = last_region;
 903 
 904     // After verifying that each region was marked as an archive region by
 905     // alloc_archive_regions, set it free and empty and uncommit it.
 906     HeapRegion* curr_region = start_region;
 907     while (curr_region != NULL) {
 908       guarantee(curr_region->is_archive(),
 909                 "Expected archive region at index %u", curr_region->hrm_index());
 910       uint curr_index = curr_region->hrm_index();
 911       _old_set.remove(curr_region);
 912       curr_region->set_free();
 913       curr_region->set_top(curr_region->bottom());
 914       if (curr_region != last_region) {
 915         curr_region = _hrm.next_region_in_heap(curr_region);
 916       } else {
 917         curr_region = NULL;
 918       }
 919       _hrm.shrink_at(curr_index, 1);
 920       uncommitted_regions++;
 921     }
 922 
 923     // Notify mark-sweep that this is no longer an archive range.
 924     G1ArchiveAllocator::set_range_archive(ranges[i], false);
 925   }
 926 
 927   if (uncommitted_regions != 0) {
 928     log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
 929                               HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
 930   }
 931   decrease_used(size_used);
 932 }
 933 
 934 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
 935                                                         uint* gc_count_before_ret,
 936                                                         uint* gclocker_retry_count_ret) {
 937   // The structure of this method has a lot of similarities to
 938   // attempt_allocation_slow(). The reason these two were not merged
 939   // into a single one is that such a method would require several "if
 940   // allocation is not humongous do this, otherwise do that"
 941   // conditional paths which would obscure its flow. In fact, an early
 942   // version of this code did use a unified method which was harder to
 943   // follow and, as a result, it had subtle bugs that were hard to
 944   // track down. So keeping these two methods separate allows each to
 945   // be more readable. It will be good to keep these two in sync as
 946   // much as possible.
 947 
 948   assert_heap_not_locked_and_not_at_safepoint();
 949   assert(is_humongous(word_size), "attempt_allocation_humongous() "
 950          "should only be called for humongous allocations");
 951 
 952   // Humongous objects can exhaust the heap quickly, so we should check if we
 953   // need to start a marking cycle at each humongous object allocation. We do
 954   // the check before we do the actual allocation. The reason for doing it
 955   // before the allocation is that we avoid having to keep track of the newly
 956   // allocated memory while we do a GC.
 957   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
 958                                            word_size)) {
 959     collect(GCCause::_g1_humongous_allocation);
 960   }
 961 
 962   // We will loop until a) we manage to successfully perform the
 963   // allocation or b) we successfully schedule a collection which
 964   // fails to perform the allocation. b) is the only case when we'll
 965   // return NULL.
 966   HeapWord* result = NULL;
 967   for (int try_count = 1; /* we'll return */; try_count += 1) {
 968     bool should_try_gc;
 969     uint gc_count_before;
 970 
 971     {
 972       MutexLockerEx x(Heap_lock);
 973 
 974       // Given that humongous objects are not allocated in young
 975       // regions, we'll first try to do the allocation without doing a
 976       // collection hoping that there's enough space in the heap.
 977       result = humongous_obj_allocate(word_size, AllocationContext::current());
 978       if (result != NULL) {
 979         size_t size_in_regions = humongous_obj_size_in_regions(word_size);
 980         g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
 981         return result;
 982       }
 983 
 984       if (GCLocker::is_active_and_needs_gc()) {
 985         should_try_gc = false;
 986       } else {
 987          // The GCLocker may not be active but the GCLocker initiated
 988         // GC may not yet have been performed (GCLocker::needs_gc()
 989         // returns true). In this case we do not try this GC and
 990         // wait until the GCLocker initiated GC is performed, and
 991         // then retry the allocation.
 992         if (GCLocker::needs_gc()) {
 993           should_try_gc = false;
 994         } else {
 995           // Read the GC count while still holding the Heap_lock.
 996           gc_count_before = total_collections();
 997           should_try_gc = true;
 998         }
 999       }
1000     }
1001 
1002     if (should_try_gc) {
1003       // If we failed to allocate the humongous object, we should try to
1004       // do a collection pause (if we're allowed) in case it reclaims
1005       // enough space for the allocation to succeed after the pause.
1006 
1007       bool succeeded;
1008       result = do_collection_pause(word_size, gc_count_before, &succeeded,
1009                                    GCCause::_g1_humongous_allocation);
1010       if (result != NULL) {
1011         assert(succeeded, "only way to get back a non-NULL result");
1012         return result;
1013       }
1014 
1015       if (succeeded) {
1016         // If we get here we successfully scheduled a collection which
1017         // failed to allocate. No point in trying to allocate
1018         // further. We'll just return NULL.
1019         MutexLockerEx x(Heap_lock);
1020         *gc_count_before_ret = total_collections();
1021         return NULL;
1022       }
1023     } else {
1024       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1025         MutexLockerEx x(Heap_lock);
1026         *gc_count_before_ret = total_collections();
1027         return NULL;
1028       }
1029       // The GCLocker is either active or the GCLocker initiated
1030       // GC has not yet been performed. Stall until it is and
1031       // then retry the allocation.
1032       GCLocker::stall_until_clear();
1033       (*gclocker_retry_count_ret) += 1;
1034     }
1035 
1036     // We can reach here if we were unsuccessful in scheduling a
1037     // collection (because another thread beat us to it) or if we were
1038     // stalled due to the GC locker. In either can we should retry the
1039     // allocation attempt in case another thread successfully
1040     // performed a collection and reclaimed enough space.  Give a
1041     // warning if we seem to be looping forever.
1042 
1043     if ((QueuedAllocationWarningCount > 0) &&
1044         (try_count % QueuedAllocationWarningCount == 0)) {
1045       log_warning(gc)("G1CollectedHeap::attempt_allocation_humongous() "
1046                       "retries %d times", try_count);
1047     }
1048   }
1049 
1050   ShouldNotReachHere();
1051   return NULL;
1052 }
1053 
1054 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1055                                                            AllocationContext_t context,
1056                                                            bool expect_null_mutator_alloc_region) {
1057   assert_at_safepoint(true /* should_be_vm_thread */);
1058   assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
1059          "the current alloc region was unexpectedly found to be non-NULL");
1060 
1061   if (!is_humongous(word_size)) {
1062     return _allocator->attempt_allocation_locked(word_size, context);
1063   } else {
1064     HeapWord* result = humongous_obj_allocate(word_size, context);
1065     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1066       collector_state()->set_initiate_conc_mark_if_possible(true);
1067     }
1068     return result;
1069   }
1070 
1071   ShouldNotReachHere();
1072 }
1073 
1074 class PostMCRemSetClearClosure: public HeapRegionClosure {
1075   G1CollectedHeap* _g1h;
1076   ModRefBarrierSet* _mr_bs;
1077 public:
1078   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1079     _g1h(g1h), _mr_bs(mr_bs) {}
1080 
1081   bool doHeapRegion(HeapRegion* r) {
1082     HeapRegionRemSet* hrrs = r->rem_set();
1083 
1084     _g1h->reset_gc_time_stamps(r);
1085 
1086     if (r->is_continues_humongous()) {
1087       // We'll assert that the strong code root list and RSet is empty
1088       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1089       assert(hrrs->occupied() == 0, "RSet should be empty");
1090     } else {
1091       hrrs->clear();
1092     }
1093     // You might think here that we could clear just the cards
1094     // corresponding to the used region.  But no: if we leave a dirty card
1095     // in a region we might allocate into, then it would prevent that card
1096     // from being enqueued, and cause it to be missed.
1097     // Re: the performance cost: we shouldn't be doing full GC anyway!
1098     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1099 
1100     return false;
1101   }
1102 };
1103 
1104 void G1CollectedHeap::clear_rsets_post_compaction() {
1105   PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
1106   heap_region_iterate(&rs_clear);
1107 }
1108 
1109 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1110   G1CollectedHeap*   _g1h;
1111   UpdateRSOopClosure _cl;
1112 public:
1113   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
1114     _cl(g1->g1_rem_set(), worker_i),
1115     _g1h(g1)
1116   { }
1117 
1118   bool doHeapRegion(HeapRegion* r) {
1119     if (!r->is_continues_humongous()) {
1120       _cl.set_from(r);
1121       r->oop_iterate(&_cl);
1122     }
1123     return false;
1124   }
1125 };
1126 
1127 class ParRebuildRSTask: public AbstractGangTask {
1128   G1CollectedHeap* _g1;
1129   HeapRegionClaimer _hrclaimer;
1130 
1131 public:
1132   ParRebuildRSTask(G1CollectedHeap* g1) :
1133       AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}
1134 
1135   void work(uint worker_id) {
1136     RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1137     _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
1138   }
1139 };
1140 
1141 class PostCompactionPrinterClosure: public HeapRegionClosure {
1142 private:
1143   G1HRPrinter* _hr_printer;
1144 public:
1145   bool doHeapRegion(HeapRegion* hr) {
1146     assert(!hr->is_young(), "not expecting to find young regions");
1147     _hr_printer->post_compaction(hr);
1148     return false;
1149   }
1150 
1151   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1152     : _hr_printer(hr_printer) { }
1153 };
1154 
1155 void G1CollectedHeap::print_hrm_post_compaction() {
1156   if (_hr_printer.is_active()) {
1157     PostCompactionPrinterClosure cl(hr_printer());
1158     heap_region_iterate(&cl);
1159   }
1160 
1161 }
1162 
1163 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1164                                          bool clear_all_soft_refs) {
1165   assert_at_safepoint(true /* should_be_vm_thread */);
1166 
1167   if (GCLocker::check_active_before_gc()) {
1168     return false;
1169   }
1170 
1171   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1172   gc_timer->register_gc_start();
1173 
1174   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1175   GCIdMark gc_id_mark;
1176   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1177 
1178   SvcGCMarker sgcm(SvcGCMarker::FULL);
1179   ResourceMark rm;
1180 
1181   print_heap_before_gc();
1182   print_heap_regions();
1183   trace_heap_before_gc(gc_tracer);
1184 
1185   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1186 
1187   _verifier->verify_region_sets_optional();
1188 
1189   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1190                            collector_policy()->should_clear_all_soft_refs();
1191 
1192   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1193 
1194   {
1195     IsGCActiveMark x;
1196 
1197     // Timing
1198     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1199     GCTraceCPUTime tcpu;
1200 
1201     {
1202       GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1203       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1204       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1205 
1206       G1HeapTransition heap_transition(this);
1207       g1_policy()->record_full_collection_start();
1208 
1209       // Note: When we have a more flexible GC logging framework that
1210       // allows us to add optional attributes to a GC log record we
1211       // could consider timing and reporting how long we wait in the
1212       // following two methods.
1213       wait_while_free_regions_coming();
1214       // If we start the compaction before the CM threads finish
1215       // scanning the root regions we might trip them over as we'll
1216       // be moving objects / updating references. So let's wait until
1217       // they are done. By telling them to abort, they should complete
1218       // early.
1219       _cm->root_regions()->abort();
1220       _cm->root_regions()->wait_until_scan_finished();
1221       append_secondary_free_list_if_not_empty_with_lock();
1222 
1223       gc_prologue(true);
1224       increment_total_collections(true /* full gc */);
1225       increment_old_marking_cycles_started();
1226 
1227       assert(used() == recalculate_used(), "Should be equal");
1228 
1229       _verifier->verify_before_gc();
1230 
1231       _verifier->check_bitmaps("Full GC Start");
1232       pre_full_gc_dump(gc_timer);
1233 
1234 #if defined(COMPILER2) || INCLUDE_JVMCI
1235       DerivedPointerTable::clear();
1236 #endif
1237 
1238       // Disable discovery and empty the discovered lists
1239       // for the CM ref processor.
1240       ref_processor_cm()->disable_discovery();
1241       ref_processor_cm()->abandon_partial_discovery();
1242       ref_processor_cm()->verify_no_references_recorded();
1243 
1244       // Abandon current iterations of concurrent marking and concurrent
1245       // refinement, if any are in progress.
1246       concurrent_mark()->abort();
1247 
1248       // Make sure we'll choose a new allocation region afterwards.
1249       _allocator->release_mutator_alloc_region();
1250       _allocator->abandon_gc_alloc_regions();
1251       g1_rem_set()->cleanupHRRS();
1252 
1253       // We may have added regions to the current incremental collection
1254       // set between the last GC or pause and now. We need to clear the
1255       // incremental collection set and then start rebuilding it afresh
1256       // after this full GC.
1257       abandon_collection_set(collection_set());
1258 
1259       tear_down_region_sets(false /* free_list_only */);
1260       collector_state()->set_gcs_are_young(true);
1261 
1262       // See the comments in g1CollectedHeap.hpp and
1263       // G1CollectedHeap::ref_processing_init() about
1264       // how reference processing currently works in G1.
1265 
1266       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1267       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1268 
1269       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1270       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1271 
1272       ref_processor_stw()->enable_discovery();
1273       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1274 
1275       // Do collection work
1276       {
1277         HandleMark hm;  // Discard invalid handles created during gc
1278         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1279       }
1280 
1281       assert(num_free_regions() == 0, "we should not have added any free regions");
1282       rebuild_region_sets(false /* free_list_only */);
1283 
1284       // Enqueue any discovered reference objects that have
1285       // not been removed from the discovered lists.
1286       ref_processor_stw()->enqueue_discovered_references();
1287 
1288 #if defined(COMPILER2) || INCLUDE_JVMCI
1289       DerivedPointerTable::update_pointers();
1290 #endif
1291 
1292       MemoryService::track_memory_usage();
1293 
1294       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1295       ref_processor_stw()->verify_no_references_recorded();
1296 
1297       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1298       ClassLoaderDataGraph::purge();
1299       MetaspaceAux::verify_metrics();
1300 
1301       // Note: since we've just done a full GC, concurrent
1302       // marking is no longer active. Therefore we need not
1303       // re-enable reference discovery for the CM ref processor.
1304       // That will be done at the start of the next marking cycle.
1305       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1306       ref_processor_cm()->verify_no_references_recorded();
1307 
1308       reset_gc_time_stamp();
1309       // Since everything potentially moved, we will clear all remembered
1310       // sets, and clear all cards.  Later we will rebuild remembered
1311       // sets. We will also reset the GC time stamps of the regions.
1312       clear_rsets_post_compaction();
1313       check_gc_time_stamps();
1314 
1315       resize_if_necessary_after_full_collection();
1316 
1317       // We should do this after we potentially resize the heap so
1318       // that all the COMMIT / UNCOMMIT events are generated before
1319       // the compaction events.
1320       print_hrm_post_compaction();
1321 
1322       if (_hot_card_cache->use_cache()) {
1323         _hot_card_cache->reset_card_counts();
1324         _hot_card_cache->reset_hot_cache();
1325       }
1326 
1327       // Rebuild remembered sets of all regions.
1328       uint n_workers =
1329         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1330                                                 workers()->active_workers(),
1331                                                 Threads::number_of_non_daemon_threads());
1332       workers()->update_active_workers(n_workers);
1333       log_info(gc,task)("Using %u workers of %u to rebuild remembered set", n_workers, workers()->total_workers());
1334 
1335       ParRebuildRSTask rebuild_rs_task(this);
1336       workers()->run_task(&rebuild_rs_task);
1337 
1338       // Rebuild the strong code root lists for each region
1339       rebuild_strong_code_roots();
1340 
1341       if (true) { // FIXME
1342         MetaspaceGC::compute_new_size();
1343       }
1344 
1345 #ifdef TRACESPINNING
1346       ParallelTaskTerminator::print_termination_counts();
1347 #endif
1348 
1349       // Discard all rset updates
1350       JavaThread::dirty_card_queue_set().abandon_logs();
1351       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1352 
1353       // At this point there should be no regions in the
1354       // entire heap tagged as young.
1355       assert(check_young_list_empty(), "young list should be empty at this point");
1356 
1357       // Update the number of full collections that have been completed.
1358       increment_old_marking_cycles_completed(false /* concurrent */);
1359 
1360       _hrm.verify_optional();
1361       _verifier->verify_region_sets_optional();
1362 
1363       _verifier->verify_after_gc();
1364 
1365       // Clear the previous marking bitmap, if needed for bitmap verification.
1366       // Note we cannot do this when we clear the next marking bitmap in
1367       // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1368       // objects marked during a full GC against the previous bitmap.
1369       // But we need to clear it before calling check_bitmaps below since
1370       // the full GC has compacted objects and updated TAMS but not updated
1371       // the prev bitmap.
1372       if (G1VerifyBitmaps) {
1373         GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1374         _cm->clear_prev_bitmap(workers());
1375       }
1376       _verifier->check_bitmaps("Full GC End");
1377 
1378       start_new_collection_set();
1379 
1380       _allocator->init_mutator_alloc_region();
1381 
1382       g1_policy()->record_full_collection_end();
1383 
1384       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1385       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1386       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1387       // before any GC notifications are raised.
1388       g1mm()->update_sizes();
1389 
1390       gc_epilogue(true);
1391 
1392       heap_transition.print();
1393 
1394       print_heap_after_gc();
1395       print_heap_regions();
1396       trace_heap_after_gc(gc_tracer);
1397 
1398       post_full_gc_dump(gc_timer);
1399     }
1400 
1401     gc_timer->register_gc_end();
1402     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1403   }
1404 
1405   return true;
1406 }
1407 
1408 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1409   // Currently, there is no facility in the do_full_collection(bool) API to notify
1410   // the caller that the collection did not succeed (e.g., because it was locked
1411   // out by the GC locker). So, right now, we'll ignore the return value.
1412   bool dummy = do_full_collection(true,                /* explicit_gc */
1413                                   clear_all_soft_refs);
1414 }
1415 
1416 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1417   // Include bytes that will be pre-allocated to support collections, as "used".
1418   const size_t used_after_gc = used();
1419   const size_t capacity_after_gc = capacity();
1420   const size_t free_after_gc = capacity_after_gc - used_after_gc;
1421 
1422   // This is enforced in arguments.cpp.
1423   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1424          "otherwise the code below doesn't make sense");
1425 
1426   // We don't have floating point command-line arguments
1427   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1428   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1429   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1430   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1431 
1432   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1433   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1434 
1435   // We have to be careful here as these two calculations can overflow
1436   // 32-bit size_t's.
1437   double used_after_gc_d = (double) used_after_gc;
1438   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1439   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1440 
1441   // Let's make sure that they are both under the max heap size, which
1442   // by default will make them fit into a size_t.
1443   double desired_capacity_upper_bound = (double) max_heap_size;
1444   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1445                                     desired_capacity_upper_bound);
1446   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1447                                     desired_capacity_upper_bound);
1448 
1449   // We can now safely turn them into size_t's.
1450   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1451   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1452 
1453   // This assert only makes sense here, before we adjust them
1454   // with respect to the min and max heap size.
1455   assert(minimum_desired_capacity <= maximum_desired_capacity,
1456          "minimum_desired_capacity = " SIZE_FORMAT ", "
1457          "maximum_desired_capacity = " SIZE_FORMAT,
1458          minimum_desired_capacity, maximum_desired_capacity);
1459 
1460   // Should not be greater than the heap max size. No need to adjust
1461   // it with respect to the heap min size as it's a lower bound (i.e.,
1462   // we'll try to make the capacity larger than it, not smaller).
1463   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1464   // Should not be less than the heap min size. No need to adjust it
1465   // with respect to the heap max size as it's an upper bound (i.e.,
1466   // we'll try to make the capacity smaller than it, not greater).
1467   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1468 
1469   if (capacity_after_gc < minimum_desired_capacity) {
1470     // Don't expand unless it's significant
1471     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1472 
1473     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1474                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1475                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1476 
1477     expand(expand_bytes, _workers);
1478 
1479     // No expansion, now see if we want to shrink
1480   } else if (capacity_after_gc > maximum_desired_capacity) {
1481     // Capacity too large, compute shrinking size
1482     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1483 
1484     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1485                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1486                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1487 
1488     shrink(shrink_bytes);
1489   }
1490 }
1491 
1492 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1493                                                             AllocationContext_t context,
1494                                                             bool do_gc,
1495                                                             bool clear_all_soft_refs,
1496                                                             bool expect_null_mutator_alloc_region,
1497                                                             bool* gc_succeeded) {
1498   *gc_succeeded = true;
1499   // Let's attempt the allocation first.
1500   HeapWord* result =
1501     attempt_allocation_at_safepoint(word_size,
1502                                     context,
1503                                     expect_null_mutator_alloc_region);
1504   if (result != NULL) {
1505     assert(*gc_succeeded, "sanity");
1506     return result;
1507   }
1508 
1509   // In a G1 heap, we're supposed to keep allocation from failing by
1510   // incremental pauses.  Therefore, at least for now, we'll favor
1511   // expansion over collection.  (This might change in the future if we can
1512   // do something smarter than full collection to satisfy a failed alloc.)
1513   result = expand_and_allocate(word_size, context);
1514   if (result != NULL) {
1515     assert(*gc_succeeded, "sanity");
1516     return result;
1517   }
1518 
1519   if (do_gc) {
1520     // Expansion didn't work, we'll try to do a Full GC.
1521     *gc_succeeded = do_full_collection(false, /* explicit_gc */
1522                                        clear_all_soft_refs);
1523   }
1524 
1525   return NULL;
1526 }
1527 
1528 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1529                                                      AllocationContext_t context,
1530                                                      bool* succeeded) {
1531   assert_at_safepoint(true /* should_be_vm_thread */);
1532 
1533   // Attempts to allocate followed by Full GC.
1534   HeapWord* result =
1535     satisfy_failed_allocation_helper(word_size,
1536                                      context,
1537                                      true,  /* do_gc */
1538                                      false, /* clear_all_soft_refs */
1539                                      false, /* expect_null_mutator_alloc_region */
1540                                      succeeded);
1541 
1542   if (result != NULL || !*succeeded) {
1543     return result;
1544   }
1545 
1546   // Attempts to allocate followed by Full GC that will collect all soft references.
1547   result = satisfy_failed_allocation_helper(word_size,
1548                                             context,
1549                                             true, /* do_gc */
1550                                             true, /* clear_all_soft_refs */
1551                                             true, /* expect_null_mutator_alloc_region */
1552                                             succeeded);
1553 
1554   if (result != NULL || !*succeeded) {
1555     return result;
1556   }
1557 
1558   // Attempts to allocate, no GC
1559   result = satisfy_failed_allocation_helper(word_size,
1560                                             context,
1561                                             false, /* do_gc */
1562                                             false, /* clear_all_soft_refs */
1563                                             true,  /* expect_null_mutator_alloc_region */
1564                                             succeeded);
1565 
1566   if (result != NULL) {
1567     assert(*succeeded, "sanity");
1568     return result;
1569   }
1570 
1571   assert(!collector_policy()->should_clear_all_soft_refs(),
1572          "Flag should have been handled and cleared prior to this point");
1573 
1574   // What else?  We might try synchronous finalization later.  If the total
1575   // space available is large enough for the allocation, then a more
1576   // complete compaction phase than we've tried so far might be
1577   // appropriate.
1578   assert(*succeeded, "sanity");
1579   return NULL;
1580 }
1581 
1582 // Attempting to expand the heap sufficiently
1583 // to support an allocation of the given "word_size".  If
1584 // successful, perform the allocation and return the address of the
1585 // allocated block, or else "NULL".
1586 
1587 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1588   assert_at_safepoint(true /* should_be_vm_thread */);
1589 
1590   _verifier->verify_region_sets_optional();
1591 
1592   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1593   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1594                             word_size * HeapWordSize);
1595 
1596 
1597   if (expand(expand_bytes, _workers)) {
1598     _hrm.verify_optional();
1599     _verifier->verify_region_sets_optional();
1600     return attempt_allocation_at_safepoint(word_size,
1601                                            context,
1602                                            false /* expect_null_mutator_alloc_region */);
1603   }
1604   return NULL;
1605 }
1606 
1607 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1608   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1609   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1610                                        HeapRegion::GrainBytes);
1611 
1612   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1613                             expand_bytes, aligned_expand_bytes);
1614 
1615   if (is_maximal_no_gc()) {
1616     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1617     return false;
1618   }
1619 
1620   double expand_heap_start_time_sec = os::elapsedTime();
1621   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1622   assert(regions_to_expand > 0, "Must expand by at least one region");
1623 
1624   uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
1625   if (expand_time_ms != NULL) {
1626     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1627   }
1628 
1629   if (expanded_by > 0) {
1630     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1631     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1632     g1_policy()->record_new_heap_size(num_regions());
1633   } else {
1634     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1635 
1636     // The expansion of the virtual storage space was unsuccessful.
1637     // Let's see if it was because we ran out of swap.
1638     if (G1ExitOnExpansionFailure &&
1639         _hrm.available() >= regions_to_expand) {
1640       // We had head room...
1641       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1642     }
1643   }
1644   return regions_to_expand > 0;
1645 }
1646 
1647 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1648   size_t aligned_shrink_bytes =
1649     ReservedSpace::page_align_size_down(shrink_bytes);
1650   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1651                                          HeapRegion::GrainBytes);
1652   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1653 
1654   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1655   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1656 
1657 
1658   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1659                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1660   if (num_regions_removed > 0) {
1661     g1_policy()->record_new_heap_size(num_regions());
1662   } else {
1663     log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
1664   }
1665 }
1666 
1667 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1668   _verifier->verify_region_sets_optional();
1669 
1670   // We should only reach here at the end of a Full GC which means we
1671   // should not not be holding to any GC alloc regions. The method
1672   // below will make sure of that and do any remaining clean up.
1673   _allocator->abandon_gc_alloc_regions();
1674 
1675   // Instead of tearing down / rebuilding the free lists here, we
1676   // could instead use the remove_all_pending() method on free_list to
1677   // remove only the ones that we need to remove.
1678   tear_down_region_sets(true /* free_list_only */);
1679   shrink_helper(shrink_bytes);
1680   rebuild_region_sets(true /* free_list_only */);
1681 
1682   _hrm.verify_optional();
1683   _verifier->verify_region_sets_optional();
1684 }
1685 
1686 // Public methods.
1687 
1688 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1689   CollectedHeap(),
1690   _collector_policy(collector_policy),
1691   _g1_policy(create_g1_policy()),
1692   _collection_set(this, _g1_policy),
1693   _dirty_card_queue_set(false),
1694   _is_alive_closure_cm(this),
1695   _is_alive_closure_stw(this),
1696   _ref_processor_cm(NULL),
1697   _ref_processor_stw(NULL),
1698   _bot(NULL),
1699   _hot_card_cache(NULL),
1700   _g1_rem_set(NULL),
1701   _cg1r(NULL),
1702   _g1mm(NULL),
1703   _refine_cte_cl(NULL),
1704   _preserved_marks_set(true /* in_c_heap */),
1705   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1706   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1707   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1708   _humongous_reclaim_candidates(),
1709   _has_humongous_reclaim_candidates(false),
1710   _archive_allocator(NULL),
1711   _free_regions_coming(false),
1712   _gc_time_stamp(0),
1713   _summary_bytes_used(0),
1714   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1715   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1716   _expand_heap_after_alloc_failure(true),
1717   _old_marking_cycles_started(0),
1718   _old_marking_cycles_completed(0),
1719   _in_cset_fast_test(),
1720   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1721   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1722 
1723   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1724                           /* are_GC_task_threads */true,
1725                           /* are_ConcurrentGC_threads */false);
1726   _workers->initialize_workers();
1727   _verifier = new G1HeapVerifier(this);
1728 
1729   _allocator = G1Allocator::create_allocator(this);
1730 
1731   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1732 
1733   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1734 
1735   // Override the default _filler_array_max_size so that no humongous filler
1736   // objects are created.
1737   _filler_array_max_size = _humongous_object_threshold_in_words;
1738 
1739   uint n_queues = ParallelGCThreads;
1740   _task_queues = new RefToScanQueueSet(n_queues);
1741 
1742   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1743 
1744   for (uint i = 0; i < n_queues; i++) {
1745     RefToScanQueue* q = new RefToScanQueue();
1746     q->initialize();
1747     _task_queues->register_queue(i, q);
1748     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1749   }
1750 
1751   // Initialize the G1EvacuationFailureALot counters and flags.
1752   NOT_PRODUCT(reset_evacuation_should_fail();)
1753 
1754   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1755 }
1756 
1757 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1758                                                                  size_t size,
1759                                                                  size_t translation_factor) {
1760   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1761   // Allocate a new reserved space, preferring to use large pages.
1762   ReservedSpace rs(size, preferred_page_size);
1763   G1RegionToSpaceMapper* result  =
1764     G1RegionToSpaceMapper::create_mapper(rs,
1765                                          size,
1766                                          rs.alignment(),
1767                                          HeapRegion::GrainBytes,
1768                                          translation_factor,
1769                                          mtGC);
1770 
1771   os::trace_page_sizes_for_requested_size(description,
1772                                           size,
1773                                           preferred_page_size,
1774                                           rs.alignment(),
1775                                           rs.base(),
1776                                           rs.size());
1777 
1778   return result;
1779 }
1780 
1781 jint G1CollectedHeap::initialize() {
1782   CollectedHeap::pre_initialize();
1783   os::enable_vtime();
1784 
1785   // Necessary to satisfy locking discipline assertions.
1786 
1787   MutexLocker x(Heap_lock);
1788 
1789   // While there are no constraints in the GC code that HeapWordSize
1790   // be any particular value, there are multiple other areas in the
1791   // system which believe this to be true (e.g. oop->object_size in some
1792   // cases incorrectly returns the size in wordSize units rather than
1793   // HeapWordSize).
1794   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1795 
1796   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1797   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1798   size_t heap_alignment = collector_policy()->heap_alignment();
1799 
1800   // Ensure that the sizes are properly aligned.
1801   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1802   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1803   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1804 
1805   _refine_cte_cl = new RefineCardTableEntryClosure();
1806 
1807   jint ecode = JNI_OK;
1808   _cg1r = ConcurrentG1Refine::create(_refine_cte_cl, &ecode);
1809   if (_cg1r == NULL) {
1810     return ecode;
1811   }
1812 
1813   // Reserve the maximum.
1814 
1815   // When compressed oops are enabled, the preferred heap base
1816   // is calculated by subtracting the requested size from the
1817   // 32Gb boundary and using the result as the base address for
1818   // heap reservation. If the requested size is not aligned to
1819   // HeapRegion::GrainBytes (i.e. the alignment that is passed
1820   // into the ReservedHeapSpace constructor) then the actual
1821   // base of the reserved heap may end up differing from the
1822   // address that was requested (i.e. the preferred heap base).
1823   // If this happens then we could end up using a non-optimal
1824   // compressed oops mode.
1825 
1826   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1827                                                  heap_alignment);
1828 
1829   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1830 
1831   // Create the barrier set for the entire reserved region.
1832   G1SATBCardTableLoggingModRefBS* bs
1833     = new G1SATBCardTableLoggingModRefBS(reserved_region());
1834   bs->initialize();
1835   assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
1836   set_barrier_set(bs);
1837 
1838   // Create the hot card cache.
1839   _hot_card_cache = new G1HotCardCache(this);
1840 
1841   // Also create a G1 rem set.
1842   _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
1843 
1844   // Carve out the G1 part of the heap.
1845   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1846   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1847   G1RegionToSpaceMapper* heap_storage =
1848     G1RegionToSpaceMapper::create_mapper(g1_rs,
1849                                          g1_rs.size(),
1850                                          page_size,
1851                                          HeapRegion::GrainBytes,
1852                                          1,
1853                                          mtJavaHeap);
1854   os::trace_page_sizes("Heap",
1855                        collector_policy()->min_heap_byte_size(),
1856                        max_byte_size,
1857                        page_size,
1858                        heap_rs.base(),
1859                        heap_rs.size());
1860   heap_storage->set_mapping_changed_listener(&_listener);
1861 
1862   // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1863   G1RegionToSpaceMapper* bot_storage =
1864     create_aux_memory_mapper("Block Offset Table",
1865                              G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1866                              G1BlockOffsetTable::heap_map_factor());
1867 
1868   ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
1869   G1RegionToSpaceMapper* cardtable_storage =
1870     create_aux_memory_mapper("Card Table",
1871                              G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
1872                              G1SATBCardTableLoggingModRefBS::heap_map_factor());
1873 
1874   G1RegionToSpaceMapper* card_counts_storage =
1875     create_aux_memory_mapper("Card Counts Table",
1876                              G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1877                              G1CardCounts::heap_map_factor());
1878 
1879   size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1880   G1RegionToSpaceMapper* prev_bitmap_storage =
1881     create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1882   G1RegionToSpaceMapper* next_bitmap_storage =
1883     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1884 
1885   _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1886   g1_barrier_set()->initialize(cardtable_storage);
1887   // Do later initialization work for concurrent refinement.
1888   _hot_card_cache->initialize(card_counts_storage);
1889 
1890   // 6843694 - ensure that the maximum region index can fit
1891   // in the remembered set structures.
1892   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1893   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1894 
1895   g1_rem_set()->initialize(max_capacity(), max_regions());
1896 
1897   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1898   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1899   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1900             "too many cards per region");
1901 
1902   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1903 
1904   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1905 
1906   {
1907     HeapWord* start = _hrm.reserved().start();
1908     HeapWord* end = _hrm.reserved().end();
1909     size_t granularity = HeapRegion::GrainBytes;
1910 
1911     _in_cset_fast_test.initialize(start, end, granularity);
1912     _humongous_reclaim_candidates.initialize(start, end, granularity);
1913   }
1914 
1915   // Create the G1ConcurrentMark data structure and thread.
1916   // (Must do this late, so that "max_regions" is defined.)
1917   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1918   if (_cm == NULL || !_cm->completed_initialization()) {
1919     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1920     return JNI_ENOMEM;
1921   }
1922   _cmThread = _cm->cmThread();
1923 
1924   // Now expand into the initial heap size.
1925   if (!expand(init_byte_size, _workers)) {
1926     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1927     return JNI_ENOMEM;
1928   }
1929 
1930   // Perform any initialization actions delegated to the policy.
1931   g1_policy()->init(this, &_collection_set);
1932 
1933   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1934                                                SATB_Q_FL_lock,
1935                                                G1SATBProcessCompletedThreshold,
1936                                                Shared_SATB_Q_lock);
1937 
1938   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
1939                                                 DirtyCardQ_CBL_mon,
1940                                                 DirtyCardQ_FL_lock,
1941                                                 (int)concurrent_g1_refine()->yellow_zone(),
1942                                                 (int)concurrent_g1_refine()->red_zone(),
1943                                                 Shared_DirtyCardQ_lock,
1944                                                 NULL,  // fl_owner
1945                                                 true); // init_free_ids
1946 
1947   dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
1948                                     DirtyCardQ_CBL_mon,
1949                                     DirtyCardQ_FL_lock,
1950                                     -1, // never trigger processing
1951                                     -1, // no limit on length
1952                                     Shared_DirtyCardQ_lock,
1953                                     &JavaThread::dirty_card_queue_set());
1954 
1955   // Here we allocate the dummy HeapRegion that is required by the
1956   // G1AllocRegion class.
1957   HeapRegion* dummy_region = _hrm.get_dummy_region();
1958 
1959   // We'll re-use the same region whether the alloc region will
1960   // require BOT updates or not and, if it doesn't, then a non-young
1961   // region will complain that it cannot support allocations without
1962   // BOT updates. So we'll tag the dummy region as eden to avoid that.
1963   dummy_region->set_eden();
1964   // Make sure it's full.
1965   dummy_region->set_top(dummy_region->end());
1966   G1AllocRegion::setup(this, dummy_region);
1967 
1968   _allocator->init_mutator_alloc_region();
1969 
1970   // Do create of the monitoring and management support so that
1971   // values in the heap have been properly initialized.
1972   _g1mm = new G1MonitoringSupport(this);
1973 
1974   G1StringDedup::initialize();
1975 
1976   _preserved_marks_set.init(ParallelGCThreads);
1977 
1978   _collection_set.initialize(max_regions());
1979 
1980   return JNI_OK;
1981 }
1982 
1983 void G1CollectedHeap::stop() {
1984   // Stop all concurrent threads. We do this to make sure these threads
1985   // do not continue to execute and access resources (e.g. logging)
1986   // that are destroyed during shutdown.
1987   _cg1r->stop();
1988   _cmThread->stop();
1989   if (G1StringDedup::is_enabled()) {
1990     G1StringDedup::stop();
1991   }
1992 }
1993 
1994 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1995   return HeapRegion::max_region_size();
1996 }
1997 
1998 void G1CollectedHeap::post_initialize() {
1999   ref_processing_init();
2000 }
2001 
2002 void G1CollectedHeap::ref_processing_init() {
2003   // Reference processing in G1 currently works as follows:
2004   //
2005   // * There are two reference processor instances. One is
2006   //   used to record and process discovered references
2007   //   during concurrent marking; the other is used to
2008   //   record and process references during STW pauses
2009   //   (both full and incremental).
2010   // * Both ref processors need to 'span' the entire heap as
2011   //   the regions in the collection set may be dotted around.
2012   //
2013   // * For the concurrent marking ref processor:
2014   //   * Reference discovery is enabled at initial marking.
2015   //   * Reference discovery is disabled and the discovered
2016   //     references processed etc during remarking.
2017   //   * Reference discovery is MT (see below).
2018   //   * Reference discovery requires a barrier (see below).
2019   //   * Reference processing may or may not be MT
2020   //     (depending on the value of ParallelRefProcEnabled
2021   //     and ParallelGCThreads).
2022   //   * A full GC disables reference discovery by the CM
2023   //     ref processor and abandons any entries on it's
2024   //     discovered lists.
2025   //
2026   // * For the STW processor:
2027   //   * Non MT discovery is enabled at the start of a full GC.
2028   //   * Processing and enqueueing during a full GC is non-MT.
2029   //   * During a full GC, references are processed after marking.
2030   //
2031   //   * Discovery (may or may not be MT) is enabled at the start
2032   //     of an incremental evacuation pause.
2033   //   * References are processed near the end of a STW evacuation pause.
2034   //   * For both types of GC:
2035   //     * Discovery is atomic - i.e. not concurrent.
2036   //     * Reference discovery will not need a barrier.
2037 
2038   MemRegion mr = reserved_region();
2039 
2040   // Concurrent Mark ref processor
2041   _ref_processor_cm =
2042     new ReferenceProcessor(mr,    // span
2043                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2044                                 // mt processing
2045                            ParallelGCThreads,
2046                                 // degree of mt processing
2047                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2048                                 // mt discovery
2049                            MAX2(ParallelGCThreads, ConcGCThreads),
2050                                 // degree of mt discovery
2051                            false,
2052                                 // Reference discovery is not atomic
2053                            &_is_alive_closure_cm);
2054                                 // is alive closure
2055                                 // (for efficiency/performance)
2056 
2057   // STW ref processor
2058   _ref_processor_stw =
2059     new ReferenceProcessor(mr,    // span
2060                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2061                                 // mt processing
2062                            ParallelGCThreads,
2063                                 // degree of mt processing
2064                            (ParallelGCThreads > 1),
2065                                 // mt discovery
2066                            ParallelGCThreads,
2067                                 // degree of mt discovery
2068                            true,
2069                                 // Reference discovery is atomic
2070                            &_is_alive_closure_stw);
2071                                 // is alive closure
2072                                 // (for efficiency/performance)
2073 }
2074 
2075 CollectorPolicy* G1CollectedHeap::collector_policy() const {
2076   return _collector_policy;
2077 }
2078 
2079 size_t G1CollectedHeap::capacity() const {
2080   return _hrm.length() * HeapRegion::GrainBytes;
2081 }
2082 
2083 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2084   hr->reset_gc_time_stamp();
2085 }
2086 
2087 #ifndef PRODUCT
2088 
2089 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2090 private:
2091   unsigned _gc_time_stamp;
2092   bool _failures;
2093 
2094 public:
2095   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2096     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2097 
2098   virtual bool doHeapRegion(HeapRegion* hr) {
2099     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2100     if (_gc_time_stamp != region_gc_time_stamp) {
2101       log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
2102                             region_gc_time_stamp, _gc_time_stamp);
2103       _failures = true;
2104     }
2105     return false;
2106   }
2107 
2108   bool failures() { return _failures; }
2109 };
2110 
2111 void G1CollectedHeap::check_gc_time_stamps() {
2112   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2113   heap_region_iterate(&cl);
2114   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2115 }
2116 #endif // PRODUCT
2117 
2118 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2119   _hot_card_cache->drain(cl, worker_i);
2120 }
2121 
2122 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
2123   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2124   size_t n_completed_buffers = 0;
2125   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2126     n_completed_buffers++;
2127   }
2128   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2129   dcqs.clear_n_completed_buffers();
2130   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2131 }
2132 
2133 // Computes the sum of the storage used by the various regions.
2134 size_t G1CollectedHeap::used() const {
2135   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
2136   if (_archive_allocator != NULL) {
2137     result += _archive_allocator->used();
2138   }
2139   return result;
2140 }
2141 
2142 size_t G1CollectedHeap::used_unlocked() const {
2143   return _summary_bytes_used;
2144 }
2145 
2146 class SumUsedClosure: public HeapRegionClosure {
2147   size_t _used;
2148 public:
2149   SumUsedClosure() : _used(0) {}
2150   bool doHeapRegion(HeapRegion* r) {
2151     _used += r->used();
2152     return false;
2153   }
2154   size_t result() { return _used; }
2155 };
2156 
2157 size_t G1CollectedHeap::recalculate_used() const {
2158   double recalculate_used_start = os::elapsedTime();
2159 
2160   SumUsedClosure blk;
2161   heap_region_iterate(&blk);
2162 
2163   g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2164   return blk.result();
2165 }
2166 
2167 bool  G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
2168   switch (cause) {
2169     case GCCause::_java_lang_system_gc:                 return ExplicitGCInvokesConcurrent;
2170     case GCCause::_dcmd_gc_run:                         return ExplicitGCInvokesConcurrent;
2171     case GCCause::_update_allocation_context_stats_inc: return true;
2172     case GCCause::_wb_conc_mark:                        return true;
2173     default :                                           return false;
2174   }
2175 }
2176 
2177 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2178   switch (cause) {
2179     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2180     case GCCause::_g1_humongous_allocation: return true;
2181     default:                                return is_user_requested_concurrent_full_gc(cause);
2182   }
2183 }
2184 
2185 #ifndef PRODUCT
2186 void G1CollectedHeap::allocate_dummy_regions() {
2187   // Let's fill up most of the region
2188   size_t word_size = HeapRegion::GrainWords - 1024;
2189   // And as a result the region we'll allocate will be humongous.
2190   guarantee(is_humongous(word_size), "sanity");
2191 
2192   // _filler_array_max_size is set to humongous object threshold
2193   // but temporarily change it to use CollectedHeap::fill_with_object().
2194   SizeTFlagSetting fs(_filler_array_max_size, word_size);
2195 
2196   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2197     // Let's use the existing mechanism for the allocation
2198     HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2199                                                  AllocationContext::system());
2200     if (dummy_obj != NULL) {
2201       MemRegion mr(dummy_obj, word_size);
2202       CollectedHeap::fill_with_object(mr);
2203     } else {
2204       // If we can't allocate once, we probably cannot allocate
2205       // again. Let's get out of the loop.
2206       break;
2207     }
2208   }
2209 }
2210 #endif // !PRODUCT
2211 
2212 void G1CollectedHeap::increment_old_marking_cycles_started() {
2213   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2214          _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2215          "Wrong marking cycle count (started: %d, completed: %d)",
2216          _old_marking_cycles_started, _old_marking_cycles_completed);
2217 
2218   _old_marking_cycles_started++;
2219 }
2220 
2221 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2222   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2223 
2224   // We assume that if concurrent == true, then the caller is a
2225   // concurrent thread that was joined the Suspendible Thread
2226   // Set. If there's ever a cheap way to check this, we should add an
2227   // assert here.
2228 
2229   // Given that this method is called at the end of a Full GC or of a
2230   // concurrent cycle, and those can be nested (i.e., a Full GC can
2231   // interrupt a concurrent cycle), the number of full collections
2232   // completed should be either one (in the case where there was no
2233   // nesting) or two (when a Full GC interrupted a concurrent cycle)
2234   // behind the number of full collections started.
2235 
2236   // This is the case for the inner caller, i.e. a Full GC.
2237   assert(concurrent ||
2238          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2239          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2240          "for inner caller (Full GC): _old_marking_cycles_started = %u "
2241          "is inconsistent with _old_marking_cycles_completed = %u",
2242          _old_marking_cycles_started, _old_marking_cycles_completed);
2243 
2244   // This is the case for the outer caller, i.e. the concurrent cycle.
2245   assert(!concurrent ||
2246          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2247          "for outer caller (concurrent cycle): "
2248          "_old_marking_cycles_started = %u "
2249          "is inconsistent with _old_marking_cycles_completed = %u",
2250          _old_marking_cycles_started, _old_marking_cycles_completed);
2251 
2252   _old_marking_cycles_completed += 1;
2253 
2254   // We need to clear the "in_progress" flag in the CM thread before
2255   // we wake up any waiters (especially when ExplicitInvokesConcurrent
2256   // is set) so that if a waiter requests another System.gc() it doesn't
2257   // incorrectly see that a marking cycle is still in progress.
2258   if (concurrent) {
2259     _cmThread->set_idle();
2260   }
2261 
2262   // This notify_all() will ensure that a thread that called
2263   // System.gc() with (with ExplicitGCInvokesConcurrent set or not)
2264   // and it's waiting for a full GC to finish will be woken up. It is
2265   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2266   FullGCCount_lock->notify_all();
2267 }
2268 
2269 void G1CollectedHeap::collect(GCCause::Cause cause) {
2270   assert_heap_not_locked();
2271 
2272   uint gc_count_before;
2273   uint old_marking_count_before;
2274   uint full_gc_count_before;
2275   bool retry_gc;
2276 
2277   do {
2278     retry_gc = false;
2279 
2280     {
2281       MutexLocker ml(Heap_lock);
2282 
2283       // Read the GC count while holding the Heap_lock
2284       gc_count_before = total_collections();
2285       full_gc_count_before = total_full_collections();
2286       old_marking_count_before = _old_marking_cycles_started;
2287     }
2288 
2289     if (should_do_concurrent_full_gc(cause)) {
2290       // Schedule an initial-mark evacuation pause that will start a
2291       // concurrent cycle. We're setting word_size to 0 which means that
2292       // we are not requesting a post-GC allocation.
2293       VM_G1IncCollectionPause op(gc_count_before,
2294                                  0,     /* word_size */
2295                                  true,  /* should_initiate_conc_mark */
2296                                  g1_policy()->max_pause_time_ms(),
2297                                  cause);
2298       op.set_allocation_context(AllocationContext::current());
2299 
2300       VMThread::execute(&op);
2301       if (!op.pause_succeeded()) {
2302         if (old_marking_count_before == _old_marking_cycles_started) {
2303           retry_gc = op.should_retry_gc();
2304         } else {
2305           // A Full GC happened while we were trying to schedule the
2306           // initial-mark GC. No point in starting a new cycle given
2307           // that the whole heap was collected anyway.
2308         }
2309 
2310         if (retry_gc) {
2311           if (GCLocker::is_active_and_needs_gc()) {
2312             GCLocker::stall_until_clear();
2313           }
2314         }
2315       }
2316     } else {
2317       if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2318           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2319 
2320         // Schedule a standard evacuation pause. We're setting word_size
2321         // to 0 which means that we are not requesting a post-GC allocation.
2322         VM_G1IncCollectionPause op(gc_count_before,
2323                                    0,     /* word_size */
2324                                    false, /* should_initiate_conc_mark */
2325                                    g1_policy()->max_pause_time_ms(),
2326                                    cause);
2327         VMThread::execute(&op);
2328       } else {
2329         // Schedule a Full GC.
2330         VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2331         VMThread::execute(&op);
2332       }
2333     }
2334   } while (retry_gc);
2335 }
2336 
2337 bool G1CollectedHeap::is_in(const void* p) const {
2338   if (_hrm.reserved().contains(p)) {
2339     // Given that we know that p is in the reserved space,
2340     // heap_region_containing() should successfully
2341     // return the containing region.
2342     HeapRegion* hr = heap_region_containing(p);
2343     return hr->is_in(p);
2344   } else {
2345     return false;
2346   }
2347 }
2348 
2349 #ifdef ASSERT
2350 bool G1CollectedHeap::is_in_exact(const void* p) const {
2351   bool contains = reserved_region().contains(p);
2352   bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2353   if (contains && available) {
2354     return true;
2355   } else {
2356     return false;
2357   }
2358 }
2359 #endif
2360 
2361 // Iteration functions.
2362 
2363 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2364 
2365 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2366   ExtendedOopClosure* _cl;
2367 public:
2368   IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
2369   bool doHeapRegion(HeapRegion* r) {
2370     if (!r->is_continues_humongous()) {
2371       r->oop_iterate(_cl);
2372     }
2373     return false;
2374   }
2375 };
2376 
2377 // Iterates an ObjectClosure over all objects within a HeapRegion.
2378 
2379 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2380   ObjectClosure* _cl;
2381 public:
2382   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2383   bool doHeapRegion(HeapRegion* r) {
2384     if (!r->is_continues_humongous()) {
2385       r->object_iterate(_cl);
2386     }
2387     return false;
2388   }
2389 };
2390 
2391 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2392   IterateObjectClosureRegionClosure blk(cl);
2393   heap_region_iterate(&blk);
2394 }
2395 
2396 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2397   _hrm.iterate(cl);
2398 }
2399 
2400 void
2401 G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
2402                                          uint worker_id,
2403                                          HeapRegionClaimer *hrclaimer,
2404                                          bool concurrent) const {
2405   _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
2406 }
2407 
2408 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2409   _collection_set.iterate(cl);
2410 }
2411 
2412 void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
2413   _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
2414 }
2415 
2416 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2417   HeapRegion* result = _hrm.next_region_in_heap(from);
2418   while (result != NULL && result->is_pinned()) {
2419     result = _hrm.next_region_in_heap(result);
2420   }
2421   return result;
2422 }
2423 
2424 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2425   HeapRegion* hr = heap_region_containing(addr);
2426   return hr->block_start(addr);
2427 }
2428 
2429 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2430   HeapRegion* hr = heap_region_containing(addr);
2431   return hr->block_size(addr);
2432 }
2433 
2434 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2435   HeapRegion* hr = heap_region_containing(addr);
2436   return hr->block_is_obj(addr);
2437 }
2438 
2439 bool G1CollectedHeap::supports_tlab_allocation() const {
2440   return true;
2441 }
2442 
2443 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2444   return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2445 }
2446 
2447 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2448   return _eden.length() * HeapRegion::GrainBytes;
2449 }
2450 
2451 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2452 // must be equal to the humongous object limit.
2453 size_t G1CollectedHeap::max_tlab_size() const {
2454   return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
2455 }
2456 
2457 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2458   AllocationContext_t context = AllocationContext::current();
2459   return _allocator->unsafe_max_tlab_alloc(context);
2460 }
2461 
2462 size_t G1CollectedHeap::max_capacity() const {
2463   return _hrm.reserved().byte_size();
2464 }
2465 
2466 jlong G1CollectedHeap::millis_since_last_gc() {
2467   // See the notes in GenCollectedHeap::millis_since_last_gc()
2468   // for more information about the implementation.
2469   jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2470     _g1_policy->collection_pause_end_millis();
2471   if (ret_val < 0) {
2472     log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2473       ". returning zero instead.", ret_val);
2474     return 0;
2475   }
2476   return ret_val;
2477 }
2478 
2479 void G1CollectedHeap::prepare_for_verify() {
2480   _verifier->prepare_for_verify();
2481 }
2482 
2483 void G1CollectedHeap::verify(VerifyOption vo) {
2484   _verifier->verify(vo);
2485 }
2486 
2487 bool G1CollectedHeap::supports_concurrent_phase_control() const {
2488   return true;
2489 }
2490 
2491 const char* const* G1CollectedHeap::concurrent_phases() const {
2492   return _cmThread->concurrent_phases();
2493 }
2494 
2495 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
2496   return _cmThread->request_concurrent_phase(phase);
2497 }
2498 
2499 class PrintRegionClosure: public HeapRegionClosure {
2500   outputStream* _st;
2501 public:
2502   PrintRegionClosure(outputStream* st) : _st(st) {}
2503   bool doHeapRegion(HeapRegion* r) {
2504     r->print_on(_st);
2505     return false;
2506   }
2507 };
2508 
2509 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2510                                        const HeapRegion* hr,
2511                                        const VerifyOption vo) const {
2512   switch (vo) {
2513   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2514   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2515   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
2516   default:                            ShouldNotReachHere();
2517   }
2518   return false; // keep some compilers happy
2519 }
2520 
2521 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2522                                        const VerifyOption vo) const {
2523   switch (vo) {
2524   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
2525   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
2526   case VerifyOption_G1UseMarkWord: {
2527     HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
2528     return !obj->is_gc_marked() && !hr->is_archive();
2529   }
2530   default:                            ShouldNotReachHere();
2531   }
2532   return false; // keep some compilers happy
2533 }
2534 
2535 void G1CollectedHeap::print_heap_regions() const {
2536   Log(gc, heap, region) log;
2537   if (log.is_trace()) {
2538     ResourceMark rm;
2539     print_regions_on(log.trace_stream());
2540   }
2541 }
2542 
2543 void G1CollectedHeap::print_on(outputStream* st) const {
2544   st->print(" %-20s", "garbage-first heap");
2545   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
2546             capacity()/K, used_unlocked()/K);
2547   st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
2548             p2i(_hrm.reserved().start()),
2549             p2i(_hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords),
2550             p2i(_hrm.reserved().end()));
2551   st->cr();
2552   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
2553   uint young_regions = young_regions_count();
2554   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
2555             (size_t) young_regions * HeapRegion::GrainBytes / K);
2556   uint survivor_regions = survivor_regions_count();
2557   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
2558             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
2559   st->cr();
2560   MetaspaceAux::print_on(st);
2561 }
2562 
2563 void G1CollectedHeap::print_regions_on(outputStream* st) const {
2564   st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
2565                "HS=humongous(starts), HC=humongous(continues), "
2566                "CS=collection set, F=free, A=archive, TS=gc time stamp, "
2567                "AC=allocation context, "
2568                "TAMS=top-at-mark-start (previous, next)");
2569   PrintRegionClosure blk(st);
2570   heap_region_iterate(&blk);
2571 }
2572 
2573 void G1CollectedHeap::print_extended_on(outputStream* st) const {
2574   print_on(st);
2575 
2576   // Print the per-region information.
2577   print_regions_on(st);
2578 }
2579 
2580 void G1CollectedHeap::print_on_error(outputStream* st) const {
2581   this->CollectedHeap::print_on_error(st);
2582 
2583   if (_cm != NULL) {
2584     st->cr();
2585     _cm->print_on_error(st);
2586   }
2587 }
2588 
2589 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2590   workers()->print_worker_threads_on(st);
2591   _cmThread->print_on(st);
2592   st->cr();
2593   _cm->print_worker_threads_on(st);
2594   _cg1r->print_worker_threads_on(st); // also prints the sample thread
2595   if (G1StringDedup::is_enabled()) {
2596     G1StringDedup::print_worker_threads_on(st);
2597   }
2598 }
2599 
2600 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2601   workers()->threads_do(tc);
2602   tc->do_thread(_cmThread);
2603   _cm->threads_do(tc);
2604   _cg1r->threads_do(tc); // also iterates over the sample thread
2605   if (G1StringDedup::is_enabled()) {
2606     G1StringDedup::threads_do(tc);
2607   }
2608 }
2609 
2610 void G1CollectedHeap::print_tracing_info() const {
2611   g1_rem_set()->print_summary_info();
2612   concurrent_mark()->print_summary_info();
2613 }
2614 
2615 #ifndef PRODUCT
2616 // Helpful for debugging RSet issues.
2617 
2618 class PrintRSetsClosure : public HeapRegionClosure {
2619 private:
2620   const char* _msg;
2621   size_t _occupied_sum;
2622 
2623 public:
2624   bool doHeapRegion(HeapRegion* r) {
2625     HeapRegionRemSet* hrrs = r->rem_set();
2626     size_t occupied = hrrs->occupied();
2627     _occupied_sum += occupied;
2628 
2629     tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
2630     if (occupied == 0) {
2631       tty->print_cr("  RSet is empty");
2632     } else {
2633       hrrs->print();
2634     }
2635     tty->print_cr("----------");
2636     return false;
2637   }
2638 
2639   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
2640     tty->cr();
2641     tty->print_cr("========================================");
2642     tty->print_cr("%s", msg);
2643     tty->cr();
2644   }
2645 
2646   ~PrintRSetsClosure() {
2647     tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
2648     tty->print_cr("========================================");
2649     tty->cr();
2650   }
2651 };
2652 
2653 void G1CollectedHeap::print_cset_rsets() {
2654   PrintRSetsClosure cl("Printing CSet RSets");
2655   collection_set_iterate(&cl);
2656 }
2657 
2658 void G1CollectedHeap::print_all_rsets() {
2659   PrintRSetsClosure cl("Printing All RSets");;
2660   heap_region_iterate(&cl);
2661 }
2662 #endif // PRODUCT
2663 
2664 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2665 
2666   size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
2667   size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
2668   size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
2669 
2670   size_t eden_capacity_bytes =
2671     (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
2672 
2673   VirtualSpaceSummary heap_summary = create_heap_space_summary();
2674   return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
2675                        eden_capacity_bytes, survivor_used_bytes, num_regions());
2676 }
2677 
2678 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2679   return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2680                        stats->unused(), stats->used(), stats->region_end_waste(),
2681                        stats->regions_filled(), stats->direct_allocated(),
2682                        stats->failure_used(), stats->failure_waste());
2683 }
2684 
2685 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2686   const G1HeapSummary& heap_summary = create_g1_heap_summary();
2687   gc_tracer->report_gc_heap_summary(when, heap_summary);
2688 
2689   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2690   gc_tracer->report_metaspace_summary(when, metaspace_summary);
2691 }
2692 
2693 G1CollectedHeap* G1CollectedHeap::heap() {
2694   CollectedHeap* heap = Universe::heap();
2695   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2696   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
2697   return (G1CollectedHeap*)heap;
2698 }
2699 
2700 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2701   // always_do_update_barrier = false;
2702   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2703 
2704   double start = os::elapsedTime();
2705   // Fill TLAB's and such
2706   accumulate_statistics_all_tlabs();
2707   ensure_parsability(true);
2708   g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2709 
2710   g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2711 }
2712 
2713 void G1CollectedHeap::gc_epilogue(bool full) {
2714   // we are at the end of the GC. Total collections has already been increased.
2715   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2716 
2717   // FIXME: what is this about?
2718   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2719   // is set.
2720 #if defined(COMPILER2) || INCLUDE_JVMCI
2721   assert(DerivedPointerTable::is_empty(), "derived pointer present");
2722 #endif
2723   // always_do_update_barrier = true;
2724 
2725   double start = os::elapsedTime();
2726   resize_all_tlabs();
2727   g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2728 
2729   allocation_context_stats().update(full);
2730 
2731   // We have just completed a GC. Update the soft reference
2732   // policy with the new heap occupancy
2733   Universe::update_heap_info_at_gc();
2734 }
2735 
2736 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2737                                                uint gc_count_before,
2738                                                bool* succeeded,
2739                                                GCCause::Cause gc_cause) {
2740   assert_heap_not_locked_and_not_at_safepoint();
2741   VM_G1IncCollectionPause op(gc_count_before,
2742                              word_size,
2743                              false, /* should_initiate_conc_mark */
2744                              g1_policy()->max_pause_time_ms(),
2745                              gc_cause);
2746 
2747   op.set_allocation_context(AllocationContext::current());
2748   VMThread::execute(&op);
2749 
2750   HeapWord* result = op.result();
2751   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2752   assert(result == NULL || ret_succeeded,
2753          "the result should be NULL if the VM did not succeed");
2754   *succeeded = ret_succeeded;
2755 
2756   assert_heap_not_locked();
2757   return result;
2758 }
2759 
2760 void
2761 G1CollectedHeap::doConcurrentMark() {
2762   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2763   if (!_cmThread->in_progress()) {
2764     _cmThread->set_started();
2765     CGC_lock->notify();
2766   }
2767 }
2768 
2769 size_t G1CollectedHeap::pending_card_num() {
2770   size_t extra_cards = 0;
2771   JavaThread *curr = Threads::first();
2772   while (curr != NULL) {
2773     DirtyCardQueue& dcq = curr->dirty_card_queue();
2774     extra_cards += dcq.size();
2775     curr = curr->next();
2776   }
2777   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2778   size_t buffer_size = dcqs.buffer_size();
2779   size_t buffer_num = dcqs.completed_buffers_num();
2780 
2781   return buffer_size * buffer_num + extra_cards;
2782 }
2783 
2784 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2785  private:
2786   size_t _total_humongous;
2787   size_t _candidate_humongous;
2788 
2789   DirtyCardQueue _dcq;
2790 
2791   // We don't nominate objects with many remembered set entries, on
2792   // the assumption that such objects are likely still live.
2793   bool is_remset_small(HeapRegion* region) const {
2794     HeapRegionRemSet* const rset = region->rem_set();
2795     return G1EagerReclaimHumongousObjectsWithStaleRefs
2796       ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
2797       : rset->is_empty();
2798   }
2799 
2800   bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
2801     assert(region->is_starts_humongous(), "Must start a humongous object");
2802 
2803     oop obj = oop(region->bottom());
2804 
2805     // Dead objects cannot be eager reclaim candidates. Due to class
2806     // unloading it is unsafe to query their classes so we return early.
2807     if (heap->is_obj_dead(obj, region)) {
2808       return false;
2809     }
2810 
2811     // Candidate selection must satisfy the following constraints
2812     // while concurrent marking is in progress:
2813     //
2814     // * In order to maintain SATB invariants, an object must not be
2815     // reclaimed if it was allocated before the start of marking and
2816     // has not had its references scanned.  Such an object must have
2817     // its references (including type metadata) scanned to ensure no
2818     // live objects are missed by the marking process.  Objects
2819     // allocated after the start of concurrent marking don't need to
2820     // be scanned.
2821     //
2822     // * An object must not be reclaimed if it is on the concurrent
2823     // mark stack.  Objects allocated after the start of concurrent
2824     // marking are never pushed on the mark stack.
2825     //
2826     // Nominating only objects allocated after the start of concurrent
2827     // marking is sufficient to meet both constraints.  This may miss
2828     // some objects that satisfy the constraints, but the marking data
2829     // structures don't support efficiently performing the needed
2830     // additional tests or scrubbing of the mark stack.
2831     //
2832     // However, we presently only nominate is_typeArray() objects.
2833     // A humongous object containing references induces remembered
2834     // set entries on other regions.  In order to reclaim such an
2835     // object, those remembered sets would need to be cleaned up.
2836     //
2837     // We also treat is_typeArray() objects specially, allowing them
2838     // to be reclaimed even if allocated before the start of
2839     // concurrent mark.  For this we rely on mark stack insertion to
2840     // exclude is_typeArray() objects, preventing reclaiming an object
2841     // that is in the mark stack.  We also rely on the metadata for
2842     // such objects to be built-in and so ensured to be kept live.
2843     // Frequent allocation and drop of large binary blobs is an
2844     // important use case for eager reclaim, and this special handling
2845     // may reduce needed headroom.
2846 
2847     return obj->is_typeArray() && is_remset_small(region);
2848   }
2849 
2850  public:
2851   RegisterHumongousWithInCSetFastTestClosure()
2852   : _total_humongous(0),
2853     _candidate_humongous(0),
2854     _dcq(&JavaThread::dirty_card_queue_set()) {
2855   }
2856 
2857   virtual bool doHeapRegion(HeapRegion* r) {
2858     if (!r->is_starts_humongous()) {
2859       return false;
2860     }
2861     G1CollectedHeap* g1h = G1CollectedHeap::heap();
2862 
2863     bool is_candidate = humongous_region_is_candidate(g1h, r);
2864     uint rindex = r->hrm_index();
2865     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2866     if (is_candidate) {
2867       _candidate_humongous++;
2868       g1h->register_humongous_region_with_cset(rindex);
2869       // Is_candidate already filters out humongous object with large remembered sets.
2870       // If we have a humongous object with a few remembered sets, we simply flush these
2871       // remembered set entries into the DCQS. That will result in automatic
2872       // re-evaluation of their remembered set entries during the following evacuation
2873       // phase.
2874       if (!r->rem_set()->is_empty()) {
2875         guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
2876                   "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
2877         G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
2878         HeapRegionRemSetIterator hrrs(r->rem_set());
2879         size_t card_index;
2880         while (hrrs.has_next(card_index)) {
2881           jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
2882           // The remembered set might contain references to already freed
2883           // regions. Filter out such entries to avoid failing card table
2884           // verification.
2885           if (g1h->is_in_closed_subset(bs->addr_for(card_ptr))) {
2886             if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
2887               *card_ptr = CardTableModRefBS::dirty_card_val();
2888               _dcq.enqueue(card_ptr);
2889             }
2890           }
2891         }
2892         assert(hrrs.n_yielded() == r->rem_set()->occupied(),
2893                "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
2894                hrrs.n_yielded(), r->rem_set()->occupied());
2895         r->rem_set()->clear_locked();
2896       }
2897       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
2898     }
2899     _total_humongous++;
2900 
2901     return false;
2902   }
2903 
2904   size_t total_humongous() const { return _total_humongous; }
2905   size_t candidate_humongous() const { return _candidate_humongous; }
2906 
2907   void flush_rem_set_entries() { _dcq.flush(); }
2908 };
2909 
2910 void G1CollectedHeap::register_humongous_regions_with_cset() {
2911   if (!G1EagerReclaimHumongousObjects) {
2912     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
2913     return;
2914   }
2915   double time = os::elapsed_counter();
2916 
2917   // Collect reclaim candidate information and register candidates with cset.
2918   RegisterHumongousWithInCSetFastTestClosure cl;
2919   heap_region_iterate(&cl);
2920 
2921   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
2922   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
2923                                                                   cl.total_humongous(),
2924                                                                   cl.candidate_humongous());
2925   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
2926 
2927   // Finally flush all remembered set entries to re-check into the global DCQS.
2928   cl.flush_rem_set_entries();
2929 }
2930 
2931 class VerifyRegionRemSetClosure : public HeapRegionClosure {
2932   public:
2933     bool doHeapRegion(HeapRegion* hr) {
2934       if (!hr->is_archive() && !hr->is_continues_humongous()) {
2935         hr->verify_rem_set();
2936       }
2937       return false;
2938     }
2939 };
2940 
2941 uint G1CollectedHeap::num_task_queues() const {
2942   return _task_queues->size();
2943 }
2944 
2945 #if TASKQUEUE_STATS
2946 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
2947   st->print_raw_cr("GC Task Stats");
2948   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
2949   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
2950 }
2951 
2952 void G1CollectedHeap::print_taskqueue_stats() const {
2953   if (!log_is_enabled(Trace, gc, task, stats)) {
2954     return;
2955   }
2956   Log(gc, task, stats) log;
2957   ResourceMark rm;
2958   outputStream* st = log.trace_stream();
2959 
2960   print_taskqueue_stats_hdr(st);
2961 
2962   TaskQueueStats totals;
2963   const uint n = num_task_queues();
2964   for (uint i = 0; i < n; ++i) {
2965     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
2966     totals += task_queue(i)->stats;
2967   }
2968   st->print_raw("tot "); totals.print(st); st->cr();
2969 
2970   DEBUG_ONLY(totals.verify());
2971 }
2972 
2973 void G1CollectedHeap::reset_taskqueue_stats() {
2974   const uint n = num_task_queues();
2975   for (uint i = 0; i < n; ++i) {
2976     task_queue(i)->stats.reset();
2977   }
2978 }
2979 #endif // TASKQUEUE_STATS
2980 
2981 void G1CollectedHeap::wait_for_root_region_scanning() {
2982   double scan_wait_start = os::elapsedTime();
2983   // We have to wait until the CM threads finish scanning the
2984   // root regions as it's the only way to ensure that all the
2985   // objects on them have been correctly scanned before we start
2986   // moving them during the GC.
2987   bool waited = _cm->root_regions()->wait_until_scan_finished();
2988   double wait_time_ms = 0.0;
2989   if (waited) {
2990     double scan_wait_end = os::elapsedTime();
2991     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2992   }
2993   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2994 }
2995 
2996 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2997 private:
2998   G1HRPrinter* _hr_printer;
2999 public:
3000   G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
3001 
3002   virtual bool doHeapRegion(HeapRegion* r) {
3003     _hr_printer->cset(r);
3004     return false;
3005   }
3006 };
3007 
3008 void G1CollectedHeap::start_new_collection_set() {
3009   collection_set()->start_incremental_building();
3010 
3011   clear_cset_fast_test();
3012 
3013   guarantee(_eden.length() == 0, "eden should have been cleared");
3014   g1_policy()->transfer_survivors_to_cset(survivor());
3015 }
3016 
3017 bool
3018 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3019   assert_at_safepoint(true /* should_be_vm_thread */);
3020   guarantee(!is_gc_active(), "collection is not reentrant");
3021 
3022   if (GCLocker::check_active_before_gc()) {
3023     return false;
3024   }
3025 
3026   _gc_timer_stw->register_gc_start();
3027 
3028   GCIdMark gc_id_mark;
3029   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3030 
3031   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3032   ResourceMark rm;
3033 
3034   g1_policy()->note_gc_start();
3035 
3036   wait_for_root_region_scanning();
3037 
3038   print_heap_before_gc();
3039   print_heap_regions();
3040   trace_heap_before_gc(_gc_tracer_stw);
3041 
3042   _verifier->verify_region_sets_optional();
3043   _verifier->verify_dirty_young_regions();
3044 
3045   // We should not be doing initial mark unless the conc mark thread is running
3046   if (!_cmThread->should_terminate()) {
3047     // This call will decide whether this pause is an initial-mark
3048     // pause. If it is, during_initial_mark_pause() will return true
3049     // for the duration of this pause.
3050     g1_policy()->decide_on_conc_mark_initiation();
3051   }
3052 
3053   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3054   assert(!collector_state()->during_initial_mark_pause() ||
3055           collector_state()->gcs_are_young(), "sanity");
3056 
3057   // We also do not allow mixed GCs during marking.
3058   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3059 
3060   // Record whether this pause is an initial mark. When the current
3061   // thread has completed its logging output and it's safe to signal
3062   // the CM thread, the flag's value in the policy has been reset.
3063   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3064 
3065   // Inner scope for scope based logging, timers, and stats collection
3066   {
3067     EvacuationInfo evacuation_info;
3068 
3069     if (collector_state()->during_initial_mark_pause()) {
3070       // We are about to start a marking cycle, so we increment the
3071       // full collection counter.
3072       increment_old_marking_cycles_started();
3073       _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
3074     }
3075 
3076     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3077 
3078     GCTraceCPUTime tcpu;
3079 
3080     FormatBuffer<> gc_string("Pause ");
3081     if (collector_state()->during_initial_mark_pause()) {
3082       gc_string.append("Initial Mark");
3083     } else if (collector_state()->gcs_are_young()) {
3084       gc_string.append("Young");
3085     } else {
3086       gc_string.append("Mixed");
3087     }
3088     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3089 
3090     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3091                                                                   workers()->active_workers(),
3092                                                                   Threads::number_of_non_daemon_threads());
3093     workers()->update_active_workers(active_workers);
3094     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
3095 
3096     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3097     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3098 
3099     // If the secondary_free_list is not empty, append it to the
3100     // free_list. No need to wait for the cleanup operation to finish;
3101     // the region allocation code will check the secondary_free_list
3102     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3103     // set, skip this step so that the region allocation code has to
3104     // get entries from the secondary_free_list.
3105     if (!G1StressConcRegionFreeing) {
3106       append_secondary_free_list_if_not_empty_with_lock();
3107     }
3108 
3109     G1HeapTransition heap_transition(this);
3110     size_t heap_used_bytes_before_gc = used();
3111 
3112     // Don't dynamically change the number of GC threads this early.  A value of
3113     // 0 is used to indicate serial work.  When parallel work is done,
3114     // it will be set.
3115 
3116     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3117       IsGCActiveMark x;
3118 
3119       gc_prologue(false);
3120       increment_total_collections(false /* full gc */);
3121       increment_gc_time_stamp();
3122 
3123       if (VerifyRememberedSets) {
3124         log_info(gc, verify)("[Verifying RemSets before GC]");
3125         VerifyRegionRemSetClosure v_cl;
3126         heap_region_iterate(&v_cl);
3127       }
3128 
3129       _verifier->verify_before_gc();
3130 
3131       _verifier->check_bitmaps("GC Start");
3132 
3133 #if defined(COMPILER2) || INCLUDE_JVMCI
3134       DerivedPointerTable::clear();
3135 #endif
3136 
3137       // Please see comment in g1CollectedHeap.hpp and
3138       // G1CollectedHeap::ref_processing_init() to see how
3139       // reference processing currently works in G1.
3140 
3141       // Enable discovery in the STW reference processor
3142       if (g1_policy()->should_process_references()) {
3143         ref_processor_stw()->enable_discovery();
3144       } else {
3145         ref_processor_stw()->disable_discovery();
3146       }
3147 
3148       {
3149         // We want to temporarily turn off discovery by the
3150         // CM ref processor, if necessary, and turn it back on
3151         // on again later if we do. Using a scoped
3152         // NoRefDiscovery object will do this.
3153         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3154 
3155         // Forget the current alloc region (we might even choose it to be part
3156         // of the collection set!).
3157         _allocator->release_mutator_alloc_region();
3158 
3159         // This timing is only used by the ergonomics to handle our pause target.
3160         // It is unclear why this should not include the full pause. We will
3161         // investigate this in CR 7178365.
3162         //
3163         // Preserving the old comment here if that helps the investigation:
3164         //
3165         // The elapsed time induced by the start time below deliberately elides
3166         // the possible verification above.
3167         double sample_start_time_sec = os::elapsedTime();
3168 
3169         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3170 
3171         if (collector_state()->during_initial_mark_pause()) {
3172           concurrent_mark()->checkpointRootsInitialPre();
3173         }
3174 
3175         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
3176 
3177         evacuation_info.set_collectionset_regions(collection_set()->region_length());
3178 
3179         // Make sure the remembered sets are up to date. This needs to be
3180         // done before register_humongous_regions_with_cset(), because the
3181         // remembered sets are used there to choose eager reclaim candidates.
3182         // If the remembered sets are not up to date we might miss some
3183         // entries that need to be handled.
3184         g1_rem_set()->cleanupHRRS();
3185 
3186         register_humongous_regions_with_cset();
3187 
3188         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3189 
3190         // We call this after finalize_cset() to
3191         // ensure that the CSet has been finalized.
3192         _cm->verify_no_cset_oops();
3193 
3194         if (_hr_printer.is_active()) {
3195           G1PrintCollectionSetClosure cl(&_hr_printer);
3196           _collection_set.iterate(&cl);
3197         }
3198 
3199         // Initialize the GC alloc regions.
3200         _allocator->init_gc_alloc_regions(evacuation_info);
3201 
3202         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3203         pre_evacuate_collection_set();
3204 
3205         // Actually do the work...
3206         evacuate_collection_set(evacuation_info, &per_thread_states);
3207 
3208         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3209 
3210         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3211         free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3212 
3213         eagerly_reclaim_humongous_regions();
3214 
3215         record_obj_copy_mem_stats();
3216         _survivor_evac_stats.adjust_desired_plab_sz();
3217         _old_evac_stats.adjust_desired_plab_sz();
3218 
3219         double start = os::elapsedTime();
3220         start_new_collection_set();
3221         g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
3222 
3223         if (evacuation_failed()) {
3224           set_used(recalculate_used());
3225           if (_archive_allocator != NULL) {
3226             _archive_allocator->clear_used();
3227           }
3228           for (uint i = 0; i < ParallelGCThreads; i++) {
3229             if (_evacuation_failed_info_array[i].has_failed()) {
3230               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3231             }
3232           }
3233         } else {
3234           // The "used" of the the collection set have already been subtracted
3235           // when they were freed.  Add in the bytes evacuated.
3236           increase_used(g1_policy()->bytes_copied_during_gc());
3237         }
3238 
3239         if (collector_state()->during_initial_mark_pause()) {
3240           // We have to do this before we notify the CM threads that
3241           // they can start working to make sure that all the
3242           // appropriate initialization is done on the CM object.
3243           concurrent_mark()->checkpointRootsInitialPost();
3244           collector_state()->set_mark_in_progress(true);
3245           // Note that we don't actually trigger the CM thread at
3246           // this point. We do that later when we're sure that
3247           // the current thread has completed its logging output.
3248         }
3249 
3250         allocate_dummy_regions();
3251 
3252         _allocator->init_mutator_alloc_region();
3253 
3254         {
3255           size_t expand_bytes = _heap_sizing_policy->expansion_amount();
3256           if (expand_bytes > 0) {
3257             size_t bytes_before = capacity();
3258             // No need for an ergo logging here,
3259             // expansion_amount() does this when it returns a value > 0.
3260             double expand_ms;
3261             if (!expand(expand_bytes, _workers, &expand_ms)) {
3262               // We failed to expand the heap. Cannot do anything about it.
3263             }
3264             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3265           }
3266         }
3267 
3268         // We redo the verification but now wrt to the new CSet which
3269         // has just got initialized after the previous CSet was freed.
3270         _cm->verify_no_cset_oops();
3271 
3272         // This timing is only used by the ergonomics to handle our pause target.
3273         // It is unclear why this should not include the full pause. We will
3274         // investigate this in CR 7178365.
3275         double sample_end_time_sec = os::elapsedTime();
3276         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3277         size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3278         g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3279 
3280         evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
3281         evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3282 
3283         MemoryService::track_memory_usage();
3284 
3285         // In prepare_for_verify() below we'll need to scan the deferred
3286         // update buffers to bring the RSets up-to-date if
3287         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3288         // the update buffers we'll probably need to scan cards on the
3289         // regions we just allocated to (i.e., the GC alloc
3290         // regions). However, during the last GC we called
3291         // set_saved_mark() on all the GC alloc regions, so card
3292         // scanning might skip the [saved_mark_word()...top()] area of
3293         // those regions (i.e., the area we allocated objects into
3294         // during the last GC). But it shouldn't. Given that
3295         // saved_mark_word() is conditional on whether the GC time stamp
3296         // on the region is current or not, by incrementing the GC time
3297         // stamp here we invalidate all the GC time stamps on all the
3298         // regions and saved_mark_word() will simply return top() for
3299         // all the regions. This is a nicer way of ensuring this rather
3300         // than iterating over the regions and fixing them. In fact, the
3301         // GC time stamp increment here also ensures that
3302         // saved_mark_word() will return top() between pauses, i.e.,
3303         // during concurrent refinement. So we don't need the
3304         // is_gc_active() check to decided which top to use when
3305         // scanning cards (see CR 7039627).
3306         increment_gc_time_stamp();
3307 
3308         if (VerifyRememberedSets) {
3309           log_info(gc, verify)("[Verifying RemSets after GC]");
3310           VerifyRegionRemSetClosure v_cl;
3311           heap_region_iterate(&v_cl);
3312         }
3313 
3314         _verifier->verify_after_gc();
3315         _verifier->check_bitmaps("GC End");
3316 
3317         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3318         ref_processor_stw()->verify_no_references_recorded();
3319 
3320         // CM reference discovery will be re-enabled if necessary.
3321       }
3322 
3323 #ifdef TRACESPINNING
3324       ParallelTaskTerminator::print_termination_counts();
3325 #endif
3326 
3327       gc_epilogue(false);
3328     }
3329 
3330     // Print the remainder of the GC log output.
3331     if (evacuation_failed()) {
3332       log_info(gc)("To-space exhausted");
3333     }
3334 
3335     g1_policy()->print_phases();
3336     heap_transition.print();
3337 
3338     // It is not yet to safe to tell the concurrent mark to
3339     // start as we have some optional output below. We don't want the
3340     // output from the concurrent mark thread interfering with this
3341     // logging output either.
3342 
3343     _hrm.verify_optional();
3344     _verifier->verify_region_sets_optional();
3345 
3346     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3347     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3348 
3349     print_heap_after_gc();
3350     print_heap_regions();
3351     trace_heap_after_gc(_gc_tracer_stw);
3352 
3353     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3354     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3355     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3356     // before any GC notifications are raised.
3357     g1mm()->update_sizes();
3358 
3359     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3360     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3361     _gc_timer_stw->register_gc_end();
3362     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3363   }
3364   // It should now be safe to tell the concurrent mark thread to start
3365   // without its logging output interfering with the logging output
3366   // that came from the pause.
3367 
3368   if (should_start_conc_mark) {
3369     // CAUTION: after the doConcurrentMark() call below,
3370     // the concurrent marking thread(s) could be running
3371     // concurrently with us. Make sure that anything after
3372     // this point does not assume that we are the only GC thread
3373     // running. Note: of course, the actual marking work will
3374     // not start until the safepoint itself is released in
3375     // SuspendibleThreadSet::desynchronize().
3376     doConcurrentMark();
3377   }
3378 
3379   return true;
3380 }
3381 
3382 void G1CollectedHeap::remove_self_forwarding_pointers() {
3383   G1ParRemoveSelfForwardPtrsTask rsfp_task;
3384   workers()->run_task(&rsfp_task);
3385 }
3386 
3387 void G1CollectedHeap::restore_after_evac_failure() {
3388   double remove_self_forwards_start = os::elapsedTime();
3389 
3390   remove_self_forwarding_pointers();
3391   SharedRestorePreservedMarksTaskExecutor task_executor(workers());
3392   _preserved_marks_set.restore(&task_executor);
3393 
3394   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
3395 }
3396 
3397 void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
3398   if (!_evacuation_failed) {
3399     _evacuation_failed = true;
3400   }
3401 
3402   _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
3403   _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);
3404 }
3405 
3406 bool G1ParEvacuateFollowersClosure::offer_termination() {
3407   G1ParScanThreadState* const pss = par_scan_state();
3408   start_term_time();
3409   const bool res = terminator()->offer_termination();
3410   end_term_time();
3411   return res;
3412 }
3413 
3414 void G1ParEvacuateFollowersClosure::do_void() {
3415   G1ParScanThreadState* const pss = par_scan_state();
3416   pss->trim_queue();
3417   do {
3418     pss->steal_and_trim_queue(queues());
3419   } while (!offer_termination());
3420 }
3421 
3422 class G1ParTask : public AbstractGangTask {
3423 protected:
3424   G1CollectedHeap*         _g1h;
3425   G1ParScanThreadStateSet* _pss;
3426   RefToScanQueueSet*       _queues;
3427   G1RootProcessor*         _root_processor;
3428   ParallelTaskTerminator   _terminator;
3429   uint                     _n_workers;
3430 
3431 public:
3432   G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
3433     : AbstractGangTask("G1 collection"),
3434       _g1h(g1h),
3435       _pss(per_thread_states),
3436       _queues(task_queues),
3437       _root_processor(root_processor),
3438       _terminator(n_workers, _queues),
3439       _n_workers(n_workers)
3440   {}
3441 
3442   void work(uint worker_id) {
3443     if (worker_id >= _n_workers) return;  // no work needed this round
3444 
3445     double start_sec = os::elapsedTime();
3446     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
3447 
3448     {
3449       ResourceMark rm;
3450       HandleMark   hm;
3451 
3452       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
3453 
3454       G1ParScanThreadState*           pss = _pss->state_for_worker(worker_id);
3455       pss->set_ref_processor(rp);
3456 
3457       double start_strong_roots_sec = os::elapsedTime();
3458 
3459       _root_processor->evacuate_roots(pss->closures(), worker_id);
3460 
3461       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
3462 
3463       // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
3464       // treating the nmethods visited to act as roots for concurrent marking.
3465       // We only want to make sure that the oops in the nmethods are adjusted with regard to the
3466       // objects copied by the current evacuation.
3467       size_t cards_scanned = _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
3468                                                                              pss->closures()->weak_codeblobs(),
3469                                                                              worker_id);
3470 
3471       _pss->add_cards_scanned(worker_id, cards_scanned);
3472 
3473       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
3474 
3475       double term_sec = 0.0;
3476       size_t evac_term_attempts = 0;
3477       {
3478         double start = os::elapsedTime();
3479         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
3480         evac.do_void();
3481 
3482         evac_term_attempts = evac.term_attempts();
3483         term_sec = evac.term_time();
3484         double elapsed_sec = os::elapsedTime() - start;
3485         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
3486         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
3487         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
3488       }
3489 
3490       assert(pss->queue_is_empty(), "should be empty");
3491 
3492       if (log_is_enabled(Debug, gc, task, stats)) {
3493         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3494         size_t lab_waste;
3495         size_t lab_undo_waste;
3496         pss->waste(lab_waste, lab_undo_waste);
3497         _g1h->print_termination_stats(worker_id,
3498                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
3499                                       strong_roots_sec * 1000.0,                  /* strong roots time */
3500                                       term_sec * 1000.0,                          /* evac term time */
3501                                       evac_term_attempts,                         /* evac term attempts */
3502                                       lab_waste,                                  /* alloc buffer waste */
3503                                       lab_undo_waste                              /* undo waste */
3504                                       );
3505       }
3506 
3507       // Close the inner scope so that the ResourceMark and HandleMark
3508       // destructors are executed here and are included as part of the
3509       // "GC Worker Time".
3510     }
3511     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
3512   }
3513 };
3514 
3515 void G1CollectedHeap::print_termination_stats_hdr() {
3516   log_debug(gc, task, stats)("GC Termination Stats");
3517   log_debug(gc, task, stats)("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
3518   log_debug(gc, task, stats)("thr     ms        ms      %%        ms      %%    attempts  total   alloc    undo");
3519   log_debug(gc, task, stats)("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
3520 }
3521 
3522 void G1CollectedHeap::print_termination_stats(uint worker_id,
3523                                               double elapsed_ms,
3524                                               double strong_roots_ms,
3525                                               double term_ms,
3526                                               size_t term_attempts,
3527                                               size_t alloc_buffer_waste,
3528                                               size_t undo_waste) const {
3529   log_debug(gc, task, stats)
3530               ("%3d %9.2f %9.2f %6.2f "
3531                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
3532                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
3533                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
3534                term_ms, term_ms * 100 / elapsed_ms, term_attempts,
3535                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
3536                alloc_buffer_waste * HeapWordSize / K,
3537                undo_waste * HeapWordSize / K);
3538 }
3539 
3540 class G1StringAndSymbolCleaningTask : public AbstractGangTask {
3541 private:
3542   BoolObjectClosure* _is_alive;
3543   G1StringDedupUnlinkOrOopsDoClosure _dedup_closure;
3544 
3545   int _initial_string_table_size;
3546   int _initial_symbol_table_size;
3547 
3548   bool  _process_strings;
3549   int _strings_processed;
3550   int _strings_removed;
3551 
3552   bool  _process_symbols;
3553   int _symbols_processed;
3554   int _symbols_removed;
3555 
3556   bool _process_string_dedup;
3557 
3558 public:
3559   G1StringAndSymbolCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool process_string_dedup) :
3560     AbstractGangTask("String/Symbol Unlinking"),
3561     _is_alive(is_alive),
3562     _dedup_closure(is_alive, NULL, false),
3563     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
3564     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0),
3565     _process_string_dedup(process_string_dedup) {
3566 
3567     _initial_string_table_size = StringTable::the_table()->table_size();
3568     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
3569     if (process_strings) {
3570       StringTable::clear_parallel_claimed_index();
3571     }
3572     if (process_symbols) {
3573       SymbolTable::clear_parallel_claimed_index();
3574     }
3575   }
3576 
3577   ~G1StringAndSymbolCleaningTask() {
3578     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
3579               "claim value %d after unlink less than initial string table size %d",
3580               StringTable::parallel_claimed_index(), _initial_string_table_size);
3581     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
3582               "claim value %d after unlink less than initial symbol table size %d",
3583               SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
3584 
3585     log_info(gc, stringtable)(
3586         "Cleaned string and symbol table, "
3587         "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
3588         "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
3589         strings_processed(), strings_removed(),
3590         symbols_processed(), symbols_removed());
3591   }
3592 
3593   void work(uint worker_id) {
3594     int strings_processed = 0;
3595     int strings_removed = 0;
3596     int symbols_processed = 0;
3597     int symbols_removed = 0;
3598     if (_process_strings) {
3599       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
3600       Atomic::add(strings_processed, &_strings_processed);
3601       Atomic::add(strings_removed, &_strings_removed);
3602     }
3603     if (_process_symbols) {
3604       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
3605       Atomic::add(symbols_processed, &_symbols_processed);
3606       Atomic::add(symbols_removed, &_symbols_removed);
3607     }
3608     if (_process_string_dedup) {
3609       G1StringDedup::parallel_unlink(&_dedup_closure, worker_id);
3610     }
3611   }
3612 
3613   size_t strings_processed() const { return (size_t)_strings_processed; }
3614   size_t strings_removed()   const { return (size_t)_strings_removed; }
3615 
3616   size_t symbols_processed() const { return (size_t)_symbols_processed; }
3617   size_t symbols_removed()   const { return (size_t)_symbols_removed; }
3618 };
3619 
3620 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
3621 private:
3622   static Monitor* _lock;
3623 
3624   BoolObjectClosure* const _is_alive;
3625   const bool               _unloading_occurred;
3626   const uint               _num_workers;
3627 
3628   // Variables used to claim nmethods.
3629   CompiledMethod* _first_nmethod;
3630   volatile CompiledMethod* _claimed_nmethod;
3631 
3632   // The list of nmethods that need to be processed by the second pass.
3633   volatile CompiledMethod* _postponed_list;
3634   volatile uint            _num_entered_barrier;
3635 
3636  public:
3637   G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
3638       _is_alive(is_alive),
3639       _unloading_occurred(unloading_occurred),
3640       _num_workers(num_workers),
3641       _first_nmethod(NULL),
3642       _claimed_nmethod(NULL),
3643       _postponed_list(NULL),
3644       _num_entered_barrier(0)
3645   {
3646     CompiledMethod::increase_unloading_clock();
3647     // Get first alive nmethod
3648     CompiledMethodIterator iter = CompiledMethodIterator();
3649     if(iter.next_alive()) {
3650       _first_nmethod = iter.method();
3651     }
3652     _claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
3653   }
3654 
3655   ~G1CodeCacheUnloadingTask() {
3656     CodeCache::verify_clean_inline_caches();
3657 
3658     CodeCache::set_needs_cache_clean(false);
3659     guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
3660 
3661     CodeCache::verify_icholder_relocations();
3662   }
3663 
3664  private:
3665   void add_to_postponed_list(CompiledMethod* nm) {
3666       CompiledMethod* old;
3667       do {
3668         old = (CompiledMethod*)_postponed_list;
3669         nm->set_unloading_next(old);
3670       } while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
3671   }
3672 
3673   void clean_nmethod(CompiledMethod* nm) {
3674     bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
3675 
3676     if (postponed) {
3677       // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
3678       add_to_postponed_list(nm);
3679     }
3680 
3681     // Mark that this thread has been cleaned/unloaded.
3682     // After this call, it will be safe to ask if this nmethod was unloaded or not.
3683     nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
3684   }
3685 
3686   void clean_nmethod_postponed(CompiledMethod* nm) {
3687     nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
3688   }
3689 
3690   static const int MaxClaimNmethods = 16;
3691 
3692   void claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
3693     CompiledMethod* first;
3694     CompiledMethodIterator last;
3695 
3696     do {
3697       *num_claimed_nmethods = 0;
3698 
3699       first = (CompiledMethod*)_claimed_nmethod;
3700       last = CompiledMethodIterator(first);
3701 
3702       if (first != NULL) {
3703 
3704         for (int i = 0; i < MaxClaimNmethods; i++) {
3705           if (!last.next_alive()) {
3706             break;
3707           }
3708           claimed_nmethods[i] = last.method();
3709           (*num_claimed_nmethods)++;
3710         }
3711       }
3712 
3713     } while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
3714   }
3715 
3716   CompiledMethod* claim_postponed_nmethod() {
3717     CompiledMethod* claim;
3718     CompiledMethod* next;
3719 
3720     do {
3721       claim = (CompiledMethod*)_postponed_list;
3722       if (claim == NULL) {
3723         return NULL;
3724       }
3725 
3726       next = claim->unloading_next();
3727 
3728     } while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
3729 
3730     return claim;
3731   }
3732 
3733  public:
3734   // Mark that we're done with the first pass of nmethod cleaning.
3735   void barrier_mark(uint worker_id) {
3736     MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
3737     _num_entered_barrier++;
3738     if (_num_entered_barrier == _num_workers) {
3739       ml.notify_all();
3740     }
3741   }
3742 
3743   // See if we have to wait for the other workers to
3744   // finish their first-pass nmethod cleaning work.
3745   void barrier_wait(uint worker_id) {
3746     if (_num_entered_barrier < _num_workers) {
3747       MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
3748       while (_num_entered_barrier < _num_workers) {
3749           ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
3750       }
3751     }
3752   }
3753 
3754   // Cleaning and unloading of nmethods. Some work has to be postponed
3755   // to the second pass, when we know which nmethods survive.
3756   void work_first_pass(uint worker_id) {
3757     // The first nmethods is claimed by the first worker.
3758     if (worker_id == 0 && _first_nmethod != NULL) {
3759       clean_nmethod(_first_nmethod);
3760       _first_nmethod = NULL;
3761     }
3762 
3763     int num_claimed_nmethods;
3764     CompiledMethod* claimed_nmethods[MaxClaimNmethods];
3765 
3766     while (true) {
3767       claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
3768 
3769       if (num_claimed_nmethods == 0) {
3770         break;
3771       }
3772 
3773       for (int i = 0; i < num_claimed_nmethods; i++) {
3774         clean_nmethod(claimed_nmethods[i]);
3775       }
3776     }
3777   }
3778 
3779   void work_second_pass(uint worker_id) {
3780     CompiledMethod* nm;
3781     // Take care of postponed nmethods.
3782     while ((nm = claim_postponed_nmethod()) != NULL) {
3783       clean_nmethod_postponed(nm);
3784     }
3785   }
3786 };
3787 
3788 Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);
3789 
3790 class G1KlassCleaningTask : public StackObj {
3791   BoolObjectClosure*                      _is_alive;
3792   volatile jint                           _clean_klass_tree_claimed;
3793   ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
3794 
3795  public:
3796   G1KlassCleaningTask(BoolObjectClosure* is_alive) :
3797       _is_alive(is_alive),
3798       _clean_klass_tree_claimed(0),
3799       _klass_iterator() {
3800   }
3801 
3802  private:
3803   bool claim_clean_klass_tree_task() {
3804     if (_clean_klass_tree_claimed) {
3805       return false;
3806     }
3807 
3808     return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
3809   }
3810 
3811   InstanceKlass* claim_next_klass() {
3812     Klass* klass;
3813     do {
3814       klass =_klass_iterator.next_klass();
3815     } while (klass != NULL && !klass->is_instance_klass());
3816 
3817     // this can be null so don't call InstanceKlass::cast
3818     return static_cast<InstanceKlass*>(klass);
3819   }
3820 
3821 public:
3822 
3823   void clean_klass(InstanceKlass* ik) {
3824     ik->clean_weak_instanceklass_links(_is_alive);
3825   }
3826 
3827   void work() {
3828     ResourceMark rm;
3829 
3830     // One worker will clean the subklass/sibling klass tree.
3831     if (claim_clean_klass_tree_task()) {
3832       Klass::clean_subklass_tree(_is_alive);
3833     }
3834 
3835     // All workers will help cleaning the classes,
3836     InstanceKlass* klass;
3837     while ((klass = claim_next_klass()) != NULL) {
3838       clean_klass(klass);
3839     }
3840   }
3841 };
3842 
3843 class G1ResolvedMethodCleaningTask : public StackObj {
3844   BoolObjectClosure* _is_alive;
3845   volatile jint      _resolved_method_task_claimed;
3846 public:
3847   G1ResolvedMethodCleaningTask(BoolObjectClosure* is_alive) :
3848       _is_alive(is_alive), _resolved_method_task_claimed(0) {}
3849 
3850   bool claim_resolved_method_task() {
3851     if (_resolved_method_task_claimed) {
3852       return false;
3853     }
3854     return Atomic::cmpxchg(1, (jint*)&_resolved_method_task_claimed, 0) == 0;
3855   }
3856 
3857   // These aren't big, one thread can do it all.
3858   void work() {
3859     if (claim_resolved_method_task()) {
3860       ResolvedMethodTable::unlink(_is_alive);
3861     }
3862   }
3863 };
3864 
3865 
3866 // To minimize the remark pause times, the tasks below are done in parallel.
3867 class G1ParallelCleaningTask : public AbstractGangTask {
3868 private:
3869   G1StringAndSymbolCleaningTask _string_symbol_task;
3870   G1CodeCacheUnloadingTask      _code_cache_task;
3871   G1KlassCleaningTask           _klass_cleaning_task;
3872   G1ResolvedMethodCleaningTask  _resolved_method_cleaning_task;
3873 
3874 public:
3875   // The constructor is run in the VMThread.
3876   G1ParallelCleaningTask(BoolObjectClosure* is_alive, uint num_workers, bool unloading_occurred) :
3877       AbstractGangTask("Parallel Cleaning"),
3878       _string_symbol_task(is_alive, true, true, G1StringDedup::is_enabled()),
3879       _code_cache_task(num_workers, is_alive, unloading_occurred),
3880       _klass_cleaning_task(is_alive),
3881       _resolved_method_cleaning_task(is_alive) {
3882   }
3883 
3884   // The parallel work done by all worker threads.
3885   void work(uint worker_id) {
3886     // Do first pass of code cache cleaning.
3887     _code_cache_task.work_first_pass(worker_id);
3888 
3889     // Let the threads mark that the first pass is done.
3890     _code_cache_task.barrier_mark(worker_id);
3891 
3892     // Clean the Strings and Symbols.
3893     _string_symbol_task.work(worker_id);
3894 
3895     // Clean unreferenced things in the ResolvedMethodTable
3896     _resolved_method_cleaning_task.work();
3897 
3898     // Wait for all workers to finish the first code cache cleaning pass.
3899     _code_cache_task.barrier_wait(worker_id);
3900 
3901     // Do the second code cache cleaning work, which realize on
3902     // the liveness information gathered during the first pass.
3903     _code_cache_task.work_second_pass(worker_id);
3904 
3905     // Clean all klasses that were not unloaded.
3906     _klass_cleaning_task.work();
3907   }
3908 };
3909 
3910 
3911 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
3912                                         bool class_unloading_occurred) {
3913   uint n_workers = workers()->active_workers();
3914 
3915   G1ParallelCleaningTask g1_unlink_task(is_alive, n_workers, class_unloading_occurred);
3916   workers()->run_task(&g1_unlink_task);
3917 }
3918 
3919 void G1CollectedHeap::partial_cleaning(BoolObjectClosure* is_alive,
3920                                        bool process_strings,
3921                                        bool process_symbols,
3922                                        bool process_string_dedup) {
3923   if (!process_strings && !process_symbols && !process_string_dedup) {
3924     // Nothing to clean.
3925     return;
3926   }
3927 
3928   G1StringAndSymbolCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols, process_string_dedup);
3929   workers()->run_task(&g1_unlink_task);
3930 
3931 }
3932 
3933 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
3934  private:
3935   DirtyCardQueueSet* _queue;
3936   G1CollectedHeap* _g1h;
3937  public:
3938   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
3939     _queue(queue), _g1h(g1h) { }
3940 
3941   virtual void work(uint worker_id) {
3942     G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3943     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3944 
3945     RedirtyLoggedCardTableEntryClosure cl(_g1h);
3946     _queue->par_apply_closure_to_all_completed_buffers(&cl);
3947 
3948     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3949   }
3950 };
3951 
3952 void G1CollectedHeap::redirty_logged_cards() {
3953   double redirty_logged_cards_start = os::elapsedTime();
3954 
3955   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3956   dirty_card_queue_set().reset_for_par_iteration();
3957   workers()->run_task(&redirty_task);
3958 
3959   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
3960   dcq.merge_bufferlists(&dirty_card_queue_set());
3961   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3962 
3963   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3964 }
3965 
3966 // Weak Reference Processing support
3967 
3968 // An always "is_alive" closure that is used to preserve referents.
3969 // If the object is non-null then it's alive.  Used in the preservation
3970 // of referent objects that are pointed to by reference objects
3971 // discovered by the CM ref processor.
3972 class G1AlwaysAliveClosure: public BoolObjectClosure {
3973   G1CollectedHeap* _g1;
3974 public:
3975   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3976   bool do_object_b(oop p) {
3977     if (p != NULL) {
3978       return true;
3979     }
3980     return false;
3981   }
3982 };
3983 
3984 bool G1STWIsAliveClosure::do_object_b(oop p) {
3985   // An object is reachable if it is outside the collection set,
3986   // or is inside and copied.
3987   return !_g1->is_in_cset(p) || p->is_forwarded();
3988 }
3989 
3990 // Non Copying Keep Alive closure
3991 class G1KeepAliveClosure: public OopClosure {
3992   G1CollectedHeap* _g1;
3993 public:
3994   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3995   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3996   void do_oop(oop* p) {
3997     oop obj = *p;
3998     assert(obj != NULL, "the caller should have filtered out NULL values");
3999 
4000     const InCSetState cset_state = _g1->in_cset_state(obj);
4001     if (!cset_state.is_in_cset_or_humongous()) {
4002       return;
4003     }
4004     if (cset_state.is_in_cset()) {
4005       assert( obj->is_forwarded(), "invariant" );
4006       *p = obj->forwardee();
4007     } else {
4008       assert(!obj->is_forwarded(), "invariant" );
4009       assert(cset_state.is_humongous(),
4010              "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
4011       _g1->set_humongous_is_live(obj);
4012     }
4013   }
4014 };
4015 
4016 // Copying Keep Alive closure - can be called from both
4017 // serial and parallel code as long as different worker
4018 // threads utilize different G1ParScanThreadState instances
4019 // and different queues.
4020 
4021 class G1CopyingKeepAliveClosure: public OopClosure {
4022   G1CollectedHeap*         _g1h;
4023   OopClosure*              _copy_non_heap_obj_cl;
4024   G1ParScanThreadState*    _par_scan_state;
4025 
4026 public:
4027   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
4028                             OopClosure* non_heap_obj_cl,
4029                             G1ParScanThreadState* pss):
4030     _g1h(g1h),
4031     _copy_non_heap_obj_cl(non_heap_obj_cl),
4032     _par_scan_state(pss)
4033   {}
4034 
4035   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
4036   virtual void do_oop(      oop* p) { do_oop_work(p); }
4037 
4038   template <class T> void do_oop_work(T* p) {
4039     oop obj = oopDesc::load_decode_heap_oop(p);
4040 
4041     if (_g1h->is_in_cset_or_humongous(obj)) {
4042       // If the referent object has been forwarded (either copied
4043       // to a new location or to itself in the event of an
4044       // evacuation failure) then we need to update the reference
4045       // field and, if both reference and referent are in the G1
4046       // heap, update the RSet for the referent.
4047       //
4048       // If the referent has not been forwarded then we have to keep
4049       // it alive by policy. Therefore we have copy the referent.
4050       //
4051       // If the reference field is in the G1 heap then we can push
4052       // on the PSS queue. When the queue is drained (after each
4053       // phase of reference processing) the object and it's followers
4054       // will be copied, the reference field set to point to the
4055       // new location, and the RSet updated. Otherwise we need to
4056       // use the the non-heap or metadata closures directly to copy
4057       // the referent object and update the pointer, while avoiding
4058       // updating the RSet.
4059 
4060       if (_g1h->is_in_g1_reserved(p)) {
4061         _par_scan_state->push_on_queue(p);
4062       } else {
4063         assert(!Metaspace::contains((const void*)p),
4064                "Unexpectedly found a pointer from metadata: " PTR_FORMAT, p2i(p));
4065         _copy_non_heap_obj_cl->do_oop(p);
4066       }
4067     }
4068   }
4069 };
4070 
4071 // Serial drain queue closure. Called as the 'complete_gc'
4072 // closure for each discovered list in some of the
4073 // reference processing phases.
4074 
4075 class G1STWDrainQueueClosure: public VoidClosure {
4076 protected:
4077   G1CollectedHeap* _g1h;
4078   G1ParScanThreadState* _par_scan_state;
4079 
4080   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
4081 
4082 public:
4083   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
4084     _g1h(g1h),
4085     _par_scan_state(pss)
4086   { }
4087 
4088   void do_void() {
4089     G1ParScanThreadState* const pss = par_scan_state();
4090     pss->trim_queue();
4091   }
4092 };
4093 
4094 // Parallel Reference Processing closures
4095 
4096 // Implementation of AbstractRefProcTaskExecutor for parallel reference
4097 // processing during G1 evacuation pauses.
4098 
4099 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
4100 private:
4101   G1CollectedHeap*          _g1h;
4102   G1ParScanThreadStateSet*  _pss;
4103   RefToScanQueueSet*        _queues;
4104   WorkGang*                 _workers;
4105   uint                      _active_workers;
4106 
4107 public:
4108   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
4109                            G1ParScanThreadStateSet* per_thread_states,
4110                            WorkGang* workers,
4111                            RefToScanQueueSet *task_queues,
4112                            uint n_workers) :
4113     _g1h(g1h),
4114     _pss(per_thread_states),
4115     _queues(task_queues),
4116     _workers(workers),
4117     _active_workers(n_workers)
4118   {
4119     g1h->ref_processor_stw()->set_active_mt_degree(n_workers);
4120   }
4121 
4122   // Executes the given task using concurrent marking worker threads.
4123   virtual void execute(ProcessTask& task);
4124   virtual void execute(EnqueueTask& task);
4125 };
4126 
4127 // Gang task for possibly parallel reference processing
4128 
4129 class G1STWRefProcTaskProxy: public AbstractGangTask {
4130   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
4131   ProcessTask&     _proc_task;
4132   G1CollectedHeap* _g1h;
4133   G1ParScanThreadStateSet* _pss;
4134   RefToScanQueueSet* _task_queues;
4135   ParallelTaskTerminator* _terminator;
4136 
4137 public:
4138   G1STWRefProcTaskProxy(ProcessTask& proc_task,
4139                         G1CollectedHeap* g1h,
4140                         G1ParScanThreadStateSet* per_thread_states,
4141                         RefToScanQueueSet *task_queues,
4142                         ParallelTaskTerminator* terminator) :
4143     AbstractGangTask("Process reference objects in parallel"),
4144     _proc_task(proc_task),
4145     _g1h(g1h),
4146     _pss(per_thread_states),
4147     _task_queues(task_queues),
4148     _terminator(terminator)
4149   {}
4150 
4151   virtual void work(uint worker_id) {
4152     // The reference processing task executed by a single worker.
4153     ResourceMark rm;
4154     HandleMark   hm;
4155 
4156     G1STWIsAliveClosure is_alive(_g1h);
4157 
4158     G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
4159     pss->set_ref_processor(NULL);
4160 
4161     // Keep alive closure.
4162     G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
4163 
4164     // Complete GC closure
4165     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
4166 
4167     // Call the reference processing task's work routine.
4168     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
4169 
4170     // Note we cannot assert that the refs array is empty here as not all
4171     // of the processing tasks (specifically phase2 - pp2_work) execute
4172     // the complete_gc closure (which ordinarily would drain the queue) so
4173     // the queue may not be empty.
4174   }
4175 };
4176 
4177 // Driver routine for parallel reference processing.
4178 // Creates an instance of the ref processing gang
4179 // task and has the worker threads execute it.
4180 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
4181   assert(_workers != NULL, "Need parallel worker threads.");
4182 
4183   ParallelTaskTerminator terminator(_active_workers, _queues);
4184   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
4185 
4186   _workers->run_task(&proc_task_proxy);
4187 }
4188 
4189 // Gang task for parallel reference enqueueing.
4190 
4191 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
4192   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
4193   EnqueueTask& _enq_task;
4194 
4195 public:
4196   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
4197     AbstractGangTask("Enqueue reference objects in parallel"),
4198     _enq_task(enq_task)
4199   { }
4200 
4201   virtual void work(uint worker_id) {
4202     _enq_task.work(worker_id);
4203   }
4204 };
4205 
4206 // Driver routine for parallel reference enqueueing.
4207 // Creates an instance of the ref enqueueing gang
4208 // task and has the worker threads execute it.
4209 
4210 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
4211   assert(_workers != NULL, "Need parallel worker threads.");
4212 
4213   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
4214 
4215   _workers->run_task(&enq_task_proxy);
4216 }
4217 
4218 // End of weak reference support closures
4219 
4220 // Abstract task used to preserve (i.e. copy) any referent objects
4221 // that are in the collection set and are pointed to by reference
4222 // objects discovered by the CM ref processor.
4223 
4224 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
4225 protected:
4226   G1CollectedHeap*         _g1h;
4227   G1ParScanThreadStateSet* _pss;
4228   RefToScanQueueSet*       _queues;
4229   ParallelTaskTerminator   _terminator;
4230   uint                     _n_workers;
4231 
4232 public:
4233   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, int workers, RefToScanQueueSet *task_queues) :
4234     AbstractGangTask("ParPreserveCMReferents"),
4235     _g1h(g1h),
4236     _pss(per_thread_states),
4237     _queues(task_queues),
4238     _terminator(workers, _queues),
4239     _n_workers(workers)
4240   {
4241     g1h->ref_processor_cm()->set_active_mt_degree(workers);
4242   }
4243 
4244   void work(uint worker_id) {
4245     G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
4246 
4247     ResourceMark rm;
4248     HandleMark   hm;
4249 
4250     G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
4251     pss->set_ref_processor(NULL);
4252     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
4253 
4254     // Is alive closure
4255     G1AlwaysAliveClosure always_alive(_g1h);
4256 
4257     // Copying keep alive closure. Applied to referent objects that need
4258     // to be copied.
4259     G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
4260 
4261     ReferenceProcessor* rp = _g1h->ref_processor_cm();
4262 
4263     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
4264     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
4265 
4266     // limit is set using max_num_q() - which was set using ParallelGCThreads.
4267     // So this must be true - but assert just in case someone decides to
4268     // change the worker ids.
4269     assert(worker_id < limit, "sanity");
4270     assert(!rp->discovery_is_atomic(), "check this code");
4271 
4272     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
4273     for (uint idx = worker_id; idx < limit; idx += stride) {
4274       DiscoveredList& ref_list = rp->discovered_refs()[idx];
4275 
4276       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
4277       while (iter.has_next()) {
4278         // Since discovery is not atomic for the CM ref processor, we
4279         // can see some null referent objects.
4280         iter.load_ptrs(DEBUG_ONLY(true));
4281         oop ref = iter.obj();
4282 
4283         // This will filter nulls.
4284         if (iter.is_referent_alive()) {
4285           iter.make_referent_alive();
4286         }
4287         iter.move_to_next();
4288       }
4289     }
4290 
4291     // Drain the queue - which may cause stealing
4292     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
4293     drain_queue.do_void();
4294     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
4295     assert(pss->queue_is_empty(), "should be");
4296   }
4297 };
4298 
4299 void G1CollectedHeap::process_weak_jni_handles() {
4300   double ref_proc_start = os::elapsedTime();
4301 
4302   G1STWIsAliveClosure is_alive(this);
4303   G1KeepAliveClosure keep_alive(this);
4304   JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4305 
4306   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4307   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4308 }
4309 
4310 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
4311   // Any reference objects, in the collection set, that were 'discovered'
4312   // by the CM ref processor should have already been copied (either by
4313   // applying the external root copy closure to the discovered lists, or
4314   // by following an RSet entry).
4315   //
4316   // But some of the referents, that are in the collection set, that these
4317   // reference objects point to may not have been copied: the STW ref
4318   // processor would have seen that the reference object had already
4319   // been 'discovered' and would have skipped discovering the reference,
4320   // but would not have treated the reference object as a regular oop.
4321   // As a result the copy closure would not have been applied to the
4322   // referent object.
4323   //
4324   // We need to explicitly copy these referent objects - the references
4325   // will be processed at the end of remarking.
4326   //
4327   // We also need to do this copying before we process the reference
4328   // objects discovered by the STW ref processor in case one of these
4329   // referents points to another object which is also referenced by an
4330   // object discovered by the STW ref processor.
4331   double preserve_cm_referents_time = 0.0;
4332 
4333   // To avoid spawning task when there is no work to do, check that
4334   // a concurrent cycle is active and that some references have been
4335   // discovered.
4336   if (concurrent_mark()->cmThread()->during_cycle() &&
4337       ref_processor_cm()->has_discovered_references()) {
4338     double preserve_cm_referents_start = os::elapsedTime();
4339     uint no_of_gc_workers = workers()->active_workers();
4340     G1ParPreserveCMReferentsTask keep_cm_referents(this,
4341                                                    per_thread_states,
4342                                                    no_of_gc_workers,
4343                                                    _task_queues);
4344     workers()->run_task(&keep_cm_referents);
4345     preserve_cm_referents_time = os::elapsedTime() - preserve_cm_referents_start;
4346   }
4347 
4348   g1_policy()->phase_times()->record_preserve_cm_referents_time_ms(preserve_cm_referents_time * 1000.0);
4349 }
4350 
4351 // Weak Reference processing during an evacuation pause (part 1).
4352 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4353   double ref_proc_start = os::elapsedTime();
4354 
4355   ReferenceProcessor* rp = _ref_processor_stw;
4356   assert(rp->discovery_enabled(), "should have been enabled");
4357 
4358   // Closure to test whether a referent is alive.
4359   G1STWIsAliveClosure is_alive(this);
4360 
4361   // Even when parallel reference processing is enabled, the processing
4362   // of JNI refs is serial and performed serially by the current thread
4363   // rather than by a worker. The following PSS will be used for processing
4364   // JNI refs.
4365 
4366   // Use only a single queue for this PSS.
4367   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
4368   pss->set_ref_processor(NULL);
4369   assert(pss->queue_is_empty(), "pre-condition");
4370 
4371   // Keep alive closure.
4372   G1CopyingKeepAliveClosure keep_alive(this, pss->closures()->raw_strong_oops(), pss);
4373 
4374   // Serial Complete GC closure
4375   G1STWDrainQueueClosure drain_queue(this, pss);
4376 
4377   // Setup the soft refs policy...
4378   rp->setup_policy(false);
4379 
4380   ReferenceProcessorStats stats;
4381   if (!rp->processing_is_mt()) {
4382     // Serial reference processing...
4383     stats = rp->process_discovered_references(&is_alive,
4384                                               &keep_alive,
4385                                               &drain_queue,
4386                                               NULL,
4387                                               _gc_timer_stw);
4388   } else {
4389     uint no_of_gc_workers = workers()->active_workers();
4390 
4391     // Parallel reference processing
4392     assert(no_of_gc_workers <= rp->max_num_q(),
4393            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
4394            no_of_gc_workers,  rp->max_num_q());
4395 
4396     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
4397     stats = rp->process_discovered_references(&is_alive,
4398                                               &keep_alive,
4399                                               &drain_queue,
4400                                               &par_task_executor,
4401                                               _gc_timer_stw);
4402   }
4403 
4404   _gc_tracer_stw->report_gc_reference_stats(stats);
4405 
4406   // We have completed copying any necessary live referent objects.
4407   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
4408 
4409   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4410   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4411 }
4412 
4413 // Weak Reference processing during an evacuation pause (part 2).
4414 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4415   double ref_enq_start = os::elapsedTime();
4416 
4417   ReferenceProcessor* rp = _ref_processor_stw;
4418   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
4419 
4420   // Now enqueue any remaining on the discovered lists on to
4421   // the pending list.
4422   if (!rp->processing_is_mt()) {
4423     // Serial reference processing...
4424     rp->enqueue_discovered_references();
4425   } else {
4426     // Parallel reference enqueueing
4427 
4428     uint n_workers = workers()->active_workers();
4429 
4430     assert(n_workers <= rp->max_num_q(),
4431            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
4432            n_workers,  rp->max_num_q());
4433 
4434     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
4435     rp->enqueue_discovered_references(&par_task_executor);
4436   }
4437 
4438   rp->verify_no_references_recorded();
4439   assert(!rp->discovery_enabled(), "should have been disabled");
4440 
4441   // FIXME
4442   // CM's reference processing also cleans up the string and symbol tables.
4443   // Should we do that here also? We could, but it is a serial operation
4444   // and could significantly increase the pause time.
4445 
4446   double ref_enq_time = os::elapsedTime() - ref_enq_start;
4447   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
4448 }
4449 
4450 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
4451   double merge_pss_time_start = os::elapsedTime();
4452   per_thread_states->flush();
4453   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
4454 }
4455 
4456 void G1CollectedHeap::pre_evacuate_collection_set() {
4457   _expand_heap_after_alloc_failure = true;
4458   _evacuation_failed = false;
4459 
4460   // Disable the hot card cache.
4461   _hot_card_cache->reset_hot_cache_claimed_index();
4462   _hot_card_cache->set_use_cache(false);
4463 
4464   g1_rem_set()->prepare_for_oops_into_collection_set_do();
4465   _preserved_marks_set.assert_empty();
4466 
4467   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
4468 
4469   // InitialMark needs claim bits to keep track of the marked-through CLDs.
4470   if (collector_state()->during_initial_mark_pause()) {
4471     double start_clear_claimed_marks = os::elapsedTime();
4472 
4473     ClassLoaderDataGraph::clear_claimed_marks();
4474 
4475     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
4476     phase_times->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
4477   }
4478 }
4479 
4480 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4481   // Should G1EvacuationFailureALot be in effect for this GC?
4482   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
4483 
4484   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
4485 
4486   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
4487 
4488   double start_par_time_sec = os::elapsedTime();
4489   double end_par_time_sec;
4490 
4491   {
4492     const uint n_workers = workers()->active_workers();
4493     G1RootProcessor root_processor(this, n_workers);
4494     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
4495 
4496     print_termination_stats_hdr();
4497 
4498     workers()->run_task(&g1_par_task);
4499     end_par_time_sec = os::elapsedTime();
4500 
4501     // Closing the inner scope will execute the destructor
4502     // for the G1RootProcessor object. We record the current
4503     // elapsed time before closing the scope so that time
4504     // taken for the destructor is NOT included in the
4505     // reported parallel time.
4506   }
4507 
4508   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4509   phase_times->record_par_time(par_time_ms);
4510 
4511   double code_root_fixup_time_ms =
4512         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4513   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4514 }
4515 
4516 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4517   // Process any discovered reference objects - we have
4518   // to do this _before_ we retire the GC alloc regions
4519   // as we may have to copy some 'reachable' referent
4520   // objects (and their reachable sub-graphs) that were
4521   // not copied during the pause.
4522   if (g1_policy()->should_process_references()) {
4523     preserve_cm_referents(per_thread_states);
4524     process_discovered_references(per_thread_states);
4525   } else {
4526     ref_processor_stw()->verify_no_references_recorded();
4527     process_weak_jni_handles();
4528   }
4529 
4530   if (G1StringDedup::is_enabled()) {
4531     double fixup_start = os::elapsedTime();
4532 
4533     G1STWIsAliveClosure is_alive(this);
4534     G1KeepAliveClosure keep_alive(this);
4535     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4536 
4537     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4538     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
4539   }
4540 
4541   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4542 
4543   if (evacuation_failed()) {
4544     restore_after_evac_failure();
4545 
4546     // Reset the G1EvacuationFailureALot counters and flags
4547     // Note: the values are reset only when an actual
4548     // evacuation failure occurs.
4549     NOT_PRODUCT(reset_evacuation_should_fail();)
4550   }
4551 
4552   _preserved_marks_set.assert_empty();
4553 
4554   // Enqueue any remaining references remaining on the STW
4555   // reference processor's discovered lists. We need to do
4556   // this after the card table is cleaned (and verified) as
4557   // the act of enqueueing entries on to the pending list
4558   // will log these updates (and dirty their associated
4559   // cards). We need these updates logged to update any
4560   // RSets.
4561   if (g1_policy()->should_process_references()) {
4562     enqueue_discovered_references(per_thread_states);
4563   } else {
4564     g1_policy()->phase_times()->record_ref_enq_time(0);
4565   }
4566 
4567   _allocator->release_gc_alloc_regions(evacuation_info);
4568 
4569   merge_per_thread_state_info(per_thread_states);
4570 
4571   // Reset and re-enable the hot card cache.
4572   // Note the counts for the cards in the regions in the
4573   // collection set are reset when the collection set is freed.
4574   _hot_card_cache->reset_hot_cache();
4575   _hot_card_cache->set_use_cache(true);
4576 
4577   purge_code_root_memory();
4578 
4579   redirty_logged_cards();
4580 #if defined(COMPILER2) || INCLUDE_JVMCI
4581   double start = os::elapsedTime();
4582   DerivedPointerTable::update_pointers();
4583   g1_policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);
4584 #endif
4585   g1_policy()->print_age_table();
4586 }
4587 
4588 void G1CollectedHeap::record_obj_copy_mem_stats() {
4589   g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4590 
4591   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4592                                                create_g1_evac_summary(&_old_evac_stats));
4593 }
4594 
4595 void G1CollectedHeap::free_region(HeapRegion* hr,
4596                                   FreeRegionList* free_list,
4597                                   bool skip_remset,
4598                                   bool skip_hot_card_cache,
4599                                   bool locked) {
4600   assert(!hr->is_free(), "the region should not be free");
4601   assert(!hr->is_empty(), "the region should not be empty");
4602   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
4603   assert(free_list != NULL, "pre-condition");
4604 
4605   if (G1VerifyBitmaps) {
4606     MemRegion mr(hr->bottom(), hr->end());
4607     concurrent_mark()->clearRangePrevBitmap(mr);
4608   }
4609 
4610   // Clear the card counts for this region.
4611   // Note: we only need to do this if the region is not young
4612   // (since we don't refine cards in young regions).
4613   if (!skip_hot_card_cache && !hr->is_young()) {
4614     _hot_card_cache->reset_card_counts(hr);
4615   }
4616   hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
4617   free_list->add_ordered(hr);
4618 }
4619 
4620 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
4621                                             FreeRegionList* free_list,
4622                                             bool skip_remset) {
4623   assert(hr->is_humongous(), "this is only for humongous regions");
4624   assert(free_list != NULL, "pre-condition");
4625   hr->clear_humongous();
4626   free_region(hr, free_list, skip_remset);
4627 }
4628 
4629 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
4630                                            const uint humongous_regions_removed) {
4631   if (old_regions_removed > 0 || humongous_regions_removed > 0) {
4632     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4633     _old_set.bulk_remove(old_regions_removed);
4634     _humongous_set.bulk_remove(humongous_regions_removed);
4635   }
4636 
4637 }
4638 
4639 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
4640   assert(list != NULL, "list can't be null");
4641   if (!list->is_empty()) {
4642     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4643     _hrm.insert_list_into_free_list(list);
4644   }
4645 }
4646 
4647 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
4648   decrease_used(bytes);
4649 }
4650 
4651 class G1ParScrubRemSetTask: public AbstractGangTask {
4652 protected:
4653   G1RemSet* _g1rs;
4654   HeapRegionClaimer _hrclaimer;
4655 
4656 public:
4657   G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
4658     AbstractGangTask("G1 ScrubRS"),
4659     _g1rs(g1_rs),
4660     _hrclaimer(num_workers) {
4661   }
4662 
4663   void work(uint worker_id) {
4664     _g1rs->scrub(worker_id, &_hrclaimer);
4665   }
4666 };
4667 
4668 void G1CollectedHeap::scrub_rem_set() {
4669   uint num_workers = workers()->active_workers();
4670   G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
4671   workers()->run_task(&g1_par_scrub_rs_task);
4672 }
4673 
4674 class G1FreeCollectionSetTask : public AbstractGangTask {
4675 private:
4676 
4677   // Closure applied to all regions in the collection set to do work that needs to
4678   // be done serially in a single thread.
4679   class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
4680   private:
4681     EvacuationInfo* _evacuation_info;
4682     const size_t* _surviving_young_words;
4683 
4684     // Bytes used in successfully evacuated regions before the evacuation.
4685     size_t _before_used_bytes;
4686     // Bytes used in unsucessfully evacuated regions before the evacuation
4687     size_t _after_used_bytes;
4688 
4689     size_t _bytes_allocated_in_old_since_last_gc;
4690 
4691     size_t _failure_used_words;
4692     size_t _failure_waste_words;
4693 
4694     FreeRegionList _local_free_list;
4695   public:
4696     G1SerialFreeCollectionSetClosure(EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4697       HeapRegionClosure(),
4698       _evacuation_info(evacuation_info),
4699       _surviving_young_words(surviving_young_words),
4700       _before_used_bytes(0),
4701       _after_used_bytes(0),
4702       _bytes_allocated_in_old_since_last_gc(0),
4703       _failure_used_words(0),
4704       _failure_waste_words(0),
4705       _local_free_list("Local Region List for CSet Freeing") {
4706     }
4707 
4708     virtual bool doHeapRegion(HeapRegion* r) {
4709       G1CollectedHeap* g1h = G1CollectedHeap::heap();
4710 
4711       assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
4712       g1h->clear_in_cset(r);
4713 
4714       if (r->is_young()) {
4715         assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
4716                "Young index %d is wrong for region %u of type %s with %u young regions",
4717                r->young_index_in_cset(),
4718                r->hrm_index(),
4719                r->get_type_str(),
4720                g1h->collection_set()->young_region_length());
4721         size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
4722         r->record_surv_words_in_group(words_survived);
4723       }
4724 
4725       if (!r->evacuation_failed()) {
4726         assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
4727         _before_used_bytes += r->used();
4728         g1h->free_region(r,
4729                          &_local_free_list,
4730                          true, /* skip_remset */
4731                          true, /* skip_hot_card_cache */
4732                          true  /* locked */);
4733       } else {
4734         r->uninstall_surv_rate_group();
4735         r->set_young_index_in_cset(-1);
4736         r->set_evacuation_failed(false);
4737         // When moving a young gen region to old gen, we "allocate" that whole region
4738         // there. This is in addition to any already evacuated objects. Notify the
4739         // policy about that.
4740         // Old gen regions do not cause an additional allocation: both the objects
4741         // still in the region and the ones already moved are accounted for elsewhere.
4742         if (r->is_young()) {
4743           _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
4744         }
4745         // The region is now considered to be old.
4746         r->set_old();
4747         // Do some allocation statistics accounting. Regions that failed evacuation
4748         // are always made old, so there is no need to update anything in the young
4749         // gen statistics, but we need to update old gen statistics.
4750         size_t used_words = r->marked_bytes() / HeapWordSize;
4751 
4752         _failure_used_words += used_words;
4753         _failure_waste_words += HeapRegion::GrainWords - used_words;
4754 
4755         g1h->old_set_add(r);
4756         _after_used_bytes += r->used();
4757       }
4758       return false;
4759     }
4760 
4761     void complete_work() {
4762       G1CollectedHeap* g1h = G1CollectedHeap::heap();
4763 
4764       _evacuation_info->set_regions_freed(_local_free_list.length());
4765       _evacuation_info->increment_collectionset_used_after(_after_used_bytes);
4766 
4767       g1h->prepend_to_freelist(&_local_free_list);
4768       g1h->decrement_summary_bytes(_before_used_bytes);
4769 
4770       G1Policy* policy = g1h->g1_policy();
4771       policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
4772 
4773       g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
4774     }
4775   };
4776 
4777   G1CollectionSet* _collection_set;
4778   G1SerialFreeCollectionSetClosure _cl;
4779   const size_t* _surviving_young_words;
4780 
4781   size_t _rs_lengths;
4782 
4783   volatile jint _serial_work_claim;
4784 
4785   struct WorkItem {
4786     uint region_idx;
4787     bool is_young;
4788     bool evacuation_failed;
4789 
4790     WorkItem(HeapRegion* r) {
4791       region_idx = r->hrm_index();
4792       is_young = r->is_young();
4793       evacuation_failed = r->evacuation_failed();
4794     }
4795   };
4796 
4797   volatile size_t _parallel_work_claim;
4798   size_t _num_work_items;
4799   WorkItem* _work_items;
4800 
4801   void do_serial_work() {
4802     // Need to grab the lock to be allowed to modify the old region list.
4803     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4804     _collection_set->iterate(&_cl);
4805   }
4806 
4807   void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {
4808     G1CollectedHeap* g1h = G1CollectedHeap::heap();
4809 
4810     HeapRegion* r = g1h->region_at(region_idx);
4811     assert(!g1h->is_on_master_free_list(r), "sanity");
4812 
4813     Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths);
4814 
4815     if (!is_young) {
4816       g1h->_hot_card_cache->reset_card_counts(r);
4817     }
4818 
4819     if (!evacuation_failed) {
4820       r->rem_set()->clear_locked();
4821     }
4822   }
4823 
4824   class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
4825   private:
4826     size_t _cur_idx;
4827     WorkItem* _work_items;
4828   public:
4829     G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
4830 
4831     virtual bool doHeapRegion(HeapRegion* r) {
4832       _work_items[_cur_idx++] = WorkItem(r);
4833       return false;
4834     }
4835   };
4836 
4837   void prepare_work() {
4838     G1PrepareFreeCollectionSetClosure cl(_work_items);
4839     _collection_set->iterate(&cl);
4840   }
4841 
4842   void complete_work() {
4843     _cl.complete_work();
4844 
4845     G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
4846     policy->record_max_rs_lengths(_rs_lengths);
4847     policy->cset_regions_freed();
4848   }
4849 public:
4850   G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4851     AbstractGangTask("G1 Free Collection Set"),
4852     _cl(evacuation_info, surviving_young_words),
4853     _collection_set(collection_set),
4854     _surviving_young_words(surviving_young_words),
4855     _serial_work_claim(0),
4856     _rs_lengths(0),
4857     _parallel_work_claim(0),
4858     _num_work_items(collection_set->region_length()),
4859     _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
4860     prepare_work();
4861   }
4862 
4863   ~G1FreeCollectionSetTask() {
4864     complete_work();
4865     FREE_C_HEAP_ARRAY(WorkItem, _work_items);
4866   }
4867 
4868   // Chunk size for work distribution. The chosen value has been determined experimentally
4869   // to be a good tradeoff between overhead and achievable parallelism.
4870   static uint chunk_size() { return 32; }
4871 
4872   virtual void work(uint worker_id) {
4873     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
4874 
4875     // Claim serial work.
4876     if (_serial_work_claim == 0) {
4877       jint value = Atomic::add(1, &_serial_work_claim) - 1;
4878       if (value == 0) {
4879         double serial_time = os::elapsedTime();
4880         do_serial_work();
4881         timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
4882       }
4883     }
4884 
4885     // Start parallel work.
4886     double young_time = 0.0;
4887     bool has_young_time = false;
4888     double non_young_time = 0.0;
4889     bool has_non_young_time = false;
4890 
4891     while (true) {
4892       size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
4893       size_t cur = end - chunk_size();
4894 
4895       if (cur >= _num_work_items) {
4896         break;
4897       }
4898 
4899       double start_time = os::elapsedTime();
4900 
4901       end = MIN2(end, _num_work_items);
4902 
4903       for (; cur < end; cur++) {
4904         bool is_young = _work_items[cur].is_young;
4905 
4906         do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);
4907 
4908         double end_time = os::elapsedTime();
4909         double time_taken = end_time - start_time;
4910         if (is_young) {
4911           young_time += time_taken;
4912           has_young_time = true;
4913         } else {
4914           non_young_time += time_taken;
4915           has_non_young_time = true;
4916         }
4917         start_time = end_time;
4918       }
4919     }
4920 
4921     if (has_young_time) {
4922       timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);
4923     }
4924     if (has_non_young_time) {
4925       timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, young_time);
4926     }
4927   }
4928 };
4929 
4930 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4931   _eden.clear();
4932 
4933   double free_cset_start_time = os::elapsedTime();
4934 
4935   {
4936     uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
4937     uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
4938 
4939     G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
4940 
4941     log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
4942                         cl.name(),
4943                         num_workers,
4944                         _collection_set.region_length());
4945     workers()->run_task(&cl, num_workers);
4946   }
4947   g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
4948 
4949   collection_set->clear();
4950 }
4951 
4952 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4953  private:
4954   FreeRegionList* _free_region_list;
4955   HeapRegionSet* _proxy_set;
4956   uint _humongous_objects_reclaimed;
4957   uint _humongous_regions_reclaimed;
4958   size_t _freed_bytes;
4959  public:
4960 
4961   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4962     _free_region_list(free_region_list), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
4963   }
4964 
4965   virtual bool doHeapRegion(HeapRegion* r) {
4966     if (!r->is_starts_humongous()) {
4967       return false;
4968     }
4969 
4970     G1CollectedHeap* g1h = G1CollectedHeap::heap();
4971 
4972     oop obj = (oop)r->bottom();
4973     G1CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
4974 
4975     // The following checks whether the humongous object is live are sufficient.
4976     // The main additional check (in addition to having a reference from the roots
4977     // or the young gen) is whether the humongous object has a remembered set entry.
4978     //
4979     // A humongous object cannot be live if there is no remembered set for it
4980     // because:
4981     // - there can be no references from within humongous starts regions referencing
4982     // the object because we never allocate other objects into them.
4983     // (I.e. there are no intra-region references that may be missed by the
4984     // remembered set)
4985     // - as soon there is a remembered set entry to the humongous starts region
4986     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
4987     // until the end of a concurrent mark.
4988     //
4989     // It is not required to check whether the object has been found dead by marking
4990     // or not, in fact it would prevent reclamation within a concurrent cycle, as
4991     // all objects allocated during that time are considered live.
4992     // SATB marking is even more conservative than the remembered set.
4993     // So if at this point in the collection there is no remembered set entry,
4994     // nobody has a reference to it.
4995     // At the start of collection we flush all refinement logs, and remembered sets
4996     // are completely up-to-date wrt to references to the humongous object.
4997     //
4998     // Other implementation considerations:
4999     // - never consider object arrays at this time because they would pose
5000     // considerable effort for cleaning up the the remembered sets. This is
5001     // required because stale remembered sets might reference locations that
5002     // are currently allocated into.
5003     uint region_idx = r->hrm_index();
5004     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5005         !r->rem_set()->is_empty()) {
5006       log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT "  with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5007                                region_idx,
5008                                (size_t)obj->size() * HeapWordSize,
5009                                p2i(r->bottom()),
5010                                r->rem_set()->occupied(),
5011                                r->rem_set()->strong_code_roots_list_length(),
5012                                next_bitmap->isMarked(r->bottom()),
5013                                g1h->is_humongous_reclaim_candidate(region_idx),
5014                                obj->is_typeArray()
5015                               );
5016       return false;
5017     }
5018 
5019     guarantee(obj->is_typeArray(),
5020               "Only eagerly reclaiming type arrays is supported, but the object "
5021               PTR_FORMAT " is not.", p2i(r->bottom()));
5022 
5023     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5024                              region_idx,
5025                              (size_t)obj->size() * HeapWordSize,
5026                              p2i(r->bottom()),
5027                              r->rem_set()->occupied(),
5028                              r->rem_set()->strong_code_roots_list_length(),
5029                              next_bitmap->isMarked(r->bottom()),
5030                              g1h->is_humongous_reclaim_candidate(region_idx),
5031                              obj->is_typeArray()
5032                             );
5033 
5034     // Need to clear mark bit of the humongous object if already set.
5035     if (next_bitmap->isMarked(r->bottom())) {
5036       next_bitmap->clear(r->bottom());
5037     }
5038     _humongous_objects_reclaimed++;
5039     do {
5040       HeapRegion* next = g1h->next_region_in_humongous(r);
5041       _freed_bytes += r->used();
5042       r->set_containing_set(NULL);
5043       _humongous_regions_reclaimed++;
5044       g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
5045       r = next;
5046     } while (r != NULL);
5047 
5048     return false;
5049   }
5050 
5051   uint humongous_objects_reclaimed() {
5052     return _humongous_objects_reclaimed;
5053   }
5054 
5055   uint humongous_regions_reclaimed() {
5056     return _humongous_regions_reclaimed;
5057   }
5058 
5059   size_t bytes_freed() const {
5060     return _freed_bytes;
5061   }
5062 };
5063 
5064 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5065   assert_at_safepoint(true);
5066 
5067   if (!G1EagerReclaimHumongousObjects ||
5068       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
5069     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5070     return;
5071   }
5072 
5073   double start_time = os::elapsedTime();
5074 
5075   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5076 
5077   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5078   heap_region_iterate(&cl);
5079 
5080   remove_from_old_sets(0, cl.humongous_regions_reclaimed());
5081 
5082   G1HRPrinter* hrp = hr_printer();
5083   if (hrp->is_active()) {
5084     FreeRegionListIterator iter(&local_cleanup_list);
5085     while (iter.more_available()) {
5086       HeapRegion* hr = iter.get_next();
5087       hrp->cleanup(hr);
5088     }
5089   }
5090 
5091   prepend_to_freelist(&local_cleanup_list);
5092   decrement_summary_bytes(cl.bytes_freed());
5093 
5094   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
5095                                                                     cl.humongous_objects_reclaimed());
5096 }
5097 
5098 class G1AbandonCollectionSetClosure : public HeapRegionClosure {
5099 public:
5100   virtual bool doHeapRegion(HeapRegion* r) {
5101     assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
5102     G1CollectedHeap::heap()->clear_in_cset(r);
5103     r->set_young_index_in_cset(-1);
5104     return false;
5105   }
5106 };
5107 
5108 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
5109   G1AbandonCollectionSetClosure cl;
5110   collection_set->iterate(&cl);
5111 
5112   collection_set->clear();
5113   collection_set->stop_incremental_building();
5114 }
5115 
5116 void G1CollectedHeap::set_free_regions_coming() {
5117   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");
5118 
5119   assert(!free_regions_coming(), "pre-condition");
5120   _free_regions_coming = true;
5121 }
5122 
5123 void G1CollectedHeap::reset_free_regions_coming() {
5124   assert(free_regions_coming(), "pre-condition");
5125 
5126   {
5127     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5128     _free_regions_coming = false;
5129     SecondaryFreeList_lock->notify_all();
5130   }
5131 
5132   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
5133 }
5134 
5135 void G1CollectedHeap::wait_while_free_regions_coming() {
5136   // Most of the time we won't have to wait, so let's do a quick test
5137   // first before we take the lock.
5138   if (!free_regions_coming()) {
5139     return;
5140   }
5141 
5142   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");
5143 
5144   {
5145     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5146     while (free_regions_coming()) {
5147       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5148     }
5149   }
5150 
5151   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
5152 }
5153 
5154 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5155   return _allocator->is_retained_old_region(hr);
5156 }
5157 
5158 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5159   _eden.add(hr);
5160   _g1_policy->set_region_eden(hr);
5161 }
5162 
5163 #ifdef ASSERT
5164 
5165 class NoYoungRegionsClosure: public HeapRegionClosure {
5166 private:
5167   bool _success;
5168 public:
5169   NoYoungRegionsClosure() : _success(true) { }
5170   bool doHeapRegion(HeapRegion* r) {
5171     if (r->is_young()) {
5172       log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5173                             p2i(r->bottom()), p2i(r->end()));
5174       _success = false;
5175     }
5176     return false;
5177   }
5178   bool success() { return _success; }
5179 };
5180 
5181 bool G1CollectedHeap::check_young_list_empty() {
5182   bool ret = (young_regions_count() == 0);
5183 
5184   NoYoungRegionsClosure closure;
5185   heap_region_iterate(&closure);
5186   ret = ret && closure.success();
5187 
5188   return ret;
5189 }
5190 
5191 #endif // ASSERT
5192 
5193 class TearDownRegionSetsClosure : public HeapRegionClosure {
5194 private:
5195   HeapRegionSet *_old_set;
5196 
5197 public:
5198   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
5199 
5200   bool doHeapRegion(HeapRegion* r) {
5201     if (r->is_old()) {
5202       _old_set->remove(r);
5203     } else if(r->is_young()) {
5204       r->uninstall_surv_rate_group();
5205     } else {
5206       // We ignore free regions, we'll empty the free list afterwards.
5207       // We ignore humongous regions, we're not tearing down the
5208       // humongous regions set.
5209       assert(r->is_free() || r->is_humongous(),
5210              "it cannot be another type");
5211     }
5212     return false;
5213   }
5214 
5215   ~TearDownRegionSetsClosure() {
5216     assert(_old_set->is_empty(), "post-condition");
5217   }
5218 };
5219 
5220 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
5221   assert_at_safepoint(true /* should_be_vm_thread */);
5222 
5223   if (!free_list_only) {
5224     TearDownRegionSetsClosure cl(&_old_set);
5225     heap_region_iterate(&cl);
5226 
5227     // Note that emptying the _young_list is postponed and instead done as
5228     // the first step when rebuilding the regions sets again. The reason for
5229     // this is that during a full GC string deduplication needs to know if
5230     // a collected region was young or old when the full GC was initiated.
5231   }
5232   _hrm.remove_all_free_regions();
5233 }
5234 
5235 void G1CollectedHeap::increase_used(size_t bytes) {
5236   _summary_bytes_used += bytes;
5237 }
5238 
5239 void G1CollectedHeap::decrease_used(size_t bytes) {
5240   assert(_summary_bytes_used >= bytes,
5241          "invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
5242          _summary_bytes_used, bytes);
5243   _summary_bytes_used -= bytes;
5244 }
5245 
5246 void G1CollectedHeap::set_used(size_t bytes) {
5247   _summary_bytes_used = bytes;
5248 }
5249 
5250 class RebuildRegionSetsClosure : public HeapRegionClosure {
5251 private:
5252   bool            _free_list_only;
5253   HeapRegionSet*   _old_set;
5254   HeapRegionManager*   _hrm;
5255   size_t          _total_used;
5256 
5257 public:
5258   RebuildRegionSetsClosure(bool free_list_only,
5259                            HeapRegionSet* old_set, HeapRegionManager* hrm) :
5260     _free_list_only(free_list_only),
5261     _old_set(old_set), _hrm(hrm), _total_used(0) {
5262     assert(_hrm->num_free_regions() == 0, "pre-condition");
5263     if (!free_list_only) {
5264       assert(_old_set->is_empty(), "pre-condition");
5265     }
5266   }
5267 
5268   bool doHeapRegion(HeapRegion* r) {
5269     if (r->is_empty()) {
5270       // Add free regions to the free list
5271       r->set_free();
5272       r->set_allocation_context(AllocationContext::system());
5273       _hrm->insert_into_free_list(r);
5274     } else if (!_free_list_only) {
5275 
5276       if (r->is_humongous()) {
5277         // We ignore humongous regions. We left the humongous set unchanged.
5278       } else {
5279         assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
5280         // We now consider all regions old, so register as such. Leave
5281         // archive regions set that way, however, while still adding
5282         // them to the old set.
5283         if (!r->is_archive()) {
5284           r->set_old();
5285         }
5286         _old_set->add(r);
5287       }
5288       _total_used += r->used();
5289     }
5290 
5291     return false;
5292   }
5293 
5294   size_t total_used() {
5295     return _total_used;
5296   }
5297 };
5298 
5299 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
5300   assert_at_safepoint(true /* should_be_vm_thread */);
5301 
5302   if (!free_list_only) {
5303     _eden.clear();
5304     _survivor.clear();
5305   }
5306 
5307   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
5308   heap_region_iterate(&cl);
5309 
5310   if (!free_list_only) {
5311     set_used(cl.total_used());
5312     if (_archive_allocator != NULL) {
5313       _archive_allocator->clear_used();
5314     }
5315   }
5316   assert(used_unlocked() == recalculate_used(),
5317          "inconsistent used_unlocked(), "
5318          "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
5319          used_unlocked(), recalculate_used());
5320 }
5321 
5322 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
5323   _refine_cte_cl->set_concurrent(concurrent);
5324 }
5325 
5326 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5327   HeapRegion* hr = heap_region_containing(p);
5328   return hr->is_in(p);
5329 }
5330 
5331 // Methods for the mutator alloc region
5332 
5333 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5334                                                       bool force) {
5335   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5336   bool should_allocate = g1_policy()->should_allocate_mutator_region();
5337   if (force || should_allocate) {
5338     HeapRegion* new_alloc_region = new_region(word_size,
5339                                               false /* is_old */,
5340                                               false /* do_expand */);
5341     if (new_alloc_region != NULL) {
5342       set_region_short_lived_locked(new_alloc_region);
5343       _hr_printer.alloc(new_alloc_region, !should_allocate);
5344       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
5345       return new_alloc_region;
5346     }
5347   }
5348   return NULL;
5349 }
5350 
5351 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
5352                                                   size_t allocated_bytes) {
5353   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5354   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
5355 
5356   collection_set()->add_eden_region(alloc_region);
5357   increase_used(allocated_bytes);
5358   _hr_printer.retire(alloc_region);
5359   // We update the eden sizes here, when the region is retired,
5360   // instead of when it's allocated, since this is the point that its
5361   // used space has been recored in _summary_bytes_used.
5362   g1mm()->update_eden_size();
5363 }
5364 
5365 // Methods for the GC alloc regions
5366 
5367 bool G1CollectedHeap::has_more_regions(InCSetState dest) {
5368   if (dest.is_old()) {
5369     return true;
5370   } else {
5371     return survivor_regions_count() < g1_policy()->max_survivor_regions();
5372   }
5373 }
5374 
5375 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
5376   assert(FreeList_lock->owned_by_self(), "pre-condition");
5377 
5378   if (!has_more_regions(dest)) {
5379     return NULL;
5380   }
5381 
5382   const bool is_survivor = dest.is_young();
5383 
5384   HeapRegion* new_alloc_region = new_region(word_size,
5385                                             !is_survivor,
5386                                             true /* do_expand */);
5387   if (new_alloc_region != NULL) {
5388     // We really only need to do this for old regions given that we
5389     // should never scan survivors. But it doesn't hurt to do it
5390     // for survivors too.
5391     new_alloc_region->record_timestamp();
5392     if (is_survivor) {
5393       new_alloc_region->set_survivor();
5394       _survivor.add(new_alloc_region);
5395       _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
5396     } else {
5397       new_alloc_region->set_old();
5398       _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
5399     }
5400     _hr_printer.alloc(new_alloc_region);
5401     bool during_im = collector_state()->during_initial_mark_pause();
5402     new_alloc_region->note_start_of_copying(during_im);
5403     return new_alloc_region;
5404   }
5405   return NULL;
5406 }
5407 
5408 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
5409                                              size_t allocated_bytes,
5410                                              InCSetState dest) {
5411   bool during_im = collector_state()->during_initial_mark_pause();
5412   alloc_region->note_end_of_copying(during_im);
5413   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
5414   if (dest.is_old()) {
5415     _old_set.add(alloc_region);
5416   }
5417   _hr_printer.retire(alloc_region);
5418 }
5419 
5420 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
5421   bool expanded = false;
5422   uint index = _hrm.find_highest_free(&expanded);
5423 
5424   if (index != G1_NO_HRM_INDEX) {
5425     if (expanded) {
5426       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
5427                                 HeapRegion::GrainWords * HeapWordSize);
5428     }
5429     _hrm.allocate_free_regions_starting_at(index, 1);
5430     return region_at(index);
5431   }
5432   return NULL;
5433 }
5434 
5435 // Optimized nmethod scanning
5436 
5437 class RegisterNMethodOopClosure: public OopClosure {
5438   G1CollectedHeap* _g1h;
5439   nmethod* _nm;
5440 
5441   template <class T> void do_oop_work(T* p) {
5442     T heap_oop = oopDesc::load_heap_oop(p);
5443     if (!oopDesc::is_null(heap_oop)) {
5444       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5445       HeapRegion* hr = _g1h->heap_region_containing(obj);
5446       assert(!hr->is_continues_humongous(),
5447              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5448              " starting at " HR_FORMAT,
5449              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5450 
5451       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
5452       hr->add_strong_code_root_locked(_nm);
5453     }
5454   }
5455 
5456 public:
5457   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5458     _g1h(g1h), _nm(nm) {}
5459 
5460   void do_oop(oop* p)       { do_oop_work(p); }
5461   void do_oop(narrowOop* p) { do_oop_work(p); }
5462 };
5463 
5464 class UnregisterNMethodOopClosure: public OopClosure {
5465   G1CollectedHeap* _g1h;
5466   nmethod* _nm;
5467 
5468   template <class T> void do_oop_work(T* p) {
5469     T heap_oop = oopDesc::load_heap_oop(p);
5470     if (!oopDesc::is_null(heap_oop)) {
5471       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5472       HeapRegion* hr = _g1h->heap_region_containing(obj);
5473       assert(!hr->is_continues_humongous(),
5474              "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5475              " starting at " HR_FORMAT,
5476              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5477 
5478       hr->remove_strong_code_root(_nm);
5479     }
5480   }
5481 
5482 public:
5483   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5484     _g1h(g1h), _nm(nm) {}
5485 
5486   void do_oop(oop* p)       { do_oop_work(p); }
5487   void do_oop(narrowOop* p) { do_oop_work(p); }
5488 };
5489 
5490 void G1CollectedHeap::register_nmethod(nmethod* nm) {
5491   CollectedHeap::register_nmethod(nm);
5492 
5493   guarantee(nm != NULL, "sanity");
5494   RegisterNMethodOopClosure reg_cl(this, nm);
5495   nm->oops_do(&reg_cl);
5496 }
5497 
5498 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
5499   CollectedHeap::unregister_nmethod(nm);
5500 
5501   guarantee(nm != NULL, "sanity");
5502   UnregisterNMethodOopClosure reg_cl(this, nm);
5503   nm->oops_do(&reg_cl, true);
5504 }
5505 
5506 void G1CollectedHeap::purge_code_root_memory() {
5507   double purge_start = os::elapsedTime();
5508   G1CodeRootSet::purge();
5509   double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
5510   g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
5511 }
5512 
5513 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
5514   G1CollectedHeap* _g1h;
5515 
5516 public:
5517   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5518     _g1h(g1h) {}
5519 
5520   void do_code_blob(CodeBlob* cb) {
5521     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
5522     if (nm == NULL) {
5523       return;
5524     }
5525 
5526     if (ScavengeRootsInCode) {
5527       _g1h->register_nmethod(nm);
5528     }
5529   }
5530 };
5531 
5532 void G1CollectedHeap::rebuild_strong_code_roots() {
5533   RebuildStrongCodeRootClosure blob_cl(this);
5534   CodeCache::blobs_do(&blob_cl);
5535 }